"""DCache

based on Anton Blanchard microwatt dcache.vhdl
"""

from enum import Enum, unique

from nmigen import Module, Signal, Elaboratable, Cat, Repl, Array, Const
from nmutil.util import Display

from random import randint

from nmigen.cli import main
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from soc.experiment.mem_types import (LoadStore1ToDCacheType,
                                      DCacheToLoadStore1Type,
                                      MMUToDCacheType,
                                      DCacheToMMUType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                     WBAddrType, WBDataType, WBSelType,
                                     WBMasterOut, WBSlaveOut,
                                     WBMasterOutVector, WBSlaveOutVector,
                                     WBIOMasterOut, WBIOSlaveOut)

from soc.experiment.cache_ram import CacheRam
#from soc.experiment.plru import PLRU
from nmutil.plru import PLRU

# for test
from nmigen_soc.wishbone.sram import SRAM
from nmigen import Memory
from nmigen.cli import rtlil

from nmigen.back.pysim import Simulator, Delay, Settle
# alternative (cxxsim) backend -- only one of the two may be active:
#from nmigen.sim.cxxsim import Simulator, Delay, Settle

from nmutil.util import wrap
# TODO: make these parameters of DCache at some point
LINE_SIZE = 64    # Line size in bytes
NUM_LINES = 16    # Number of lines in a set
NUM_WAYS = 4      # Number of ways
TLB_SET_SIZE = 64 # L1 DTLB entries per set
TLB_NUM_WAYS = 4  # L1 DTLB number of sets
TLB_LG_PGSZ = 12  # L1 DTLB log_2(page_size)
LOG_LENGTH = 0    # Non-zero to enable log data collection

# BRAM organisation: We never access more than
# -- WB_DATA_BITS at a time so to save
# -- resources we make the array only that wide, and
# -- use consecutive indices to make a cache "line"
# -- ROW_SIZE is the width in bytes of the BRAM
# -- (based on WB, so 64-bits)
ROW_SIZE = WB_DATA_BITS // 8

# ROW_PER_LINE is the number of rows (wishbone
# transactions) in a line
ROW_PER_LINE = LINE_SIZE // ROW_SIZE

# BRAM_ROWS is the number of rows in BRAM needed
# to represent the full dcache
BRAM_ROWS = NUM_LINES * ROW_PER_LINE

print ("ROW_SIZE", ROW_SIZE)
print ("ROW_PER_LINE", ROW_PER_LINE)
print ("BRAM_ROWS", BRAM_ROWS)
print ("NUM_WAYS", NUM_WAYS)
# Bit fields counts in the address

# REAL_ADDR_BITS is the number of real address bits that we store
REAL_ADDR_BITS = 56

# ROW_BITS is the number of bits to select a row
ROW_BITS = log2_int(BRAM_ROWS)

# ROW_LINE_BITS is the number of bits to select a row within a line
ROW_LINE_BITS = log2_int(ROW_PER_LINE)

# LINE_OFF_BITS is the number of bits for
# the offset in a cache line
LINE_OFF_BITS = log2_int(LINE_SIZE)

# ROW_OFF_BITS is the number of bits for the offset in a row
ROW_OFF_BITS = log2_int(ROW_SIZE)

# INDEX_BITS is the number of bits to select a cache line
INDEX_BITS = log2_int(NUM_LINES)

# SET_SIZE_BITS is the log base 2 of the set size
SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS

# TAG_BITS is the number of bits of
# the tag part of the address
TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS

# TAG_WIDTH is the width in bits of each way of the tag RAM
TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)

# WAY_BITS is the number of bits to select a way
WAY_BITS = log2_int(NUM_WAYS)

# Example of layout for 32 lines of 64 bytes:
#
# ..  tag    |index|  line  |
# ..         |   row   |    |
# ..         |     |---|    | ROW_LINE_BITS  (3)
# ..         |     |--- - --| LINE_OFF_BITS (6)
# ..         |         |- --| ROW_OFF_BITS  (3)
# ..         |----- ---|    | ROW_BITS      (8)
# ..         |-----|        | INDEX_BITS    (5)
# .. --------|              | TAG_BITS      (45)
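#
# With the *default* geometry above (LINE_SIZE=64, NUM_LINES=16, NUM_WAYS=4,
# REAL_ADDR_BITS=56) the derived constants work out as follows -- a worked
# example for orientation (the prints below show the live values):
#
#   ROW_OFF_BITS  = log2(8)   = 3    byte offset within a BRAM row
#   ROW_LINE_BITS = log2(8)   = 3    row within a line
#   LINE_OFF_BITS = log2(64)  = 6    byte offset within a line
#   INDEX_BITS    = log2(16)  = 4    which line (set) of the cache
#   SET_SIZE_BITS = 6 + 4     = 10   fits inside the 12-bit page offset
#   ROW_BITS      = log2(128) = 7    BRAM row address width
#   TAG_BITS      = 56 - 10   = 46
#   TAG_WIDTH     = 48               TAG_BITS rounded up to a whole byte
#   WAY_BITS      = log2(4)   = 2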
print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
            (TAG_BITS, INDEX_BITS, ROW_BITS,
             ROW_OFF_BITS, LINE_OFF_BITS, ROW_LINE_BITS))
print ("index @: %d-%d" % (LINE_OFF_BITS, SET_SIZE_BITS))
print ("row @: %d-%d" % (LINE_OFF_BITS, ROW_OFF_BITS))
print ("tag @: %d-%d width %d" % (SET_SIZE_BITS, REAL_ADDR_BITS, TAG_WIDTH))

TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

print ("TAG_RAM_WIDTH", TAG_RAM_WIDTH)

def CacheTagArray():
    return Array(Signal(TAG_RAM_WIDTH, name="cachetag_%d" % x) \
                 for x in range(NUM_LINES))

def CacheValidBitsArray():
    return Array(Signal(NUM_WAYS, name="cachevalid_%d" % x) \
                 for x in range(NUM_LINES))

def RowPerLineValidArray():
    return Array(Signal(name="rows_valid%d" % x) \
                 for x in range(ROW_PER_LINE))
# L1 TLB
TLB_SET_BITS     = log2_int(TLB_SET_SIZE)
TLB_WAY_BITS     = log2_int(TLB_NUM_WAYS)
TLB_EA_TAG_BITS  = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
TLB_PTE_BITS     = 64
TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS

def ispow2(x):
    return (1 << log2_int(x, False)) == x

assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not multiple of ROW_SIZE"
assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), "geometry bits don't add up"
assert (LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS), \
        "geometry bits don't add up"
assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS), \
        "geometry bits don't add up"
assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS), \
        "geometry bits don't add up"
assert 64 == WB_DATA_BITS, "Can't yet handle wb width that isn't 64-bits"
assert SET_SIZE_BITS <= TLB_LG_PGSZ, "Set indexed by virtual address"
def TLBValidBitsArray():
    return Array(Signal(TLB_NUM_WAYS, name="tlbvalid%d" % x) \
                 for x in range(TLB_SET_SIZE))

def TLBTagEAArray():
    return Array(Signal(TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
                 for x in range (TLB_NUM_WAYS))

def TLBTagsArray():
    return Array(Signal(TLB_TAG_WAY_BITS, name="tlbtags%d" % x) \
                 for x in range (TLB_SET_SIZE))

def TLBPtesArray():
    return Array(Signal(TLB_PTE_WAY_BITS, name="tlbptes%d" % x) \
                 for x in range(TLB_SET_SIZE))

def HitWaySet():
    return Array(Signal(WAY_BITS, name="hitway_%d" % x) \
                 for x in range(TLB_NUM_WAYS))

# Cache RAM interface
def CacheRamOut():
    return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
                 for x in range(NUM_WAYS))

# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS, name="plru_out%d" % x) \
                 for x in range(NUM_LINES))

# TLB PLRU output interface
def TLBPLRUOut():
    return Array(Signal(TLB_WAY_BITS, name="tlbplru_out%d" % x) \
                 for x in range(TLB_SET_SIZE))

# Helper functions to decode incoming requests

# Return the cache line index (tag index) for an address
def get_index(addr):
    return addr[LINE_OFF_BITS:SET_SIZE_BITS]

# Return the cache row index (data memory) for an address
def get_row(addr):
    return addr[ROW_OFF_BITS:SET_SIZE_BITS]
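# Worked example (default geometry: ROW_OFF_BITS=3, LINE_OFF_BITS=6,
# SET_SIZE_BITS=10).  For addr=0x530, as used in the testbench below:
#   get_index(addr) = addr[6:10] = 4     (line 4 of the set)
#   get_row(addr)   = addr[3:10] = 38    (BRAM row 38)
# i.e. the row index is the line index with the row-within-line bits
# appended below it.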
# Return the index of a row within a line
def get_row_of_line(row):
    return row[:ROW_BITS][:ROW_LINE_BITS]

# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

# Returns whether this is the last row of a line
def is_last_row(row, last):
    return get_row_of_line(row) == last

# Return the next row in the current cache line. We use a
# dedicated function in order to limit the size of the
# generated adder to be only the bits within a cache line
# (3 bits with default settings)
def next_row(row):
    row_v = row[0:ROW_LINE_BITS] + 1
    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
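# Example: with ROW_LINE_BITS=3, next_row(0b0101_111) == 0b0101_000 -- the
# low 3 bits increment modulo 8 while the upper (line-select) bits pass
# through unchanged, so the adder never carries out of the cache line.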
# Get the tag value from the address
def get_tag(addr):
    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]

# Read a tag from a tag memory row
def read_tag(way, tagset):
    return tagset.word_select(way, TAG_WIDTH)[:TAG_BITS]

# Read a TLB tag from a TLB tag memory row
def read_tlb_tag(way, tags):
    return tags.word_select(way, TLB_EA_TAG_BITS)

# Write a TLB tag to a TLB tag memory row
def write_tlb_tag(way, tags, tag):
    return read_tlb_tag(way, tags).eq(tag)

# Read a PTE from a TLB PTE memory row
def read_tlb_pte(way, ptes):
    return ptes.word_select(way, TLB_PTE_BITS)

# Write a PTE to a TLB PTE memory row
def write_tlb_pte(way, ptes, newpte):
    return read_tlb_pte(way, ptes).eq(newpte)
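# Note on the two write_* helpers above: nmigen's word_select() returns an
# assignable slice, so read_tlb_tag(way, tags).eq(tag) yields an assignment
# to just that way's field.  Typical use (as in DTLBUpdate.elaborate below):
#
#   comb += write_tlb_tag(self.repl_way, tagset, self.eatag)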
# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.reference = Signal()
        self.changed   = Signal()
        self.nocache   = Signal()
        self.priv      = Signal()
        self.rd_perm   = Signal()
        self.wr_perm   = Signal()


def extract_perm_attr(pte):
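    # NOTE: the body of this helper was lost in extraction.  What follows is
    # a minimal sketch, not the verified original: it assumes the same PTE
    # bit positions that tlb_search() reads further down (reference=bit 8,
    # changed=bit 7, nocache=bit 5, priv=bit 3, rd_perm=bit 2, wr_perm=bit 1)
    # and simply rebinds the record fields to slices of the PTE.
    pa = PermAttr()
    pa.reference = pte[8]
    pa.changed   = pte[7]
    pa.nocache   = pte[5]
    pa.priv      = pte[3]
    pa.rd_perm   = pte[2]
    pa.wr_perm   = pte[1]
    return pa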
# Type of operation on a "valid" input
@unique
class Op(Enum):
    OP_NONE       = 0
    OP_BAD        = 1 # NC cache hit, TLB miss, prot/RC failure
    OP_STCX_FAIL  = 2 # conditional store w/o reservation
    OP_LOAD_HIT   = 3 # Cache hit on load
    OP_LOAD_MISS  = 4 # Load missing cache
    OP_LOAD_NC    = 5 # Non-cachable load
    OP_STORE_HIT  = 6 # Store hitting cache
    OP_STORE_MISS = 7 # Store missing cache


# Cache state machine
@unique
class State(Enum):
    IDLE             = 0 # Normal load hit processing
    RELOAD_WAIT_ACK  = 1 # Cache reload wait ack
    STORE_WAIT_ACK   = 2 # Store wait ack
    NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack


# In order to make timing, we use the BRAMs with
# an output buffer, which means that the BRAM
# output is delayed by an extra cycle.
#
# Thus, the dcache has a 2-stage internal pipeline
# for cache hits with no stalls.
#
# All other operations are handled via stalling
# in the first stage.
#
# The second stage can thus complete a hit at the same
# time as the first stage emits a stall for a complex op.
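#
# A rough cycle-by-cycle sketch of a stall-free load hit, as implied by the
# comments above (illustrative only, not a timing guarantee):
#
#   cycle 0: request latched into r0 (stage_0)
#   cycle 1: TLB and tag lookup on r0.req; BRAM read issued (stage 1)
#   cycle 2: buffered BRAM output becomes valid; writeback_control
#            returns the load data (hence the 1-cycle-delayed hit signals)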
# Stage 0 register, basically contains just the latched request
class RegStage0(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.req     = LoadStore1ToDCacheType(name="lsmem")
        self.tlbie   = Signal()
        self.doall   = Signal()
        self.tlbld   = Signal()
        self.mmu_req = Signal() # indicates source of request


class MemAccessRequest(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.op        = Signal(Op)
        self.valid     = Signal()
        self.dcbz      = Signal()
        self.real_addr = Signal(REAL_ADDR_BITS)
        self.data      = Signal(64)
        self.byte_sel  = Signal(8)
        self.hit_way   = Signal(WAY_BITS)
        self.same_tag  = Signal()
        self.mmu_req   = Signal()
# First stage register, contains state for stage 1 of load hits
# and for the state machine used by all other operations
class RegStage1(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        # Info about the request
        self.full           = Signal() # have uncompleted request
        self.mmu_req        = Signal() # request is from MMU
        self.req            = MemAccessRequest(name="reqmem")

        # Cache hit state
        self.hit_way        = Signal(WAY_BITS)
        self.hit_load_valid = Signal()
        self.hit_index      = Signal(INDEX_BITS)
        self.cache_hit      = Signal()

        # TLB hit state
        self.tlb_hit        = Signal()
        self.tlb_hit_way    = Signal(TLB_NUM_WAYS)
        self.tlb_hit_index  = Signal(TLB_WAY_BITS)

        # 2-stage data buffer for data forwarded from writes to reads
        self.forward_data1  = Signal(64)
        self.forward_data2  = Signal(64)
        self.forward_sel1   = Signal(8)
        self.forward_valid1 = Signal()
        self.forward_way1   = Signal(WAY_BITS)
        self.forward_row1   = Signal(ROW_BITS)
        self.use_forward1   = Signal()
        self.forward_sel    = Signal(8)

        # Cache miss state (reload state machine)
        self.state          = Signal(State)
        self.dcbz           = Signal()
        self.write_bram     = Signal()
        self.write_tag      = Signal()
        self.slow_valid     = Signal()
        self.real_adr       = Signal(REAL_ADDR_BITS)
        self.wb             = WBMasterOut("wb")
        self.reload_tag     = Signal(TAG_BITS)
        self.store_way      = Signal(WAY_BITS)
        self.store_row      = Signal(ROW_BITS)
        self.store_index    = Signal(INDEX_BITS)
        self.end_row_ix     = Signal(ROW_LINE_BITS)
        self.rows_valid     = RowPerLineValidArray()
        self.acks_pending   = Signal(3)
        self.inc_acks       = Signal()
        self.dec_acks       = Signal()

        # Signals to complete (possibly with error)
        self.ls_valid       = Signal()
        self.ls_error       = Signal()
        self.mmu_done       = Signal()
        self.mmu_error      = Signal()
        self.cache_paradox  = Signal()

        # Signal to complete a failed stcx.
        self.stcx_fail      = Signal()


# Reservation information
class Reservation(RecordObject):
    def __init__(self):
        super().__init__()
        self.valid = Signal()
        self.addr  = Signal(64-LINE_OFF_BITS)
class DTLBUpdate(Elaboratable):
    def __init__(self):
        self.tlbie         = Signal()
        self.tlbwe         = Signal()
        self.doall         = Signal()
        self.updated       = Signal()
        self.v_updated     = Signal()
        self.tlb_hit       = Signal()
        self.tlb_req_index = Signal(TLB_SET_BITS)

        self.tlb_hit_way   = Signal(TLB_WAY_BITS)
        self.tlb_tag_way   = Signal(TLB_TAG_WAY_BITS)
        self.tlb_pte_way   = Signal(TLB_PTE_WAY_BITS)
        self.repl_way      = Signal(TLB_WAY_BITS)
        self.eatag         = Signal(TLB_EA_TAG_BITS)
        self.pte_data      = Signal(TLB_PTE_BITS)

        self.dv = Signal(TLB_PTE_WAY_BITS)

        self.tb_out = Signal(TLB_TAG_WAY_BITS)
        self.pb_out = Signal(TLB_NUM_WAYS)
        self.db_out = Signal(TLB_PTE_WAY_BITS)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        tagset = Signal(TLB_TAG_WAY_BITS)
        pteset = Signal(TLB_PTE_WAY_BITS)

        tb_out, pb_out, db_out = self.tb_out, self.pb_out, self.db_out

        with m.If(self.tlbie & self.doall):
            pass # clear all back in parent
        with m.Elif(self.tlbie):
            with m.If(self.tlb_hit):
                comb += db_out.eq(self.dv)
                # invalidate: clear the hit way's valid bit
                comb += db_out.bit_select(self.tlb_hit_way, 1).eq(0)
                comb += self.v_updated.eq(1)

        with m.Elif(self.tlbwe):
            comb += tagset.eq(self.tlb_tag_way)
            comb += write_tlb_tag(self.repl_way, tagset, self.eatag)
            comb += tb_out.eq(tagset)

            comb += pteset.eq(self.tlb_pte_way)
            comb += write_tlb_pte(self.repl_way, pteset, self.pte_data)
            comb += pb_out.eq(pteset)

            comb += db_out.bit_select(self.repl_way, 1).eq(1)

            comb += self.updated.eq(1)
            comb += self.v_updated.eq(1)

        return m
class DCachePendingHit(Elaboratable):

    def __init__(self, tlb_pte_way, tlb_valid_way, tlb_hit_way,
                       cache_valid_idx, cache_tag_set,
                       req_addr,
                       hit_set):

        self.go         = Signal()
        self.virt_mode  = Signal()
        self.is_hit     = Signal()
        self.tlb_hit    = Signal()
        self.hit_way    = Signal(WAY_BITS)
        self.rel_match  = Signal()
        self.req_index  = Signal(INDEX_BITS)
        self.reload_tag = Signal(TAG_BITS)

        self.tlb_hit_way     = tlb_hit_way
        self.tlb_pte_way     = tlb_pte_way
        self.tlb_valid_way   = tlb_valid_way
        self.cache_valid_idx = cache_valid_idx
        self.cache_tag_set   = cache_tag_set
        self.req_addr        = req_addr
        self.hit_set         = hit_set

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        go = self.go
        virt_mode = self.virt_mode
        is_hit = self.is_hit
        tlb_pte_way = self.tlb_pte_way
        tlb_valid_way = self.tlb_valid_way
        cache_valid_idx = self.cache_valid_idx
        cache_tag_set = self.cache_tag_set
        req_addr = self.req_addr
        tlb_hit_way = self.tlb_hit_way
        tlb_hit = self.tlb_hit
        hit_set = self.hit_set
        hit_way = self.hit_way
        rel_match = self.rel_match
        req_index = self.req_index
        reload_tag = self.reload_tag

        rel_matches = Array(Signal(name="rel_matches_%d" % i) \
                            for i in range(TLB_NUM_WAYS))
        hit_way_set = HitWaySet()

        # Test if pending request is a hit on any way
        # In order to make timing in virtual mode,
        # when we are using the TLB, we compare each
        # way with each of the real addresses from each way of
        # the TLB, and then decide later which match to use.

        with m.If(virt_mode):
            for j in range(TLB_NUM_WAYS):
                s_tag = Signal(TAG_BITS, name="s_tag%d" % j)
                s_hit = Signal()
                s_pte = Signal(TLB_PTE_BITS)
                s_ra  = Signal(REAL_ADDR_BITS)
                comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
                comb += s_ra.eq(Cat(req_addr[0:TLB_LG_PGSZ],
                                    s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
                comb += s_tag.eq(get_tag(s_ra))

                for i in range(NUM_WAYS):
                    is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
                    comb += is_tag_hit.eq(go & cache_valid_idx[i] &
                                          (read_tag(i, cache_tag_set) == s_tag)
                                          & tlb_valid_way[j])
                    with m.If(is_tag_hit):
                        comb += hit_way_set[j].eq(i)
                        comb += s_hit.eq(1)
                comb += hit_set[j].eq(s_hit)
                with m.If(s_tag == reload_tag):
                    comb += rel_matches[j].eq(1)
            with m.If(tlb_hit):
                comb += is_hit.eq(hit_set[tlb_hit_way])
                comb += hit_way.eq(hit_way_set[tlb_hit_way])
                comb += rel_match.eq(rel_matches[tlb_hit_way])
        with m.Else():
            s_tag = Signal(TAG_BITS)
            comb += s_tag.eq(get_tag(req_addr))
            for i in range(NUM_WAYS):
                is_tag_hit = Signal(name="is_tag_hit_%d" % i)
                comb += is_tag_hit.eq(go & cache_valid_idx[i] &
                                      (read_tag(i, cache_tag_set) == s_tag))
                with m.If(is_tag_hit):
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)
            with m.If(s_tag == reload_tag):
                comb += rel_match.eq(1)

        return m
class DCache(Elaboratable):
    """Set associative dcache write-through

    TODO (in no specific order):
    * See list in icache.vhdl
    * Complete load misses on the cycle when WB data comes instead of
      at the end of line (this requires dealing with requests coming in
      while not idle...)
    """
    def __init__(self):
        self.d_in      = LoadStore1ToDCacheType("d_in")
        self.d_out     = DCacheToLoadStore1Type("d_out")

        self.m_in      = MMUToDCacheType("m_in")
        self.m_out     = DCacheToMMUType("m_out")

        self.stall_out = Signal()

        self.wb_out    = WBMasterOut()
        self.wb_in     = WBSlaveOut()

        self.log_out   = Signal(20)
    def stage_0(self, m, r0, r1, r0_full):
        """Latch the request in r0.req as long as we're not stalling
        """
        comb = m.d.comb
        sync = m.d.sync

        d_in, d_out, m_in = self.d_in, self.d_out, self.m_in

        r = RegStage0("stage0")

        # TODO, this goes in unit tests and formal proofs
        with m.If(d_in.valid & m_in.valid):
            sync += Display("request collision loadstore vs MMU")

        with m.If(m_in.valid):
            sync += r.req.valid.eq(1)
            sync += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))
            sync += r.req.dcbz.eq(0)
            sync += r.req.nc.eq(0)
            sync += r.req.reserve.eq(0)
            sync += r.req.virt_mode.eq(0)
            sync += r.req.priv_mode.eq(1)
            sync += r.req.addr.eq(m_in.addr)
            sync += r.req.data.eq(m_in.pte)
            sync += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
            sync += r.tlbie.eq(m_in.tlbie)
            sync += r.doall.eq(m_in.doall)
            sync += r.tlbld.eq(m_in.tlbld)
            sync += r.mmu_req.eq(1)
        with m.Else():
            sync += r.req.eq(d_in)
            sync += r.tlbie.eq(0)
            sync += r.doall.eq(0)
            sync += r.tlbld.eq(0)
            sync += r.mmu_req.eq(0)
        with m.If(~(r1.full & r0_full)):
            sync += r0.eq(r)
            sync += r0_full.eq(r.req.valid)
    def tlb_read(self, m, r0_stall, tlb_valid_way,
                 tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
                 dtlb_tags, dtlb_ptes):
        """TLB
        Operates in the second cycle on the request latched in r0.req.
        TLB updates write the entry at the end of the second cycle.
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        index    = Signal(TLB_SET_BITS)
        addrbits = Signal(TLB_SET_BITS)

        amin = TLB_LG_PGSZ
        amax = TLB_LG_PGSZ + TLB_SET_BITS

        with m.If(m_in.valid):
            comb += addrbits.eq(m_in.addr[amin : amax])
        with m.Else():
            comb += addrbits.eq(d_in.addr[amin : amax])
        comb += index.eq(addrbits)

        # If we have any op and the previous op isn't finished,
        # then keep the same output for next cycle.
        with m.If(~r0_stall):
            sync += tlb_valid_way.eq(dtlb_valid_bits[index])
            sync += tlb_tag_way.eq(dtlb_tags[index])
            sync += tlb_pte_way.eq(dtlb_ptes[index])
    def maybe_tlb_plrus(self, m, r1, tlb_plru_victim):
        """Generate TLB PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        if TLB_NUM_WAYS == 0:
            return
        for i in range(TLB_SET_SIZE):
            # TLB PLRU interface
            tlb_plru        = PLRU(TLB_WAY_BITS)
            setattr(m.submodules, "maybe_plru_%d" % i, tlb_plru)
            tlb_plru_acc_en = Signal()

            comb += tlb_plru_acc_en.eq(r1.tlb_hit & (r1.tlb_hit_index == i))
            comb += tlb_plru.acc_en.eq(tlb_plru_acc_en)
            comb += tlb_plru.acc_i.eq(r1.tlb_hit_way)
            comb += tlb_plru_victim[i].eq(tlb_plru.lru_o)
    def tlb_search(self, m, tlb_req_index, r0, r0_valid,
                   tlb_valid_way, tlb_tag_way, tlb_hit_way,
                   tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra):

        comb = m.d.comb

        hitway = Signal(TLB_WAY_BITS)
        hit    = Signal()
        eatag  = Signal(TLB_EA_TAG_BITS)

        TLB_LG_END = TLB_LG_PGSZ + TLB_SET_BITS
        comb += tlb_req_index.eq(r0.req.addr[TLB_LG_PGSZ : TLB_LG_END])
        comb += eatag.eq(r0.req.addr[TLB_LG_END : 64 ])

        for i in range(TLB_NUM_WAYS):
            is_tag_hit = Signal()
            comb += is_tag_hit.eq(tlb_valid_way[i]
                                  & (read_tlb_tag(i, tlb_tag_way) == eatag))
            with m.If(is_tag_hit):
                comb += hitway.eq(i)
                comb += hit.eq(1)

        comb += tlb_hit.eq(hit & r0_valid)
        comb += tlb_hit_way.eq(hitway)

        with m.If(tlb_hit):
            comb += pte.eq(read_tlb_pte(hitway, tlb_pte_way))

        comb += valid_ra.eq(tlb_hit | ~r0.req.virt_mode)
        with m.If(r0.req.virt_mode):
            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                              r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
                              pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
            comb += perm_attr.reference.eq(pte[8])
            comb += perm_attr.changed.eq(pte[7])
            comb += perm_attr.nocache.eq(pte[5])
            comb += perm_attr.priv.eq(pte[3])
            comb += perm_attr.rd_perm.eq(pte[2])
            comb += perm_attr.wr_perm.eq(pte[1])
        with m.Else():
            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                              r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]))
            comb += perm_attr.reference.eq(1)
            comb += perm_attr.changed.eq(1)
            comb += perm_attr.nocache.eq(0)
            comb += perm_attr.priv.eq(1)
            comb += perm_attr.rd_perm.eq(1)
            comb += perm_attr.wr_perm.eq(1)
    def tlb_update(self, m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
                   tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
                   dtlb_tags, tlb_pte_way, dtlb_ptes):

        comb = m.d.comb
        sync = m.d.sync

        tlbie = Signal()
        tlbwe = Signal()

        comb += tlbie.eq(r0_valid & r0.tlbie)
        comb += tlbwe.eq(r0_valid & r0.tlbld)

        m.submodules.tlb_update = d = DTLBUpdate()
        with m.If(tlbie & r0.doall):
            # clear all valid bits at once
            for i in range(TLB_SET_SIZE):
                sync += dtlb_valid_bits[i].eq(0)
        with m.If(d.updated):
            sync += dtlb_tags[tlb_req_index].eq(d.tb_out)
            sync += dtlb_ptes[tlb_req_index].eq(d.pb_out)
        with m.If(d.v_updated):
            sync += dtlb_valid_bits[tlb_req_index].eq(d.db_out)

        comb += d.dv.eq(dtlb_valid_bits[tlb_req_index])

        comb += d.tlbie.eq(tlbie)
        comb += d.tlbwe.eq(tlbwe)
        comb += d.doall.eq(r0.doall)
        comb += d.tlb_hit.eq(tlb_hit)
        comb += d.tlb_hit_way.eq(tlb_hit_way)
        comb += d.tlb_tag_way.eq(tlb_tag_way)
        comb += d.tlb_pte_way.eq(tlb_pte_way)
        comb += d.tlb_req_index.eq(tlb_req_index)

        with m.If(tlb_hit):
            comb += d.repl_way.eq(tlb_hit_way)
        with m.Else():
            comb += d.repl_way.eq(tlb_plru_victim[tlb_req_index])
        comb += d.eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
        comb += d.pte_data.eq(r0.req.data)
    def maybe_plrus(self, m, r1, plru_victim):
        """Generate PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        if TLB_NUM_WAYS == 0:
            return

        for i in range(NUM_LINES):
            # PLRU interface
            plru        = PLRU(WAY_BITS)
            setattr(m.submodules, "plru%d" % i, plru)
            plru_acc_en = Signal()

            comb += plru_acc_en.eq(r1.cache_hit & (r1.hit_index == i))
            comb += plru.acc_en.eq(plru_acc_en)
            comb += plru.acc_i.eq(r1.hit_way)
            comb += plru_victim[i].eq(plru.lru_o)
    def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set, cache_tags):
        """Cache tag RAM read port
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        index = Signal(INDEX_BITS)

        with m.If(r0_stall):
            comb += index.eq(req_index)
        with m.Elif(m_in.valid):
            comb += index.eq(get_index(m_in.addr))
        with m.Else():
            comb += index.eq(get_index(d_in.addr))
        sync += cache_tag_set.eq(cache_tags[index])
    def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
                       r0_valid, r1, cache_valids, replace_way,
                       use_forward1_next, use_forward2_next,
                       req_hit_way, plru_victim, rc_ok, perm_attr,
                       valid_ra, perm_ok, access_ok, req_op, req_go,
                       tlb_pte_way,
                       tlb_hit, tlb_hit_way, tlb_valid_way, cache_tag_set,
                       cancel_store, req_same_tag, r0_stall, early_req_row):
        """Cache request parsing and hit detection
        """
        comb = m.d.comb
        m_in, d_in = self.m_in, self.d_in

        is_hit  = Signal()
        hit_way = Signal(WAY_BITS)
        op      = Signal(Op)
        opsel   = Signal(3)
        go      = Signal()
        nc      = Signal()
        hit_set = Array(Signal(name="hit_set_%d" % i) \
                        for i in range(TLB_NUM_WAYS))
        cache_valid_idx = Signal(NUM_WAYS)

        # Extract line, row and tag from request
        comb += req_index.eq(get_index(r0.req.addr))
        comb += req_row.eq(get_row(r0.req.addr))
        comb += req_tag.eq(get_tag(ra))

        if False: # display on comb is a bit... busy.
            comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
                            r0.req.addr, ra, req_index, req_tag, req_row)

        comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
        comb += cache_valid_idx.eq(cache_valids[req_index])

        m.submodules.dcache_pend = dc = DCachePendingHit(tlb_pte_way,
                                            tlb_valid_way, tlb_hit_way,
                                            cache_valid_idx, cache_tag_set,
                                            r0.req.addr,
                                            hit_set)

        comb += dc.go.eq(go)
        comb += dc.tlb_hit.eq(tlb_hit)
        comb += dc.reload_tag.eq(r1.reload_tag)
        comb += dc.virt_mode.eq(r0.req.virt_mode)

        comb += dc.req_index.eq(req_index)
        comb += is_hit.eq(dc.is_hit)
        comb += hit_way.eq(dc.hit_way)
        comb += req_same_tag.eq(dc.rel_match)

        # See if the request matches the line currently being reloaded
        with m.If((r1.state == State.RELOAD_WAIT_ACK) &
                  (req_index == r1.store_index) & req_same_tag):
            # For a store, consider this a hit even if the row isn't
            # valid since it will be by the time we perform the store.
            # For a load, check the appropriate row valid bit.
            rrow = Signal(ROW_LINE_BITS)
            comb += rrow.eq(req_row)
            valid = r1.rows_valid[rrow]
            comb += is_hit.eq(~r0.req.load | valid)
            comb += hit_way.eq(replace_way)

        # Whether to use forwarded data for a load or not
        with m.If((get_row(r1.req.real_addr) == req_row) &
                  (r1.req.hit_way == hit_way)):
            # Only need to consider r1.write_bram here, since if we
            # are writing refill data here, then we don't have a
            # cache hit this cycle on the line being refilled.
            # (There is the possibility that the load following the
            # load miss that started the refill could be to the old
            # contents of the victim line, since it is a couple of
            # cycles after the refill starts before we see the updated
            # cache tag. In that case we don't use the bypass.)
            comb += use_forward1_next.eq(r1.write_bram)
        with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
            comb += use_forward2_next.eq(r1.forward_valid1)
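        # (forward1 covers a row written one cycle ago, forward2 a row
        #  written two cycles ago: because of the BRAM's output buffer a
        #  read issued now would still return stale data for either row --
        #  see the "2-stage data buffer" fields in RegStage1 and the bypass
        #  mux in writeback_control.)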
        # The way that matched on a hit
        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r1.write_tag):
            comb += replace_way.eq(plru_victim[r1.store_index])
        with m.Else():
            comb += replace_way.eq(r1.store_way)

        # work out whether we have permission for this access
        # NB we don't yet implement AMR, thus no KUAP
        comb += rc_ok.eq(perm_attr.reference
                         & (r0.req.load | perm_attr.changed))
        comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv) &
                           (perm_attr.wr_perm |
                            (r0.req.load & perm_attr.rd_perm)))
        comb += access_ok.eq(valid_ra & perm_ok & rc_ok)

        # Combine the request and cache hit status to decide what
        # operation needs to be done
        comb += nc.eq(r0.req.nc | perm_attr.nocache)
        comb += op.eq(Op.OP_NONE)

        with m.If(~access_ok):
            comb += op.eq(Op.OP_BAD)
        with m.Elif(cancel_store):
            comb += op.eq(Op.OP_STCX_FAIL)
        with m.Else():
            comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
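            # opsel bit order (Cat packs LSB first):
            #   bit 0 = is_hit, bit 1 = nc, bit 2 = r0.req.load
            # so e.g. 0b101 below is a cacheable load that hit (OP_LOAD_HIT),
            # and 0b111, a "hit" on a non-cacheable load, is a paradox (OP_BAD)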
            with m.Switch(opsel):
                with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
                with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
                with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
                with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
                with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
                with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
                with m.Case(0b011): comb += op.eq(Op.OP_BAD)
                with m.Case(0b111): comb += op.eq(Op.OP_BAD)
        comb += req_op.eq(op)
        comb += req_go.eq(go)

        # Version of the row number that is valid one cycle earlier
        # in the cases where we need to read the cache data BRAM.
        # If we're stalling then we need to keep reading the last
        # row requested.
        with m.If(~r0_stall):
            with m.If(m_in.valid):
                comb += early_req_row.eq(get_row(m_in.addr))
            with m.Else():
                comb += early_req_row.eq(get_row(d_in.addr))
        with m.Else():
            comb += early_req_row.eq(req_row)
    def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
                         r0_valid, r0, reservation):
        """Handle load-with-reservation and store-conditional instructions
        """
        comb = m.d.comb

        with m.If(r0_valid & r0.req.reserve):
            # XXX generate alignment interrupt if address
            # is not aligned XXX or if r0.req.nc = '1'
            with m.If(r0.req.load):
                comb += set_rsrv.eq(1) # load with reservation
            with m.Else():
                comb += clear_rsrv.eq(1) # store conditional
                with m.If(~reservation.valid |
                          (r0.req.addr[LINE_OFF_BITS:64] !=
                           reservation.addr)):
                    comb += cancel_store.eq(1)
    def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                        reservation, r0):

        comb = m.d.comb
        sync = m.d.sync

        with m.If(r0_valid & access_ok):
            with m.If(clear_rsrv):
                sync += reservation.valid.eq(0)
            with m.Elif(set_rsrv):
                sync += reservation.valid.eq(1)
                sync += reservation.addr.eq(r0.req.addr[LINE_OFF_BITS:64])
    def writeback_control(self, m, r1, cache_out_row):
        """Return data for loads & completion control logic
        """
        comb = m.d.comb
        sync = m.d.sync
        d_out, m_out = self.d_out, self.m_out

        data_out = Signal(64)
        data_fwd = Signal(64)

        # Use the bypass if we are reading the row that was
        # written 1 or 2 cycles ago, including for the
        # slow_valid = 1 case (i.e. completing a load
        # miss or a non-cacheable load).
        with m.If(r1.use_forward1):
            comb += data_fwd.eq(r1.forward_data1)
        with m.Else():
            comb += data_fwd.eq(r1.forward_data2)

        comb += data_out.eq(cache_out_row)

        for i in range(8):
            with m.If(r1.forward_sel[i]):
                dsel = data_fwd.word_select(i, 8)
                comb += data_out.word_select(i, 8).eq(dsel)

        comb += d_out.valid.eq(r1.ls_valid)
        comb += d_out.data.eq(data_out)
        comb += d_out.store_done.eq(~r1.stcx_fail)
        comb += d_out.error.eq(r1.ls_error)
        comb += d_out.cache_paradox.eq(r1.cache_paradox)

        # Outputs to MMU
        comb += m_out.done.eq(r1.mmu_done)
        comb += m_out.err.eq(r1.mmu_error)
        comb += m_out.data.eq(data_out)

        # We have a valid load or store hit or we just completed
        # a slow op such as a load miss, a NC load or a store
        #
        # Note: the load hit is delayed by one cycle. However it
        # can still not collide with r.slow_valid (well unless I
        # miscalculated) because slow_valid can only be set on a
        # subsequent request and not on its first cycle (the state
        # machine must have advanced), which makes slow_valid
        # at least 2 cycles from the previous hit_load_valid.

        # Sanity: Only one of these must be set in any given cycle

        if False: # TODO: need Display to get this to work
            assert (r1.slow_valid & r1.stcx_fail) != 1, \
                "unexpected slow_valid collision with stcx_fail"
            assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
                "unexpected hit_load_delayed collision with slow_valid"

        with m.If(~r1.mmu_req):
            # Request came from loadstore1...
            # Load hit case is the standard path
            with m.If(r1.hit_load_valid):
                sync += Display("completing load hit data=%x", data_out)

            # error cases complete without stalling
            with m.If(r1.ls_error):
                sync += Display("completing ld/st with error")

            # Slow ops (load miss, NC, stores)
            with m.If(r1.slow_valid):
                sync += Display("completing store or load miss data=%x",
                                data_out)
        with m.Else():
            # Request came from MMU
            with m.If(r1.hit_load_valid):
                sync += Display("completing load hit to MMU, data=%x",
                                m_out.data)
            # error cases complete without stalling
            with m.If(r1.mmu_error):
                sync += Display("completing MMU ld with error")

            # Slow ops (i.e. load miss)
            with m.If(r1.slow_valid):
                sync += Display("completing MMU load miss, data=%x",
                                m_out.data)
    def rams(self, m, r1, early_req_row, cache_out_row, replace_way):
        """rams
        Generate a cache RAM for each way. This handles the normal
        reads, writes from reloads and the special store-hit update
        path as well.

        Note: the BRAMs have an extra read buffer, meaning the output
        is pipelined an extra cycle. This differs from the
        icache. The writeback logic needs to take that into
        account by using 1-cycle delayed signals for load hits.
        """
        comb = m.d.comb
        wb_in = self.wb_in

        for i in range(NUM_WAYS):
            do_read  = Signal(name="do_rd%d" % i)
            rd_addr  = Signal(ROW_BITS)
            do_write = Signal(name="do_wr%d" % i)
            wr_addr  = Signal(ROW_BITS)
            wr_data  = Signal(WB_DATA_BITS)
            wr_sel   = Signal(ROW_SIZE)
            wr_sel_m = Signal(ROW_SIZE)
            _d_out   = Signal(WB_DATA_BITS, name="dout_%d" % i)

            way = CacheRam(ROW_BITS, WB_DATA_BITS, ADD_BUF=True)
            setattr(m.submodules, "cacheram_%d" % i, way)

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += _d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel_m)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(wr_data)

            # Cache hit reads
            comb += do_read.eq(1)
            comb += rd_addr.eq(early_req_row[:ROW_BITS])
            with m.If(r1.hit_way == i):
                comb += cache_out_row.eq(_d_out)

            # Write mux:
            #
            # Defaults to wishbone read responses (cache refill)
            #
            # For timing, the mux on wr_data/sel/addr is not
            # dependent on anything other than the current state.

            with m.If(r1.write_bram):
                # Write store data to BRAM. This happens one
                # cycle after the store is in r0.
                comb += wr_data.eq(r1.req.data)
                comb += wr_sel.eq(r1.req.byte_sel)
                comb += wr_addr.eq(get_row(r1.req.real_addr))

                with m.If(i == r1.req.hit_way):
                    comb += do_write.eq(1)
            with m.Else():
                # Otherwise, we might be doing a reload or a DCBZ
                with m.If(r1.dcbz):
                    comb += wr_data.eq(0)
                with m.Else():
                    comb += wr_data.eq(wb_in.dat)
                comb += wr_addr.eq(r1.store_row)
                comb += wr_sel.eq(~0) # all 1s

                with m.If((r1.state == State.RELOAD_WAIT_ACK)
                          & wb_in.ack & (replace_way == i)):
                    comb += do_write.eq(1)

            # Mask write selects with do_write since BRAM
            # doesn't have a global write-enable
            with m.If(do_write):
                comb += wr_sel_m.eq(wr_sel)
    # Cache hit synchronous machine for the easy case.
    # This handles load hits.
    # It also handles error cases (TLB miss, cache paradox)
    def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
                        req_hit_way, req_index, req_tag, access_ok,
                        tlb_hit, tlb_hit_way, tlb_req_index):

        comb = m.d.comb
        sync = m.d.sync

        with m.If(req_op != Op.OP_NONE):
            sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
                            req_op, r0.req.addr, r0.req.nc,
                            req_index, req_tag, req_hit_way)

        with m.If(r0_valid):
            sync += r1.mmu_req.eq(r0.mmu_req)

        # Fast path for load/store hits.
        # Set signals for the writeback controls.
        sync += r1.hit_way.eq(req_hit_way)
        sync += r1.hit_index.eq(req_index)

        with m.If(req_op == Op.OP_LOAD_HIT):
            sync += r1.hit_load_valid.eq(1)
        with m.Else():
            sync += r1.hit_load_valid.eq(0)

        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STORE_HIT)):
            sync += r1.cache_hit.eq(1)
        with m.Else():
            sync += r1.cache_hit.eq(0)

        with m.If(req_op == Op.OP_BAD):
            # Display(f"Signalling ld/st error valid_ra={valid_ra}"
            #         f"rc_ok={rc_ok} perm_ok={perm_ok}")
            sync += r1.ls_error.eq(~r0.mmu_req)
            sync += r1.mmu_error.eq(r0.mmu_req)
            sync += r1.cache_paradox.eq(access_ok)
        with m.Else():
            sync += r1.ls_error.eq(0)
            sync += r1.mmu_error.eq(0)
            sync += r1.cache_paradox.eq(0)

        with m.If(req_op == Op.OP_STCX_FAIL):
            sync += r1.stcx_fail.eq(1)
        with m.Else():
            sync += r1.stcx_fail.eq(0)

        # Record TLB hit information for updating TLB PLRU
        sync += r1.tlb_hit.eq(tlb_hit)
        sync += r1.tlb_hit_way.eq(tlb_hit_way)
        sync += r1.tlb_hit_index.eq(tlb_req_index)
    # Memory accesses are handled by this state machine:
    #
    #   * Cache load miss/reload (in conjunction with "rams")
    #   * Load hits for non-cachable forms
    #   * Stores (the collision case is handled in "rams")
    #
    # All wishbone requests generation is done here.
    # This machine operates at stage 1.
    def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
                    cache_valids, r0, replace_way,
                    req_hit_way, req_same_tag,
                    r0_valid, req_op, cache_tags, req_go, ra):

        comb = m.d.comb
        sync = m.d.sync
        wb_in = self.wb_in

        req         = MemAccessRequest("mreq_ds")

        acks        = Signal(3)
        adjust_acks = Signal(3)

        req_row = Signal(ROW_BITS)
        req_idx = Signal(INDEX_BITS)
        req_tag = Signal(TAG_BITS)
        comb += req_idx.eq(get_index(req.real_addr))
        comb += req_row.eq(get_row(req.real_addr))
        comb += req_tag.eq(get_tag(req.real_addr))

        sync += r1.use_forward1.eq(use_forward1_next)
        sync += r1.forward_sel.eq(0)

        with m.If(use_forward1_next):
            sync += r1.forward_sel.eq(r1.req.byte_sel)
        with m.Elif(use_forward2_next):
            sync += r1.forward_sel.eq(r1.forward_sel1)

        sync += r1.forward_data2.eq(r1.forward_data1)
        with m.If(r1.write_bram):
            sync += r1.forward_data1.eq(r1.req.data)
            sync += r1.forward_sel1.eq(r1.req.byte_sel)
            sync += r1.forward_way1.eq(r1.req.hit_way)
            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
            sync += r1.forward_valid1.eq(1)
        with m.Else():
            with m.If(r1.dcbz):
                sync += r1.forward_data1.eq(0)
            with m.Else():
                sync += r1.forward_data1.eq(wb_in.dat)
            sync += r1.forward_sel1.eq(~0) # all 1s
            sync += r1.forward_way1.eq(replace_way)
            sync += r1.forward_row1.eq(r1.store_row)
            sync += r1.forward_valid1.eq(0)

        # One cycle pulses reset
        sync += r1.slow_valid.eq(0)
        sync += r1.write_bram.eq(0)
        sync += r1.inc_acks.eq(0)
        sync += r1.dec_acks.eq(0)

        sync += r1.ls_valid.eq(0)
        # complete tlbies and TLB loads in the third cycle
        sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))

        with m.If((req_op == Op.OP_LOAD_HIT)
                  | (req_op == Op.OP_STCX_FAIL)):
            with m.If(~r0.mmu_req):
                sync += r1.ls_valid.eq(1)
            with m.Else():
                sync += r1.mmu_done.eq(1)

        with m.If(r1.write_tag):
            # Store new tag in selected way
            for i in range(NUM_WAYS):
                with m.If(i == replace_way):
                    ct = Signal(TAG_RAM_WIDTH)
                    comb += ct.eq(cache_tags[r1.store_index])
                    comb += ct.word_select(i, TAG_WIDTH).eq(r1.reload_tag)
                    sync += cache_tags[r1.store_index].eq(ct)
            sync += r1.store_way.eq(replace_way)
            sync += r1.write_tag.eq(0)
        # Take request from r1.req if there is one there,
        # else from req_op, ra, etc.
        with m.If(r1.full):
            comb += req.eq(r1.req)
        with m.Else():
            comb += req.op.eq(req_op)
            comb += req.valid.eq(req_go)
            comb += req.mmu_req.eq(r0.mmu_req)
            comb += req.dcbz.eq(r0.req.dcbz)
            comb += req.real_addr.eq(ra)

            with m.If(~r0.req.dcbz):
                comb += req.data.eq(r0.req.data)
            with m.Else():
                comb += req.data.eq(0)

            # Select all bytes for dcbz
            # and for cacheable loads
            with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
                comb += req.byte_sel.eq(~0) # all 1s
            with m.Else():
                comb += req.byte_sel.eq(r0.req.byte_sel)
            comb += req.hit_way.eq(req_hit_way)
            comb += req.same_tag.eq(req_same_tag)

        # Store the incoming request from r0,
        # if it is a slow request
        # Note that r1.full = 1 implies req_op = OP_NONE
        with m.If((req_op == Op.OP_LOAD_MISS)
                  | (req_op == Op.OP_LOAD_NC)
                  | (req_op == Op.OP_STORE_MISS)
                  | (req_op == Op.OP_STORE_HIT)):
            sync += r1.req.eq(req)
            sync += r1.full.eq(1)
        # Main state machine
        with m.Switch(r1.state):

            with m.Case(State.IDLE):
                sync += r1.real_adr.eq(req.real_addr)
                sync += r1.wb.sel.eq(req.byte_sel)
                sync += r1.wb.dat.eq(req.data)
                sync += r1.dcbz.eq(req.dcbz)

                # Keep track of our index and way
                # for subsequent stores.
                sync += r1.store_index.eq(req_idx)
                sync += r1.store_row.eq(req_row)
                sync += r1.end_row_ix.eq(get_row_of_line(req_row))
                sync += r1.reload_tag.eq(req_tag)
                sync += r1.req.same_tag.eq(1)

                with m.If(req.op == Op.OP_STORE_HIT):
                    sync += r1.store_way.eq(req.hit_way)

                # Reset per-row valid bits,
                # ready for handling OP_LOAD_MISS
                for i in range(ROW_PER_LINE):
                    sync += r1.rows_valid[i].eq(0)

                with m.If(req_op != Op.OP_NONE):
                    sync += Display("cache op %d", req.op)

                with m.Switch(req.op):
                    with m.Case(Op.OP_LOAD_HIT):
                        # stay in IDLE state
                        pass

                    with m.Case(Op.OP_LOAD_MISS):
                        sync += Display("cache miss real addr: %x " \
                                        "idx: %x tag: %x",
                                        req.real_addr, req_row, req_tag)

                        # Start the wishbone cycle
                        sync += r1.wb.we.eq(0)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                        # Track that we had one request sent
                        sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                        sync += r1.write_tag.eq(1)

                    with m.Case(Op.OP_LOAD_NC):
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)
                        sync += r1.wb.we.eq(0)
                        sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)

                    with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
                        with m.If(~req.dcbz):
                            sync += r1.state.eq(State.STORE_WAIT_ACK)
                            sync += r1.acks_pending.eq(1)
                            sync += r1.full.eq(0)
                            sync += r1.slow_valid.eq(1)

                            with m.If(~req.mmu_req):
                                sync += r1.ls_valid.eq(1)
                            with m.Else():
                                sync += r1.mmu_done.eq(1)

                            with m.If(req.op == Op.OP_STORE_HIT):
                                sync += r1.write_bram.eq(1)
                        with m.Else():
                            # dcbz is handled much like a load miss except
                            # that we are writing to memory instead of reading
                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)

                            with m.If(req.op == Op.OP_STORE_MISS):
                                sync += r1.write_tag.eq(1)

                        sync += r1.wb.we.eq(1)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                    # OP_NONE and OP_BAD do nothing
                    # OP_BAD & OP_STCX_FAIL were
                    # handled above already
                    with m.Case(Op.OP_NONE):
                        pass
                    with m.Case(Op.OP_BAD):
                        pass
                    with m.Case(Op.OP_STCX_FAIL):
                        pass
            with m.Case(State.RELOAD_WAIT_ACK):
                ld_stbs_done = Signal()
                # Requests are all sent if stb is 0
                comb += ld_stbs_done.eq(~r1.wb.stb)

                with m.If((~wb_in.stall) & r1.wb.stb):
                    # That was the last word?
                    # We are done sending.
                    # Clear stb and set ld_stbs_done
                    # so we can handle an eventual
                    # last ack on the same cycle.
                    with m.If(is_last_row_addr(r1.real_adr, r1.end_row_ix)):
                        sync += r1.wb.stb.eq(0)
                        comb += ld_stbs_done.eq(1)

                    # Calculate the next row address in the current cache line
                    row = Signal(LINE_OFF_BITS-ROW_OFF_BITS)
                    comb += row.eq(r1.real_adr[ROW_OFF_BITS:])
                    sync += r1.real_adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(row+1)

                # Incoming acks processing
                sync += r1.forward_valid1.eq(wb_in.ack)
                with m.If(wb_in.ack):
                    srow = Signal(ROW_LINE_BITS)
                    comb += srow.eq(r1.store_row)
                    sync += r1.rows_valid[srow].eq(1)

                    # If this is the data we were looking for,
                    # we can complete the request next cycle.
                    # Compare the whole address in case the
                    # request in r1.req is not the one that
                    # started this refill.
                    with m.If(r1.full & r1.req.same_tag &
                              ((r1.dcbz & r1.req.dcbz) |
                               (~r1.dcbz & (r1.req.op == Op.OP_LOAD_MISS))) &
                              (r1.store_row == get_row(r1.req.real_addr))):
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)
                        with m.If(~r1.mmu_req):
                            sync += r1.ls_valid.eq(1)
                        with m.Else():
                            sync += r1.mmu_done.eq(1)
                        sync += r1.forward_sel.eq(~0) # all 1s
                        sync += r1.use_forward1.eq(1)

                    # Check for completion
                    with m.If(ld_stbs_done & is_last_row(r1.store_row,
                                                         r1.end_row_ix)):
                        # Complete wishbone cycle
                        sync += r1.wb.cyc.eq(0)

                        # Cache line is now valid
                        cv = Signal(INDEX_BITS)
                        comb += cv.eq(cache_valids[r1.store_index])
                        comb += cv.bit_select(r1.store_way, 1).eq(1)
                        sync += cache_valids[r1.store_index].eq(cv)
                        sync += r1.state.eq(State.IDLE)

                    # Increment store row counter
                    sync += r1.store_row.eq(next_row(r1.store_row))
            with m.Case(State.STORE_WAIT_ACK):
                st_stbs_done = Signal()
                comb += st_stbs_done.eq(~r1.wb.stb)
                comb += acks.eq(r1.acks_pending)

                with m.If(r1.inc_acks != r1.dec_acks):
                    with m.If(r1.inc_acks):
                        comb += adjust_acks.eq(acks + 1)
                    with m.Else():
                        comb += adjust_acks.eq(acks - 1)
                with m.Else():
                    comb += adjust_acks.eq(acks)

                sync += r1.acks_pending.eq(adjust_acks)
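                # (acks_pending tracks wishbone stores still in flight:
                #  inc_acks/dec_acks are one-cycle pulses recorded when a
                #  stb is issued or an ack received, and the "adjust_acks
                #  < 7" guard below keeps the 3-bit counter from wrapping.)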
                # Clear stb when slave accepted request
                with m.If(~wb_in.stall):
                    # See if there is another store waiting
                    # to be done which is in the same real page.
                    with m.If(req.valid):
                        ra = req.real_addr[0:SET_SIZE_BITS]
                        sync += r1.real_adr[0:SET_SIZE_BITS].eq(ra)
                        sync += r1.wb.dat.eq(req.data)
                        sync += r1.wb.sel.eq(req.byte_sel)

                    with m.Elif((adjust_acks < 7) & req.same_tag &
                                ((req.op == Op.OP_STORE_MISS)
                                 | (req.op == Op.OP_STORE_HIT))):
                        sync += r1.wb.stb.eq(1)
                        comb += st_stbs_done.eq(0)

                        with m.If(req.op == Op.OP_STORE_HIT):
                            sync += r1.write_bram.eq(1)
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)

                        # Store requests never come from the MMU
                        sync += r1.ls_valid.eq(1)
                        comb += st_stbs_done.eq(0)
                        sync += r1.inc_acks.eq(1)
                    with m.Else():
                        sync += r1.wb.stb.eq(0)
                        comb += st_stbs_done.eq(1)

                # Got ack ? See if complete.
                with m.If(wb_in.ack):
                    with m.If(st_stbs_done & (adjust_acks == 1)):
                        sync += r1.state.eq(State.IDLE)
                        sync += r1.wb.cyc.eq(0)
                        sync += r1.wb.stb.eq(0)
                    sync += r1.dec_acks.eq(1)

            with m.Case(State.NC_LOAD_WAIT_ACK):
                # Clear stb when slave accepted request
                with m.If(~wb_in.stall):
                    sync += r1.wb.stb.eq(0)

                # Got ack ? complete.
                with m.If(wb_in.ack):
                    sync += r1.state.eq(State.IDLE)
                    sync += r1.full.eq(0)
                    sync += r1.slow_valid.eq(1)

                    with m.If(~r1.mmu_req):
                        sync += r1.ls_valid.eq(1)
                    with m.Else():
                        sync += r1.mmu_done.eq(1)

                    sync += r1.forward_sel.eq(~0) # all 1s
                    sync += r1.use_forward1.eq(1)
                    sync += r1.wb.cyc.eq(0)
                    sync += r1.wb.stb.eq(0)
    def dcache_log(self, m, r1, valid_ra, tlb_hit_way, stall_out):

        sync = m.d.sync
        d_out, wb_in, log_out = self.d_out, self.wb_in, self.log_out

        sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit_way[:3],
                               stall_out, req_op[:3], d_out.valid, d_out.error,
                               r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
                               r1.real_adr[3:6]))
    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags    = CacheTagArray()
        cache_tag_set = Signal(TAG_RAM_WIDTH)
        cache_valids  = CacheValidBitsArray()

        # TODO attribute ram_style : string;
        # TODO attribute ram_style of cache_tags : signal is "distributed";

        """note: these are passed to nmigen.hdl.Memory as "attributes".
           don't know how, just that they are.
        """
        dtlb_valid_bits = TLBValidBitsArray()
        dtlb_tags       = TLBTagsArray()
        dtlb_ptes       = TLBPtesArray()
        # TODO attribute ram_style of
        #  dtlb_tags : signal is "distributed";
        # TODO attribute ram_style of
        #  dtlb_ptes : signal is "distributed";

        r0      = RegStage0("r0")
        r0_full = Signal()

        r1 = RegStage1("r1")

        reservation = Reservation()

        # Async signals on incoming request
        req_index    = Signal(INDEX_BITS)
        req_row      = Signal(ROW_BITS)
        req_hit_way  = Signal(WAY_BITS)
        req_tag      = Signal(TAG_BITS)
        req_op       = Signal(Op)
        req_data     = Signal(64)
        req_same_tag = Signal()
        req_go       = Signal()

        early_req_row     = Signal(ROW_BITS)

        cancel_store      = Signal()
        set_rsrv          = Signal()
        clear_rsrv        = Signal()

        r0_valid          = Signal()
        r0_stall          = Signal()

        use_forward1_next = Signal()
        use_forward2_next = Signal()

        cache_out_row     = Signal(WB_DATA_BITS)

        plru_victim       = PLRUOut()
        replace_way       = Signal(WAY_BITS)

        # Wishbone read/write/cache write formatting signals

        # TLB signals
        tlb_tag_way   = Signal(TLB_TAG_WAY_BITS)
        tlb_pte_way   = Signal(TLB_PTE_WAY_BITS)
        tlb_valid_way = Signal(TLB_NUM_WAYS)
        tlb_req_index = Signal(TLB_SET_BITS)
        tlb_hit       = Signal()
        tlb_hit_way   = Signal(TLB_WAY_BITS)
        pte           = Signal(TLB_PTE_BITS)
        ra            = Signal(REAL_ADDR_BITS)
        valid_ra      = Signal()
        perm_attr     = PermAttr("dc_perms")
        rc_ok         = Signal()
        perm_ok       = Signal()
        access_ok     = Signal()

        tlb_plru_victim = TLBPLRUOut()

        # we don't yet handle collisions between loadstore1 requests
        # and MMU requests
        comb += self.m_out.stall.eq(0)

        # Hold off the request in r0 when r1 has an uncompleted request
        comb += r0_stall.eq(r0_full & r1.full)
        comb += r0_valid.eq(r0_full & ~r1.full)
        comb += self.stall_out.eq(r0_stall)

        # Wire up wishbone request latch out of stage 1
        comb += r1.wb.adr.eq(r1.real_adr[ROW_OFF_BITS:]) # truncate LSBs
        comb += self.wb_out.eq(r1.wb)

        # call sub-functions putting everything together, using shared
        # signals established above
        self.stage_0(m, r0, r1, r0_full)
        self.tlb_read(m, r0_stall, tlb_valid_way,
                      tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
                      dtlb_tags, dtlb_ptes)
        self.tlb_search(m, tlb_req_index, r0, r0_valid,
                        tlb_valid_way, tlb_tag_way, tlb_hit_way,
                        tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra)
        self.tlb_update(m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
                        tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
                        dtlb_tags, tlb_pte_way, dtlb_ptes)
        self.maybe_plrus(m, r1, plru_victim)
        self.maybe_tlb_plrus(m, r1, tlb_plru_victim)
        self.cache_tag_read(m, r0_stall, req_index, cache_tag_set, cache_tags)
        self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
                            r0_valid, r1, cache_valids, replace_way,
                            use_forward1_next, use_forward2_next,
                            req_hit_way, plru_victim, rc_ok, perm_attr,
                            valid_ra, perm_ok, access_ok, req_op, req_go,
                            tlb_pte_way,
                            tlb_hit, tlb_hit_way, tlb_valid_way, cache_tag_set,
                            cancel_store, req_same_tag, r0_stall, early_req_row)
        self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
                              r0_valid, r0, reservation)
        self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                             reservation, r0)
        self.writeback_control(m, r1, cache_out_row)
        self.rams(m, r1, early_req_row, cache_out_row, replace_way)
        self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
                             req_hit_way, req_index, req_tag, access_ok,
                             tlb_hit, tlb_hit_way, tlb_req_index)
        self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
                         cache_valids, r0, replace_way,
                         req_hit_way, req_same_tag,
                         r0_valid, req_op, cache_tags, req_go, ra)
        #self.dcache_log(m, r1, valid_ra, tlb_hit_way, stall_out)

        return m
def dcache_load(dut, addr, nc=0):
    yield dut.d_in.load.eq(1)
    yield dut.d_in.nc.eq(nc)
    yield dut.d_in.addr.eq(addr)
    yield dut.d_in.byte_sel.eq(~0)
    yield dut.d_in.valid.eq(1)
    yield
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.byte_sel.eq(0)
    while not (yield dut.d_out.valid):
        yield
    data = yield dut.d_out.data
    return data
def dcache_store(dut, addr, data, nc=0):
    yield dut.d_in.load.eq(0)
    yield dut.d_in.nc.eq(nc)
    yield dut.d_in.data.eq(data)
    yield dut.d_in.byte_sel.eq(~0)
    yield dut.d_in.addr.eq(addr)
    yield dut.d_in.valid.eq(1)
    yield
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.byte_sel.eq(0)
    while not (yield dut.d_out.valid):
        yield
def dcache_random_sim(dut):

    # start with stack of zeros
    sim_mem = [0] * 512

    # clear stuff
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.load.eq(0)
    yield dut.d_in.priv_mode.eq(1)
    yield dut.d_in.nc.eq(0)
    yield dut.d_in.addr.eq(0)
    yield dut.d_in.data.eq(0)
    yield dut.m_in.valid.eq(0)
    yield dut.m_in.addr.eq(0)
    yield dut.m_in.pte.eq(0)
    # wait 4 * clk_period
    yield
    yield
    yield
    yield

    for i in range(256):
        addr = randint(0, 255)
        data = randint(0, (1 << 64) - 1)
        sim_mem[addr] = data
        addr *= 8

        print ("testing %x data %x" % (addr, data))

        yield from dcache_load(dut, addr)
        yield from dcache_store(dut, addr, data)

        addr = randint(0, 255)
        sim_data = sim_mem[addr]
        addr *= 8

        data = yield from dcache_load(dut, addr)
        assert data == sim_data, \
            "check %x data %x != %x" % (addr, data, sim_data)

    for addr in range(256):
        data = yield from dcache_load(dut, addr*8)
        assert data == sim_mem[addr], \
            "final check %x data %x != %x" % (addr*8, data, sim_mem[addr])
def dcache_sim(dut):
    # clear stuff
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.load.eq(0)
    yield dut.d_in.priv_mode.eq(1)
    yield dut.d_in.nc.eq(0)
    yield dut.d_in.addr.eq(0)
    yield dut.d_in.data.eq(0)
    yield dut.m_in.valid.eq(0)
    yield dut.m_in.addr.eq(0)
    yield dut.m_in.pte.eq(0)
    # wait 4 * clk_period
    yield
    yield
    yield
    yield

    # Cacheable read of address 0x58
    data = yield from dcache_load(dut, 0x58)
    addr = yield dut.d_in.addr
    assert data == 0x0000001700000016, \
        "data @%x=%x expected 0x0000001700000016" % (addr, data)

    # Cacheable read of address 0x20
    data = yield from dcache_load(dut, 0x20)
    addr = yield dut.d_in.addr
    assert data == 0x0000000900000008, \
        "data @%x=%x expected 0x0000000900000008" % (addr, data)

    # Cacheable read of address 0x530
    data = yield from dcache_load(dut, 0x530)
    addr = yield dut.d_in.addr
    assert data == 0x0000014D0000014C, \
        "data @%x=%x expected 0000014D0000014C" % (addr, data)

    # 2nd Cacheable read of address 0x530
    data = yield from dcache_load(dut, 0x530)
    addr = yield dut.d_in.addr
    assert data == 0x0000014D0000014C, \
        "data @%x=%x expected 0000014D0000014C" % (addr, data)

    # Non-cacheable read of address 0x100
    data = yield from dcache_load(dut, 0x100, nc=1)
    addr = yield dut.d_in.addr
    assert data == 0x0000004100000040, \
        "data @%x=%x expected 0000004100000040" % (addr, data)

    # Store at address 0x530
    yield from dcache_store(dut, 0x530, 0x121)

    # Store at address 0x530
    yield from dcache_store(dut, 0x530, 0x12345678)

    # 3rd Cacheable read of address 0x530
    data = yield from dcache_load(dut, 0x530)
    addr = yield dut.d_in.addr
    assert data == 0x12345678, \
        "data @%x=%x expected 0x12345678" % (addr, data)

    # 4th Cacheable read of address 0x20
    data = yield from dcache_load(dut, 0x20)
    addr = yield dut.d_in.addr
    assert data == 0x0000000900000008, \
        "data @%x=%x expected 0x0000000900000008" % (addr, data)
def test_dcache(mem, test_fn, test_name):
    dut = DCache()

    memory = Memory(width=64, depth=16*64, init=mem)
    sram = SRAM(memory=memory, granularity=8)

    m = Module()
    m.submodules.dcache = dut
    m.submodules.sram = sram

    m.d.comb += sram.bus.cyc.eq(dut.wb_out.cyc)
    m.d.comb += sram.bus.stb.eq(dut.wb_out.stb)
    m.d.comb += sram.bus.we.eq(dut.wb_out.we)
    m.d.comb += sram.bus.sel.eq(dut.wb_out.sel)
    m.d.comb += sram.bus.adr.eq(dut.wb_out.adr)
    m.d.comb += sram.bus.dat_w.eq(dut.wb_out.dat)

    m.d.comb += dut.wb_in.ack.eq(sram.bus.ack)
    m.d.comb += dut.wb_in.dat.eq(sram.bus.dat_r)

    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(test_fn(dut)))
    with sim.write_vcd('test_dcache%s.vcd' % test_name):
        sim.run()
if __name__ == '__main__':
    dut = DCache()
    vl = rtlil.convert(dut, ports=[])
    with open("test_dcache.il", "w") as f:
        f.write(vl)

    mem = []
    for i in range(0, 512):
        mem.append((i*2) | ((i*2+1) << 32))

    test_dcache(mem, dcache_sim, "")
    test_dcache(None, dcache_random_sim, "random")