"""DCache

based on Anton Blanchard microwatt dcache.vhdl

"""

from enum import Enum, unique

from nmigen import Module, Signal, Elaboratable, Cat, Repl, Array, Const
from nmigen.hdl.ast import Display

from random import randint

from nmigen.cli import main
from nmutil.iocontrol import RecordObject
from nmutil.util import wrap
from nmigen.utils import log2_int
from soc.experiment.mem_types import (LoadStore1ToDCacheType,
                                      DCacheToLoadStore1Type,
                                      MMUToDCacheType,
                                      DCacheToMMUType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                     WBAddrType, WBDataType, WBSelType,
                                     WBMasterOut, WBSlaveOut,
                                     WBMasterOutVector, WBSlaveOutVector,
                                     WBIOMasterOut, WBIOSlaveOut)

from soc.experiment.cache_ram import CacheRam
from soc.experiment.plru import PLRU

# for test
from nmigen_soc.wishbone.sram import SRAM
from nmigen import Memory
from nmigen.cli import rtlil

# simulator back-end: pysim by default; the cxxsim import below may be
# swapped in when using the nmigen cxxsim branch
from nmigen.back.pysim import Simulator, Delay, Settle
#from nmigen.sim.cxxsim import Simulator, Delay, Settle
# TODO: make these parameters of DCache at some point
LINE_SIZE = 64    # Line size in bytes
NUM_LINES = 16    # Number of lines in a set
NUM_WAYS = 4      # Number of ways
TLB_SET_SIZE = 64 # L1 DTLB entries per set
TLB_NUM_WAYS = 2  # L1 DTLB number of sets
TLB_LG_PGSZ = 12  # L1 DTLB log_2(page_size)
LOG_LENGTH = 0    # Non-zero to enable log data collection

# BRAM organisation: We never access more than
# WB_DATA_BITS at a time so to save
# resources we make the array only that wide, and
# use consecutive indices to make a cache "line"
#
# ROW_SIZE is the width in bytes of the BRAM
# (based on WB, so 64-bits)
ROW_SIZE = WB_DATA_BITS // 8

# ROW_PER_LINE is the number of rows (wishbone
# transactions) in a line
ROW_PER_LINE = LINE_SIZE // ROW_SIZE

# BRAM_ROWS is the number of rows in BRAM needed
# to represent the full dcache
BRAM_ROWS = NUM_LINES * ROW_PER_LINE
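
# Worked example (added; a sketch for the default geometry above, not part
# of the original source): WB_DATA_BITS is 64, so each BRAM row is 8 bytes,
# a 64-byte line takes 8 wishbone transactions, and 16 lines x 8 rows give
# 128 BRAM rows per way.
assert ROW_SIZE == 8
assert ROW_PER_LINE == 8
assert BRAM_ROWS == 128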
# Bit fields counts in the address

# REAL_ADDR_BITS is the number of real address
# bits that we store
REAL_ADDR_BITS = 56

# ROW_BITS is the number of bits to select a row
ROW_BITS = log2_int(BRAM_ROWS)

# ROW_LINE_BITS is the number of bits to select
# a row within a line
ROW_LINE_BITS = log2_int(ROW_PER_LINE)

# LINE_OFF_BITS is the number of bits for
# the offset in a cache line
LINE_OFF_BITS = log2_int(LINE_SIZE)

# ROW_OFF_BITS is the number of bits for
# the offset in a row
ROW_OFF_BITS = log2_int(ROW_SIZE)

# INDEX_BITS is the number of bits to
# select a cache line
INDEX_BITS = log2_int(NUM_LINES)

# SET_SIZE_BITS is the log base 2 of the set size
SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS

# TAG_BITS is the number of bits of
# the tag part of the address
TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS

# TAG_WIDTH is the width in bits of each way of the tag RAM
TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)

# WAY_BITS is the number of bits to select a way
WAY_BITS = log2_int(NUM_WAYS)
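
# Derived values for the default geometry above (added for reference):
# ROW_BITS=7, ROW_LINE_BITS=3, LINE_OFF_BITS=6, ROW_OFF_BITS=3,
# INDEX_BITS=4, SET_SIZE_BITS=10, TAG_BITS=46, TAG_WIDTH=48, WAY_BITS=2.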
# Example of layout for 32 lines of 64 bytes:
#
# ..  tag    |index| line  |
# ..         |   row   |   |
# ..         |     |---|   | ROW_LINE_BITS  (3)
# ..         |     |--- - --| LINE_OFF_BITS (6)
# ..         |         |- --| ROW_OFF_BITS  (3)
# ..         |----- ---|    | ROW_BITS      (8)
# ..         |-----|        | INDEX_BITS    (5)
# .. --------|              | TAG_BITS      (45)

print ("Dcache TAG %d IDX %d ROW %d ROFF %d LOFF %d RLB %d" % \
            (TAG_BITS, INDEX_BITS, ROW_BITS,
             ROW_OFF_BITS, LINE_OFF_BITS, ROW_LINE_BITS))
print ("index @: %d-%d" % (LINE_OFF_BITS, SET_SIZE_BITS))
print ("row @: %d-%d" % (LINE_OFF_BITS, ROW_OFF_BITS))
print ("tag @: %d-%d width %d" % (SET_SIZE_BITS, REAL_ADDR_BITS, TAG_WIDTH))
TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

def CacheTagArray():
    return Array(Signal(TAG_RAM_WIDTH, name="cachetag_%d" % x) \
                 for x in range(NUM_LINES))

def CacheValidBitsArray():
    return Array(Signal(INDEX_BITS, name="cachevalid_%d" % x) \
                 for x in range(NUM_LINES))

def RowPerLineValidArray():
    return Array(Signal(name="rows_valid%d" % x) \
                 for x in range(ROW_PER_LINE))
# L1 TLB
TLB_SET_BITS = log2_int(TLB_SET_SIZE)
TLB_WAY_BITS = log2_int(TLB_NUM_WAYS)
TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
TLB_PTE_BITS = 64
TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS

assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not multiple of ROW_SIZE"
assert (LINE_SIZE % 2) == 0, "LINE_SIZE not power of 2"
assert (NUM_LINES % 2) == 0, "NUM_LINES not power of 2"
assert (ROW_PER_LINE % 2) == 0, "ROW_PER_LINE not power of 2"
assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), "geometry bits don't add up"
assert (LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS), \
        "geometry bits don't add up"
assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS), \
        "geometry bits don't add up"
assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS), \
        "geometry bits don't add up"
assert 64 == WB_DATA_BITS, "Can't yet handle wb width that isn't 64-bits"
assert SET_SIZE_BITS <= TLB_LG_PGSZ, "Set indexed by virtual address"
def TLBValidBitsArray():
    return Array(Signal(TLB_NUM_WAYS) for x in range(TLB_SET_SIZE))

def TLBTagEAArray():
    return Array(Signal(TLB_EA_TAG_BITS) for x in range (TLB_NUM_WAYS))

def TLBTagsArray():
    return Array(Signal(TLB_TAG_WAY_BITS) for x in range (TLB_SET_SIZE))

def TLBPtesArray():
    return Array(Signal(TLB_PTE_WAY_BITS) for x in range(TLB_SET_SIZE))

def HitWaySet():
    return Array(Signal(WAY_BITS, name="hitway_%d" % x) \
                 for x in range(TLB_NUM_WAYS))

# Cache RAM interface
def CacheRamOut():
    return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
                 for x in range(NUM_WAYS))

# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS) for x in range(NUM_LINES))

# TLB PLRU output interface
def TLBPLRUOut():
    return Array(Signal(TLB_WAY_BITS) for x in range(TLB_SET_SIZE))
# Helper functions to decode incoming requests
#
# Return the cache line index (tag index) for an address
def get_index(addr):
    return addr[LINE_OFF_BITS:SET_SIZE_BITS]

# Return the cache row index (data memory) for an address
def get_row(addr):
    return addr[ROW_OFF_BITS:SET_SIZE_BITS]

# Return the index of a row within a line
def get_row_of_line(row):
    return row[:ROW_BITS][:ROW_LINE_BITS]

# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

# Returns whether this is the last row of a line
def is_last_row(row, last):
    return get_row_of_line(row) == last

# Return the next row in the current cache line. We use a
# dedicated function in order to limit the size of the
# generated adder to be only the bits within a cache line
# (3 bits with default settings)
def next_row(row):
    row_v = row[0:ROW_LINE_BITS] + 1
    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
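
# Pure-integer model of next_row() (added as an illustrative sketch, not
# part of the original source): only the low ROW_LINE_BITS bits increment
# and wrap, so the synthesised adder is only 3 bits wide by default.
def _next_row_model(row):
    mask = (1 << ROW_LINE_BITS) - 1
    return (row & ~mask) | ((row + 1) & mask)

assert _next_row_model(0b1111_111) == 0b1111_000  # wraps within the line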
# Get the tag value from the address
def get_tag(addr):
    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]

# Read a tag from a tag memory row
def read_tag(way, tagset):
    return tagset.word_select(way, TAG_WIDTH)[:TAG_BITS]

# Read a TLB tag from a TLB tag memory row
def read_tlb_tag(way, tags):
    return tags.word_select(way, TLB_EA_TAG_BITS)

# Write a TLB tag to a TLB tag memory row
def write_tlb_tag(way, tags, tag):
    return read_tlb_tag(way, tags).eq(tag)

# Read a PTE from a TLB PTE memory row
def read_tlb_pte(way, ptes):
    return ptes.word_select(way, TLB_PTE_BITS)

def write_tlb_pte(way, ptes, newpte):
    return read_tlb_pte(way, ptes).eq(newpte)
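
# Usage note (added): the read_* helpers return nmigen slice expressions,
# so the write_* helpers can call .eq() on them.  The returned assignment
# still has to be added to a domain by the caller, e.g.
#     comb += write_tlb_tag(way, tagset, newtag)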
# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.reference = Signal()
        self.changed   = Signal()
        self.nocache   = Signal()
        self.priv      = Signal()
        self.rd_perm   = Signal()
        self.wr_perm   = Signal()


def extract_perm_attr(pte):
    pa = PermAttr()
    pa.reference = pte[8]
    pa.changed   = pte[7]
    pa.nocache   = pte[5]
    pa.priv      = pte[3]
    pa.rd_perm   = pte[2]
    pa.wr_perm   = pte[1]
    return pa
# Type of operation on a "valid" input
@unique
class Op(Enum):
    OP_NONE       = 0
    OP_BAD        = 1 # NC cache hit, TLB miss, prot/RC failure
    OP_STCX_FAIL  = 2 # conditional store w/o reservation
    OP_LOAD_HIT   = 3 # Cache hit on load
    OP_LOAD_MISS  = 4 # Load missing cache
    OP_LOAD_NC    = 5 # Non-cachable load
    OP_STORE_HIT  = 6 # Store hitting cache
    OP_STORE_MISS = 7 # Store missing cache


# Cache state machine
@unique
class State(Enum):
    IDLE             = 0 # Normal load hit processing
    RELOAD_WAIT_ACK  = 1 # Cache reload wait ack
    STORE_WAIT_ACK   = 2 # Store wait ack
    NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
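
# State-transition sketch (added; derived from dcache_slow() below):
#   IDLE -> RELOAD_WAIT_ACK    on OP_LOAD_MISS, or a dcbz store
#   IDLE -> NC_LOAD_WAIT_ACK   on OP_LOAD_NC
#   IDLE -> STORE_WAIT_ACK     on OP_STORE_HIT/OP_STORE_MISS (non-dcbz)
#   each WAIT_ACK state returns to IDLE once the final wishbone ack arrives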
# In order to make timing, we use the BRAMs with
# an output buffer, which means that the BRAM
# output is delayed by an extra cycle.
#
# Thus, the dcache has a 2-stage internal pipeline
# for cache hits with no stalls.
#
# All other operations are handled via stalling
# in the first stage.
#
# The second stage can thus complete a hit at the same
# time as the first stage emits a stall for a complex op.
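
# Load-hit timing sketch (added for illustration):
#   cycle 0: request latched into r0 (stage 0)
#   cycle 1: TLB/tag compare, BRAM read issued (stage 1)
#   cycle 2: buffered BRAM output valid, d_out.valid asserted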
# Stage 0 register, basically contains just the latched request

class RegStage0(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.req     = LoadStore1ToDCacheType(name="lsmem")
        self.tlbie   = Signal()
        self.doall   = Signal()
        self.tlbld   = Signal()
        self.mmu_req = Signal() # indicates source of request
class MemAccessRequest(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.op        = Signal(Op)
        self.valid     = Signal()
        self.dcbz      = Signal()
        self.real_addr = Signal(REAL_ADDR_BITS)
        self.data      = Signal(64)
        self.byte_sel  = Signal(8)
        self.hit_way   = Signal(WAY_BITS)
        self.same_tag  = Signal()
        self.mmu_req   = Signal()
# First stage register, contains state for stage 1 of load hits
# and for the state machine used by all other operations
class RegStage1(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        # Info about the request
        self.full             = Signal() # have uncompleted request
        self.mmu_req          = Signal() # request is from MMU
        self.req              = MemAccessRequest(name="reqmem")

        # Cache hit state
        self.hit_way          = Signal(WAY_BITS)
        self.hit_load_valid   = Signal()
        self.hit_index        = Signal(INDEX_BITS)
        self.cache_hit        = Signal()

        # TLB hit state
        self.tlb_hit          = Signal()
        self.tlb_hit_way      = Signal(TLB_NUM_WAYS)
        self.tlb_hit_index    = Signal(TLB_WAY_BITS)

        # 2-stage data buffer for data forwarded from writes to reads
        self.forward_data1    = Signal(64)
        self.forward_data2    = Signal(64)
        self.forward_sel1     = Signal(8)
        self.forward_valid1   = Signal()
        self.forward_way1     = Signal(WAY_BITS)
        self.forward_row1     = Signal(ROW_BITS)
        self.use_forward1     = Signal()
        self.forward_sel      = Signal(8)
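
        # Forwarding sketch (added; an illustration, not original source):
        # a store writes the BRAM one cycle after it is latched, so a read
        # of the same row in the following two cycles would see stale BRAM
        # output.  forward_data1 covers the 1-cycle-old write and
        # forward_data2 the 2-cycle-old one; forward_sel masks the bytes
        # actually written.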
        # Cache miss state (reload state machine)
        self.state            = Signal(State)
        self.dcbz             = Signal()
        self.write_bram       = Signal()
        self.write_tag        = Signal()
        self.slow_valid       = Signal()
        self.wb               = WBMasterOut("wb")
        self.reload_tag       = Signal(TAG_BITS)
        self.store_way        = Signal(WAY_BITS)
        self.store_row        = Signal(ROW_BITS)
        self.store_index      = Signal(INDEX_BITS)
        self.end_row_ix       = Signal(ROW_LINE_BITS)
        self.rows_valid       = RowPerLineValidArray()
        self.acks_pending     = Signal(3)
        self.inc_acks         = Signal()
        self.dec_acks         = Signal()

        # Signals to complete (possibly with error)
        self.ls_valid         = Signal()
        self.ls_error         = Signal()
        self.mmu_done         = Signal()
        self.mmu_error        = Signal()
        self.cache_paradox    = Signal()

        # Signal to complete a failed stcx.
        self.stcx_fail        = Signal()
# Reservation information
class Reservation(RecordObject):
    def __init__(self):
        super().__init__()
        self.valid = Signal()
        self.addr  = Signal(64-LINE_OFF_BITS)
class DTLBUpdate(Elaboratable):
    def __init__(self):
        self.tlbie         = Signal()
        self.tlbwe         = Signal()
        self.doall         = Signal()
        self.updated       = Signal()
        self.v_updated     = Signal()
        self.tlb_hit       = Signal()
        self.tlb_req_index = Signal(TLB_SET_BITS)

        self.tlb_hit_way   = Signal(TLB_WAY_BITS)
        self.tlb_tag_way   = Signal(TLB_TAG_WAY_BITS)
        self.tlb_pte_way   = Signal(TLB_PTE_WAY_BITS)
        self.repl_way      = Signal(TLB_WAY_BITS)
        self.eatag         = Signal(TLB_EA_TAG_BITS)
        self.pte_data      = Signal(TLB_PTE_BITS)

        self.dv = Signal(TLB_PTE_WAY_BITS)

        self.tb_out = Signal(TLB_TAG_WAY_BITS)
        self.pb_out = Signal(TLB_NUM_WAYS)
        self.db_out = Signal(TLB_PTE_WAY_BITS)
    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        tagset = Signal(TLB_TAG_WAY_BITS)
        pteset = Signal(TLB_PTE_WAY_BITS)

        tb_out, pb_out, db_out = self.tb_out, self.pb_out, self.db_out

        with m.If(self.tlbie & self.doall):
            pass # clear all back in parent
        with m.Elif(self.tlbie):
            with m.If(self.tlb_hit):
                comb += db_out.eq(self.dv)
                comb += db_out.bit_select(self.tlb_hit_way, 1).eq(1)
                comb += self.v_updated.eq(1)

        with m.Elif(self.tlbwe):

            comb += tagset.eq(self.tlb_tag_way)
            comb += write_tlb_tag(self.repl_way, tagset, self.eatag)
            comb += tb_out.eq(tagset)

            comb += pteset.eq(self.tlb_pte_way)
            comb += write_tlb_pte(self.repl_way, pteset, self.pte_data)
            comb += pb_out.eq(pteset)

            comb += db_out.bit_select(self.repl_way, 1).eq(1)

            comb += self.updated.eq(1)
            comb += self.v_updated.eq(1)

        return m
class DCachePendingHit(Elaboratable):

    def __init__(self, tlb_pte_way, tlb_valid_way, tlb_hit_way,
                       cache_valid_idx, cache_tag_set,
                       req_addr,
                       hit_set):

        self.go          = Signal()
        self.virt_mode   = Signal()
        self.is_hit      = Signal()
        self.tlb_hit     = Signal()
        self.hit_way     = Signal(WAY_BITS)
        self.rel_match   = Signal()
        self.req_index   = Signal(INDEX_BITS)
        self.reload_tag  = Signal(TAG_BITS)

        self.tlb_hit_way     = tlb_hit_way
        self.tlb_pte_way     = tlb_pte_way
        self.tlb_valid_way   = tlb_valid_way
        self.cache_valid_idx = cache_valid_idx
        self.cache_tag_set   = cache_tag_set
        self.req_addr        = req_addr
        self.hit_set         = hit_set
, platform
):
502 virt_mode
= self
.virt_mode
504 tlb_pte_way
= self
.tlb_pte_way
505 tlb_valid_way
= self
.tlb_valid_way
506 cache_valid_idx
= self
.cache_valid_idx
507 cache_tag_set
= self
.cache_tag_set
508 req_addr
= self
.req_addr
509 tlb_hit_way
= self
.tlb_hit_way
510 tlb_hit
= self
.tlb_hit
511 hit_set
= self
.hit_set
512 hit_way
= self
.hit_way
513 rel_match
= self
.rel_match
514 req_index
= self
.req_index
515 reload_tag
= self
.reload_tag
517 rel_matches
= Array(Signal(name
="rel_matches_%d" % i
) \
518 for i
in range(TLB_NUM_WAYS
))
519 hit_way_set
= HitWaySet()
521 # Test if pending request is a hit on any way
522 # In order to make timing in virtual mode,
523 # when we are using the TLB, we compare each
524 # way with each of the real addresses from each way of
525 # the TLB, and then decide later which match to use.
527 with m
.If(virt_mode
):
528 for j
in range(TLB_NUM_WAYS
):
529 s_tag
= Signal(TAG_BITS
, name
="s_tag%d" % j
)
531 s_pte
= Signal(TLB_PTE_BITS
)
532 s_ra
= Signal(REAL_ADDR_BITS
)
533 comb
+= s_pte
.eq(read_tlb_pte(j
, tlb_pte_way
))
534 comb
+= s_ra
.eq(Cat(req_addr
[0:TLB_LG_PGSZ
],
535 s_pte
[TLB_LG_PGSZ
:REAL_ADDR_BITS
]))
536 comb
+= s_tag
.eq(get_tag(s_ra
))
538 for i
in range(NUM_WAYS
):
539 is_tag_hit
= Signal(name
="is_tag_hit_%d_%d" % (j
, i
))
540 comb
+= is_tag_hit
.eq(go
& cache_valid_idx
[i
] &
541 (read_tag(i
, cache_tag_set
) == s_tag
)
543 with m
.If(is_tag_hit
):
544 comb
+= hit_way_set
[j
].eq(i
)
546 comb
+= hit_set
[j
].eq(s_hit
)
547 with m
.If(s_tag
== reload_tag
):
548 comb
+= rel_matches
[j
].eq(1)
550 comb
+= is_hit
.eq(hit_set
[tlb_hit_way
])
551 comb
+= hit_way
.eq(hit_way_set
[tlb_hit_way
])
552 comb
+= rel_match
.eq(rel_matches
[tlb_hit_way
])
554 s_tag
= Signal(TAG_BITS
)
555 comb
+= s_tag
.eq(get_tag(req_addr
))
556 for i
in range(NUM_WAYS
):
557 is_tag_hit
= Signal(name
="is_tag_hit_%d" % i
)
558 comb
+= is_tag_hit
.eq(go
& cache_valid_idx
[i
] &
559 (read_tag(i
, cache_tag_set
) == s_tag
))
560 with m
.If(is_tag_hit
):
561 comb
+= hit_way
.eq(i
)
563 with m
.If(s_tag
== reload_tag
):
564 comb
+= rel_match
.eq(1)
class DCache(Elaboratable):
    """Set associative dcache write-through
    TODO (in no specific order):
    * See list in icache.vhdl
    * Complete load misses on the cycle when WB data comes instead of
      at the end of line (this requires dealing with requests coming in
      while not idle...)
    """
    def __init__(self):
        self.d_in      = LoadStore1ToDCacheType("d_in")
        self.d_out     = DCacheToLoadStore1Type("d_out")

        self.m_in      = MMUToDCacheType("m_in")
        self.m_out     = DCacheToMMUType("m_out")

        self.stall_out = Signal()

        self.wb_out    = WBMasterOut()
        self.wb_in     = WBSlaveOut()

        self.log_out   = Signal(20)
    def stage_0(self, m, r0, r1, r0_full):
        """Latch the request in r0.req as long as we're not stalling
        """
        comb = m.d.comb
        sync = m.d.sync
        d_in, d_out, m_in = self.d_in, self.d_out, self.m_in

        r = RegStage0("stage0")

        # TODO, this goes in unit tests and formal proofs
        with m.If(d_in.valid & m_in.valid):
            sync += Display("request collision loadstore vs MMU")

        with m.If(m_in.valid):
            sync += r.req.valid.eq(1)
            sync += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))
            sync += r.req.dcbz.eq(0)
            sync += r.req.nc.eq(0)
            sync += r.req.reserve.eq(0)
            sync += r.req.virt_mode.eq(1)
            sync += r.req.priv_mode.eq(1)
            sync += r.req.addr.eq(m_in.addr)
            sync += r.req.data.eq(m_in.pte)
            sync += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
            sync += r.tlbie.eq(m_in.tlbie)
            sync += r.doall.eq(m_in.doall)
            sync += r.tlbld.eq(m_in.tlbld)
            sync += r.mmu_req.eq(1)
        with m.Else():
            sync += r.req.eq(d_in)
            sync += r.tlbie.eq(0)
            sync += r.doall.eq(0)
            sync += r.tlbld.eq(0)
            sync += r.mmu_req.eq(0)
        with m.If(~(r1.full & r0_full)):
            sync += r0.eq(r)
            sync += r0_full.eq(r.req.valid)
    def tlb_read(self, m, r0_stall, tlb_valid_way,
                 tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
                 dtlb_tags, dtlb_ptes):
        """TLB
        Operates in the second cycle on the request latched in r0.req.
        TLB updates write the entry at the end of the second cycle.
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        index    = Signal(TLB_SET_BITS)
        addrbits = Signal(TLB_SET_BITS)

        amin = TLB_LG_PGSZ
        amax = TLB_LG_PGSZ + TLB_SET_BITS

        with m.If(m_in.valid):
            comb += addrbits.eq(m_in.addr[amin : amax])
        with m.Else():
            comb += addrbits.eq(d_in.addr[amin : amax])
        comb += index.eq(addrbits)

        # If we have any op and the previous op isn't finished,
        # then keep the same output for next cycle.
        with m.If(~r0_stall):
            sync += tlb_valid_way.eq(dtlb_valid_bits[index])
            sync += tlb_tag_way.eq(dtlb_tags[index])
            sync += tlb_pte_way.eq(dtlb_ptes[index])
    def maybe_tlb_plrus(self, m, r1, tlb_plru_victim):
        """Generate TLB PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        if TLB_NUM_WAYS == 0:
            return
        for i in range(TLB_SET_SIZE):
            # TLB PLRU interface
            tlb_plru = PLRU(WAY_BITS)
            setattr(m.submodules, "maybe_plru_%d" % i, tlb_plru)
            tlb_plru_acc_en = Signal()

            comb += tlb_plru_acc_en.eq(r1.tlb_hit & (r1.tlb_hit_index == i))
            comb += tlb_plru.acc_en.eq(tlb_plru_acc_en)
            comb += tlb_plru.acc.eq(r1.tlb_hit_way)
            comb += tlb_plru_victim[i].eq(tlb_plru.lru_o)
    def tlb_search(self, m, tlb_req_index, r0, r0_valid,
                   tlb_valid_way, tlb_tag_way, tlb_hit_way,
                   tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra):

        comb = m.d.comb
        sync = m.d.sync

        hitway = Signal(TLB_WAY_BITS)
        hit    = Signal()
        eatag  = Signal(TLB_EA_TAG_BITS)

        TLB_LG_END = TLB_LG_PGSZ + TLB_SET_BITS
        comb += tlb_req_index.eq(r0.req.addr[TLB_LG_PGSZ : TLB_LG_END])
        comb += eatag.eq(r0.req.addr[TLB_LG_END : 64 ])

        for i in range(TLB_NUM_WAYS):
            is_tag_hit = Signal()
            # note: the tag comparison needs its own parentheses, because
            # "&" binds tighter than "==" in python
            comb += is_tag_hit.eq(tlb_valid_way[i]
                                  & (read_tlb_tag(i, tlb_tag_way) == eatag))
            with m.If(is_tag_hit):
                comb += hitway.eq(i)
                comb += hit.eq(1)

        comb += tlb_hit.eq(hit & r0_valid)
        comb += tlb_hit_way.eq(hitway)

        with m.If(tlb_hit):
            comb += pte.eq(read_tlb_pte(hitway, tlb_pte_way))
        comb += valid_ra.eq(tlb_hit | ~r0.req.virt_mode)
        with m.If(r0.req.virt_mode):
            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                              r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
                              pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
            comb += perm_attr.eq(extract_perm_attr(pte))
        with m.Else():
            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                              r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]))

            comb += perm_attr.reference.eq(1)
            comb += perm_attr.changed.eq(1)
            comb += perm_attr.nocache.eq(0)
            comb += perm_attr.priv.eq(1)
            comb += perm_attr.rd_perm.eq(1)
            comb += perm_attr.wr_perm.eq(1)
    def tlb_update(self, m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
                   tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
                   dtlb_tags, tlb_pte_way, dtlb_ptes):

        comb = m.d.comb
        sync = m.d.sync

        tlbie = Signal()
        tlbwe = Signal()

        comb += tlbie.eq(r0_valid & r0.tlbie)
        comb += tlbwe.eq(r0_valid & r0.tlbld)

        m.submodules.tlb_update = d = DTLBUpdate()
        with m.If(tlbie & r0.doall):
            # clear all valid bits at once
            for i in range(TLB_SET_SIZE):
                sync += dtlb_valid_bits[i].eq(0)
        with m.If(d.updated):
            sync += dtlb_tags[tlb_req_index].eq(d.tb_out)
            sync += dtlb_ptes[tlb_req_index].eq(d.pb_out)
        with m.If(d.v_updated):
            sync += dtlb_valid_bits[tlb_req_index].eq(d.db_out)

        comb += d.dv.eq(dtlb_valid_bits[tlb_req_index])

        comb += d.tlbie.eq(tlbie)
        comb += d.tlbwe.eq(tlbwe)
        comb += d.doall.eq(r0.doall)
        comb += d.tlb_hit.eq(tlb_hit)
        comb += d.tlb_hit_way.eq(tlb_hit_way)
        comb += d.tlb_tag_way.eq(tlb_tag_way)
        comb += d.tlb_pte_way.eq(tlb_pte_way)
        comb += d.tlb_req_index.eq(tlb_req_index)

        with m.If(tlb_hit):
            comb += d.repl_way.eq(tlb_hit_way)
        with m.Else():
            comb += d.repl_way.eq(tlb_plru_victim[tlb_req_index])
        comb += d.eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
        comb += d.pte_data.eq(r0.req.data)
    def maybe_plrus(self, m, r1, plru_victim):
        """Generate PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        if TLB_NUM_WAYS == 0:
            return

        for i in range(NUM_LINES):
            # PLRU interface
            plru = PLRU(WAY_BITS)
            setattr(m.submodules, "plru%d" % i, plru)
            plru_acc_en = Signal()

            comb += plru_acc_en.eq(r1.cache_hit & (r1.hit_index == i))
            comb += plru.acc_en.eq(plru_acc_en)
            comb += plru.acc.eq(r1.hit_way)
            comb += plru_victim[i].eq(plru.lru_o)
    def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set,
                       cache_tags):
        """Cache tag RAM read port
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        index = Signal(INDEX_BITS)

        with m.If(r0_stall):
            comb += index.eq(req_index)
        with m.Elif(m_in.valid):
            comb += index.eq(get_index(m_in.addr))
        with m.Else():
            comb += index.eq(get_index(d_in.addr))
        sync += cache_tag_set.eq(cache_tags[index])
    def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
                       r0_valid, r1, cache_valid_bits, replace_way,
                       use_forward1_next, use_forward2_next,
                       req_hit_way, plru_victim, rc_ok, perm_attr,
                       valid_ra, perm_ok, access_ok, req_op, req_go,
                       tlb_hit, tlb_hit_way, tlb_valid_way, cache_tag_set,
                       cancel_store, req_same_tag, r0_stall, early_req_row):
        """Cache request parsing and hit detection
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        is_hit  = Signal()
        hit_way = Signal(WAY_BITS)
        op      = Signal(Op)
        opsel   = Signal(3)
        go      = Signal()
        nc      = Signal()
        hit_set = Array(Signal(name="hit_set_%d" % i) \
                        for i in range(TLB_NUM_WAYS))
        cache_valid_idx = Signal(INDEX_BITS)

        # Extract line, row and tag from request
        comb += req_index.eq(get_index(r0.req.addr))
        comb += req_row.eq(get_row(r0.req.addr))
        comb += req_tag.eq(get_tag(ra))

        comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
        comb += cache_valid_idx.eq(cache_valid_bits[req_index])

        m.submodules.dcache_pend = dc = DCachePendingHit(tlb_pte_way,
                                            tlb_valid_way, tlb_hit_way,
                                            cache_valid_idx, cache_tag_set,
                                            r0.req.addr,
                                            hit_set)

        comb += dc.tlb_hit.eq(tlb_hit)
        comb += dc.reload_tag.eq(r1.reload_tag)
        comb += dc.virt_mode.eq(r0.req.virt_mode)
        comb += dc.go.eq(go)
        comb += dc.req_index.eq(req_index)
        comb += is_hit.eq(dc.is_hit)
        comb += hit_way.eq(dc.hit_way)
        comb += req_same_tag.eq(dc.rel_match)

        # See if the request matches the line currently being reloaded
        with m.If((r1.state == State.RELOAD_WAIT_ACK) &
                  (req_index == r1.store_index) & req_same_tag):
            # For a store, consider this a hit even if the row isn't
            # valid since it will be by the time we perform the store.
            # For a load, check the appropriate row valid bit.
            valid = r1.rows_valid[req_row % ROW_PER_LINE]
            comb += is_hit.eq(~r0.req.load | valid)
            comb += hit_way.eq(replace_way)

        # Whether to use forwarded data for a load or not
        with m.If((get_row(r1.req.real_addr) == req_row) &
                  (r1.req.hit_way == hit_way)):
            # Only need to consider r1.write_bram here, since if we
            # are writing refill data here, then we don't have a
            # cache hit this cycle on the line being refilled.
            # (There is the possibility that the load following the
            # load miss that started the refill could be to the old
            # contents of the victim line, since it is a couple of
            # cycles after the refill starts before we see the updated
            # cache tag. In that case we don't use the bypass.)
            comb += use_forward1_next.eq(r1.write_bram)
        with m.If((r1.forward_row1 == req_row) &
                  (r1.forward_way1 == hit_way)):
            comb += use_forward2_next.eq(r1.forward_valid1)

        # The way that matched on a hit
        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r1.write_tag):
            comb += replace_way.eq(plru_victim[r1.store_index])
        with m.Else():
            comb += replace_way.eq(r1.store_way)

        # work out whether we have permission for this access
        # NB we don't yet implement AMR, thus no KUAP
        comb += rc_ok.eq(perm_attr.reference
                         & (r0.req.load | perm_attr.changed))
        comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv) &
                           (perm_attr.wr_perm |
                            (r0.req.load & perm_attr.rd_perm)))
        comb += access_ok.eq(valid_ra & perm_ok & rc_ok)

        # Combine the request and cache hit status to decide what
        # operation needs to be done
        comb += nc.eq(r0.req.nc | perm_attr.nocache)
        comb += op.eq(Op.OP_NONE)

        with m.If(~access_ok):
            comb += op.eq(Op.OP_BAD)
        with m.Elif(cancel_store):
            comb += op.eq(Op.OP_STCX_FAIL)
        with m.Else():
            comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
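            # opsel packs the decision inputs LSB-first (added note):
            # bit0=is_hit, bit1=nc, bit2=load; e.g. 0b101 means
            # load=1, nc=0, hit=1 -> OP_LOAD_HIT in the switch below.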
            with m.Switch(opsel):
                with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
                with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
                with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
                with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
                with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
                with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
                with m.Case(0b011): comb += op.eq(Op.OP_BAD)
                with m.Case(0b111): comb += op.eq(Op.OP_BAD)
        comb += req_op.eq(op)
        comb += req_go.eq(go)

        # Version of the row number that is valid one cycle earlier
        # in the cases where we need to read the cache data BRAM.
        # If we're stalling then we need to keep reading the last
        # row requested.
        with m.If(~r0_stall):
            with m.If(m_in.valid):
                comb += early_req_row.eq(get_row(m_in.addr))
            with m.Else():
                comb += early_req_row.eq(get_row(d_in.addr))
        with m.Else():
            comb += early_req_row.eq(req_row)
    def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
                         r0_valid, r0, reservation):
        """Handle load-with-reservation and store-conditional instructions
        """
        comb = m.d.comb
        sync = m.d.sync

        with m.If(r0_valid & r0.req.reserve):
            # XXX generate alignment interrupt if address
            # is not aligned XXX or if r0.req.nc = '1'
            with m.If(r0.req.load):
                comb += set_rsrv.eq(1) # load with reservation
            with m.Else():
                comb += clear_rsrv.eq(1) # store conditional
                # cancel the store if the reservation is not valid or is
                # for a different line (the comparison against the stored
                # reservation address restores the microwatt behaviour)
                with m.If(~reservation.valid |
                          (r0.req.addr[LINE_OFF_BITS:64] !=
                           reservation.addr)):
                    comb += cancel_store.eq(1)
    def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                        reservation, r0):

        comb = m.d.comb
        sync = m.d.sync

        with m.If(r0_valid & access_ok):
            with m.If(clear_rsrv):
                sync += reservation.valid.eq(0)
            with m.Elif(set_rsrv):
                sync += reservation.valid.eq(1)
                sync += reservation.addr.eq(r0.req.addr[LINE_OFF_BITS:64])
    def writeback_control(self, m, r1, cache_out):
        """Return data for loads & completion control logic
        """
        comb = m.d.comb
        sync = m.d.sync
        d_out, m_out = self.d_out, self.m_out

        data_out = Signal(64)
        data_fwd = Signal(64)

        # Use the bypass if are reading the row that was
        # written 1 or 2 cycles ago, including for the
        # slow_valid = 1 case (i.e. completing a load
        # miss or a non-cacheable load).
        with m.If(r1.use_forward1):
            comb += data_fwd.eq(r1.forward_data1)
        with m.Else():
            comb += data_fwd.eq(r1.forward_data2)

        comb += data_out.eq(cache_out[r1.hit_way])

        for i in range(8):
            with m.If(r1.forward_sel[i]):
                dsel = data_fwd.word_select(i, 8)
                comb += data_out.word_select(i, 8).eq(dsel)

        comb += d_out.valid.eq(r1.ls_valid)
        comb += d_out.data.eq(data_out)
        comb += d_out.store_done.eq(~r1.stcx_fail)
        comb += d_out.error.eq(r1.ls_error)
        comb += d_out.cache_paradox.eq(r1.cache_paradox)

        # Outputs to MMU
        comb += m_out.done.eq(r1.mmu_done)
        comb += m_out.err.eq(r1.mmu_error)
        comb += m_out.data.eq(data_out)

        # We have a valid load or store hit or we just completed
        # a slow op such as a load miss, a NC load or a store
        #
        # Note: the load hit is delayed by one cycle. However it
        # can still not collide with r.slow_valid (well unless I
        # miscalculated) because slow_valid can only be set on a
        # subsequent request and not on its first cycle (the state
        # machine must have advanced), which makes slow_valid
        # at least 2 cycles from the previous hit_load_valid.

        # Sanity: Only one of these must be set in any given cycle

        if False: # TODO: need Display to get this to work
            assert (r1.slow_valid & r1.stcx_fail) != 1, \
                "unexpected slow_valid collision with stcx_fail"

            assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
                "unexpected hit_load_delayed collision with slow_valid"

        with m.If(~r1.mmu_req):
            # Request came from loadstore1...
            # Load hit case is the standard path
            with m.If(r1.hit_load_valid):
                sync += Display("completing load hit data=%x", data_out)

            # error cases complete without stalling
            with m.If(r1.ls_error):
                sync += Display("completing ld/st with error")

            # Slow ops (load miss, NC, stores)
            with m.If(r1.slow_valid):
                sync += Display("completing store or load miss data=%x",
                                data_out)
        with m.Else():
            # Request came from MMU
            with m.If(r1.hit_load_valid):
                sync += Display("completing load hit to MMU, data=%x",
                                m_out.data)
            # error cases complete without stalling
            with m.If(r1.mmu_error):
                sync += Display("completing MMU ld with error")

            # Slow ops (i.e. load miss)
            with m.If(r1.slow_valid):
                sync += Display("completing MMU load miss, data=%x",
                                m_out.data)
    def rams(self, m, r1, early_req_row, cache_out, replace_way):
        """rams
        Generate a cache RAM for each way. This handles the normal
        reads, writes from reloads and the special store-hit update
        path as well.

        Note: the BRAMs have an extra read buffer, meaning the output
        is pipelined an extra cycle. This differs from the
        icache. The writeback logic needs to take that into
        account by using 1-cycle delayed signals for load hits.
        """
        comb = m.d.comb

        wb_in = self.wb_in

        for i in range(NUM_WAYS):
            do_read  = Signal(name="do_rd%d" % i)
            rd_addr  = Signal(ROW_BITS)
            do_write = Signal(name="do_wr%d" % i)
            wr_addr  = Signal(ROW_BITS)
            wr_data  = Signal(WB_DATA_BITS)
            wr_sel   = Signal(ROW_SIZE)
            wr_sel_m = Signal(ROW_SIZE)
            _d_out   = Signal(WB_DATA_BITS, name="dout_%d" % i)

            way = CacheRam(ROW_BITS, WB_DATA_BITS, True)
            setattr(m.submodules, "cacheram_%d" % i, way)

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += _d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel_m)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(wr_data)

            # Cache hit reads
            comb += do_read.eq(1)
            comb += rd_addr.eq(early_req_row[:ROW_BITS])
            comb += cache_out[i].eq(_d_out)

            # Write mux:
            #
            # Defaults to wishbone read responses (cache refill)
            #
            # For timing, the mux on wr_data/sel/addr is not
            # dependent on anything other than the current state.

            with m.If(r1.write_bram):
                # Write store data to BRAM. This happens one
                # cycle after the store is in r0.
                comb += wr_data.eq(r1.req.data)
                comb += wr_sel.eq(r1.req.byte_sel)
                comb += wr_addr.eq(get_row(r1.req.real_addr))

                with m.If(i == r1.req.hit_way):
                    comb += do_write.eq(1)
            with m.Else():
                # Otherwise, we might be doing a reload or a DCBZ
                with m.If(r1.dcbz):
                    comb += wr_data.eq(0)
                with m.Else():
                    comb += wr_data.eq(wb_in.dat)
                comb += wr_addr.eq(r1.store_row)
                comb += wr_sel.eq(~0) # all 1s

                with m.If((r1.state == State.RELOAD_WAIT_ACK)
                          & wb_in.ack & (replace_way == i)):
                    comb += do_write.eq(1)

            # Mask write selects with do_write since BRAM
            # doesn't have a global write-enable
            with m.If(do_write):
                comb += wr_sel_m.eq(wr_sel)
    # Cache hit synchronous machine for the easy case.
    # This handles load hits.
    # It also handles error cases (TLB miss, cache paradox)
    def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
                        req_hit_way, req_index, req_tag, access_ok,
                        tlb_hit, tlb_hit_way, tlb_req_index):

        comb = m.d.comb
        sync = m.d.sync

        with m.If(req_op != Op.OP_NONE):
            sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
                            req_op, r0.req.addr, r0.req.nc,
                            req_index, req_tag, req_hit_way)

        with m.If(r0_valid):
            sync += r1.mmu_req.eq(r0.mmu_req)

        # Fast path for load/store hits.
        # Set signals for the writeback controls.
        sync += r1.hit_way.eq(req_hit_way)
        sync += r1.hit_index.eq(req_index)

        with m.If(req_op == Op.OP_LOAD_HIT):
            sync += r1.hit_load_valid.eq(1)
        with m.Else():
            sync += r1.hit_load_valid.eq(0)

        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STORE_HIT)):
            sync += r1.cache_hit.eq(1)
        with m.Else():
            sync += r1.cache_hit.eq(0)

        with m.If(req_op == Op.OP_BAD):
            # Display(f"Signalling ld/st error valid_ra={valid_ra}"
            #         f"rc_ok={rc_ok} perm_ok={perm_ok}"
            sync += r1.ls_error.eq(~r0.mmu_req)
            sync += r1.mmu_error.eq(r0.mmu_req)
            sync += r1.cache_paradox.eq(access_ok)
        with m.Else():
            sync += r1.ls_error.eq(0)
            sync += r1.mmu_error.eq(0)
            sync += r1.cache_paradox.eq(0)

        with m.If(req_op == Op.OP_STCX_FAIL):
            sync += r1.stcx_fail.eq(1)
        with m.Else():
            sync += r1.stcx_fail.eq(0)

        # Record TLB hit information for updating TLB PLRU
        sync += r1.tlb_hit.eq(tlb_hit)
        sync += r1.tlb_hit_way.eq(tlb_hit_way)
        sync += r1.tlb_hit_index.eq(tlb_req_index)
    # Memory accesses are handled by this state machine:
    #
    #   * Cache load miss/reload (in conjunction with "rams")
    #   * Load hits for non-cachable forms
    #   * Stores (the collision case is handled in "rams")
    #
    # All wishbone requests generation is done here.
    # This machine operates at stage 1.
    def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
                    cache_valid_bits, r0, replace_way,
                    req_hit_way, req_same_tag,
                    r0_valid, req_op, cache_tags, req_go, ra):

        comb = m.d.comb
        sync = m.d.sync
        wb_in = self.wb_in

        req         = MemAccessRequest("mreq_ds")
        acks        = Signal(3)
        adjust_acks = Signal(3)

        req_row = Signal(ROW_BITS)
        req_idx = Signal(INDEX_BITS)
        req_tag = Signal(TAG_BITS)
        comb += req_idx.eq(get_index(req.real_addr))
        comb += req_row.eq(get_row(req.real_addr))
        comb += req_tag.eq(get_tag(req.real_addr))

        sync += r1.use_forward1.eq(use_forward1_next)
        sync += r1.forward_sel.eq(0)

        with m.If(use_forward1_next):
            sync += r1.forward_sel.eq(r1.req.byte_sel)
        with m.Elif(use_forward2_next):
            sync += r1.forward_sel.eq(r1.forward_sel1)

        sync += r1.forward_data2.eq(r1.forward_data1)
        with m.If(r1.write_bram):
            sync += r1.forward_data1.eq(r1.req.data)
            sync += r1.forward_sel1.eq(r1.req.byte_sel)
            sync += r1.forward_way1.eq(r1.req.hit_way)
            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
            sync += r1.forward_valid1.eq(1)
        with m.Else():
            with m.If(r1.dcbz):
                sync += r1.forward_data1.eq(0)
            with m.Else():
                sync += r1.forward_data1.eq(wb_in.dat)
            sync += r1.forward_sel1.eq(~0) # all 1s
            sync += r1.forward_way1.eq(replace_way)
            sync += r1.forward_row1.eq(r1.store_row)
            sync += r1.forward_valid1.eq(0)
        # One cycle pulses reset
        sync += r1.slow_valid.eq(0)
        sync += r1.write_bram.eq(0)
        sync += r1.inc_acks.eq(0)
        sync += r1.dec_acks.eq(0)

        sync += r1.ls_valid.eq(0)
        # complete tlbies and TLB loads in the third cycle
        sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))

        with m.If((req_op == Op.OP_LOAD_HIT)
                  | (req_op == Op.OP_STCX_FAIL)):
            with m.If(~r0.mmu_req):
                sync += r1.ls_valid.eq(1)
            with m.Else():
                sync += r1.mmu_done.eq(1)

        with m.If(r1.write_tag):
            # Store new tag in selected way
            for i in range(NUM_WAYS):
                with m.If(i == replace_way):
                    ct = Signal(TAG_RAM_WIDTH)
                    comb += ct.eq(cache_tags[r1.store_index])
                    comb += ct.word_select(i, TAG_WIDTH).eq(r1.reload_tag)
                    sync += cache_tags[r1.store_index].eq(ct)
            sync += r1.store_way.eq(replace_way)
            sync += r1.write_tag.eq(0)
        # Take request from r1.req if there is one there,
        # else from req_op, ra, etc.
        with m.If(r1.full):
            comb += req.eq(r1.req)
        with m.Else():
            comb += req.op.eq(req_op)
            comb += req.valid.eq(req_go)
            comb += req.mmu_req.eq(r0.mmu_req)
            comb += req.dcbz.eq(r0.req.dcbz)
            comb += req.real_addr.eq(ra)

            with m.If(~r0.req.dcbz):
                comb += req.data.eq(r0.req.data)
            with m.Else():
                comb += req.data.eq(0)

            # Select all bytes for dcbz
            # and for cacheable loads
            with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
                comb += req.byte_sel.eq(~0) # all 1s
            with m.Else():
                comb += req.byte_sel.eq(r0.req.byte_sel)
            comb += req.hit_way.eq(req_hit_way)
            comb += req.same_tag.eq(req_same_tag)

        # Store the incoming request from r0,
        # if it is a slow request
        # Note that r1.full = 1 implies req_op = OP_NONE
        with m.If((req_op == Op.OP_LOAD_MISS)
                  | (req_op == Op.OP_LOAD_NC)
                  | (req_op == Op.OP_STORE_MISS)
                  | (req_op == Op.OP_STORE_HIT)):
            sync += r1.req.eq(req)
            sync += r1.full.eq(1)
        # Main state machine
        with m.Switch(r1.state):

            with m.Case(State.IDLE):
                sync += r1.wb.adr.eq(req.real_addr)
                sync += r1.wb.sel.eq(req.byte_sel)
                sync += r1.wb.dat.eq(req.data)
                sync += r1.dcbz.eq(req.dcbz)

                # Keep track of our index and way
                # for subsequent stores.
                sync += r1.store_index.eq(req_idx)
                sync += r1.store_row.eq(req_row)
                sync += r1.end_row_ix.eq(get_row_of_line(req_row))
                sync += r1.reload_tag.eq(req_tag)
                sync += r1.req.same_tag.eq(1)

                with m.If(req.op == Op.OP_STORE_HIT):
                    sync += r1.store_way.eq(req.hit_way)

                # Reset per-row valid bits,
                # ready for handling OP_LOAD_MISS
                for i in range(ROW_PER_LINE):
                    sync += r1.rows_valid[i].eq(0)

                with m.If(req_op != Op.OP_NONE):
                    sync += Display("cache op %d", req.op)

                with m.Switch(req.op):
                    with m.Case(Op.OP_LOAD_HIT):
                        # stay in IDLE state
                        pass

                    with m.Case(Op.OP_LOAD_MISS):
                        sync += Display("cache miss real addr: %x " \
                                        "idx: %x tag: %x",
                                        req.real_addr, req_row, req_tag)

                        # Start the wishbone cycle
                        sync += r1.wb.we.eq(0)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                        # Track that we had one request sent
                        sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                        sync += r1.write_tag.eq(1)

                    with m.Case(Op.OP_LOAD_NC):
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)
                        sync += r1.wb.we.eq(0)
                        sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)

                    with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
                        with m.If(~req.dcbz):
                            sync += r1.state.eq(State.STORE_WAIT_ACK)
                            sync += r1.acks_pending.eq(1)
                            sync += r1.full.eq(0)
                            sync += r1.slow_valid.eq(1)

                            with m.If(~req.mmu_req):
                                sync += r1.ls_valid.eq(1)
                            with m.Else():
                                sync += r1.mmu_done.eq(1)

                            with m.If(req.op == Op.OP_STORE_HIT):
                                sync += r1.write_bram.eq(1)
                        with m.Else():
                            # dcbz is handled much like a load miss except
                            # that we are writing to memory instead of reading
                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)

                            with m.If(req.op == Op.OP_STORE_MISS):
                                sync += r1.write_tag.eq(1)

                        sync += r1.wb.we.eq(1)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                    # OP_NONE and OP_BAD do nothing
                    # OP_BAD & OP_STCX_FAIL were
                    # handled above already
                    with m.Case(Op.OP_NONE):
                        pass
                    with m.Case(Op.OP_BAD):
                        pass
                    with m.Case(Op.OP_STCX_FAIL):
                        pass
            with m.Case(State.RELOAD_WAIT_ACK):
                ld_stbs_done = Signal()
                # Requests are all sent if stb is 0
                comb += ld_stbs_done.eq(~r1.wb.stb)

                with m.If((~wb_in.stall) & r1.wb.stb):
                    # That was the last word?
                    # We are done sending.
                    # Clear stb and set ld_stbs_done
                    # so we can handle an eventual
                    # last ack on the same cycle.
                    with m.If(is_last_row_addr(r1.wb.adr, r1.end_row_ix)):
                        sync += r1.wb.stb.eq(0)
                        comb += ld_stbs_done.eq(1)

                    # Calculate the next row address in the current cache line
                    rarange = Signal(LINE_OFF_BITS-ROW_OFF_BITS)
                    comb += rarange.eq(r1.wb.adr[ROW_OFF_BITS:LINE_OFF_BITS]+1)
                    sync += r1.wb.adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(rarange)

                # Incoming acks processing
                sync += r1.forward_valid1.eq(wb_in.ack)
                with m.If(wb_in.ack):
                    sync += r1.rows_valid[r1.store_row % ROW_PER_LINE].eq(1)

                    # If this is the data we were looking for,
                    # we can complete the request next cycle.
                    # Compare the whole address in case the
                    # request in r1.req is not the one that
                    # started this refill.
                    with m.If(r1.full & r1.req.same_tag &
                              ((r1.dcbz & r1.req.dcbz) |
                               (~r1.dcbz &
                                (r1.req.op == Op.OP_LOAD_MISS))) &
                              (r1.store_row == get_row(r1.req.real_addr))):
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)
                        with m.If(~r1.mmu_req):
                            sync += r1.ls_valid.eq(1)
                        with m.Else():
                            sync += r1.mmu_done.eq(1)
                        sync += r1.forward_sel.eq(~0) # all 1s
                        sync += r1.use_forward1.eq(1)

                    # Check for completion
                    with m.If(ld_stbs_done & is_last_row(r1.store_row,
                                                         r1.end_row_ix)):
                        # Complete wishbone cycle
                        sync += r1.wb.cyc.eq(0)

                        # Cache line is now valid
                        cv = Signal(INDEX_BITS)
                        comb += cv.eq(cache_valid_bits[r1.store_index])
                        comb += cv.bit_select(r1.store_way, 1).eq(1)
                        sync += cache_valid_bits[r1.store_index].eq(cv)
                        sync += r1.state.eq(State.IDLE)

                    # Increment store row counter
                    sync += r1.store_row.eq(next_row(r1.store_row))
            with m.Case(State.STORE_WAIT_ACK):
                st_stbs_done = Signal()
                comb += st_stbs_done.eq(~r1.wb.stb)
                comb += acks.eq(r1.acks_pending)

                with m.If(r1.inc_acks != r1.dec_acks):
                    with m.If(r1.inc_acks):
                        comb += adjust_acks.eq(acks + 1)
                    with m.Else():
                        comb += adjust_acks.eq(acks - 1)
                with m.Else():
                    comb += adjust_acks.eq(acks)

                sync += r1.acks_pending.eq(adjust_acks)

                # Clear stb when slave accepted request
                with m.If(~wb_in.stall):
                    # See if there is another store waiting
                    # to be done which is in the same real page.
                    with m.If(req.valid):
                        ra = req.real_addr[0:SET_SIZE_BITS]
                        sync += r1.wb.adr[0:SET_SIZE_BITS].eq(ra)
                        sync += r1.wb.dat.eq(req.data)
                        sync += r1.wb.sel.eq(req.byte_sel)

                    with m.Elif((adjust_acks < 7) & req.same_tag &
                                ((req.op == Op.OP_STORE_MISS)
                                 | (req.op == Op.OP_STORE_HIT))):
                        sync += r1.wb.stb.eq(1)
                        comb += st_stbs_done.eq(0)

                        with m.If(req.op == Op.OP_STORE_HIT):
                            sync += r1.write_bram.eq(1)
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)

                        # Store requests never come from the MMU
                        sync += r1.ls_valid.eq(1)
                        comb += st_stbs_done.eq(0)
                        sync += r1.inc_acks.eq(1)
                    with m.Else():
                        sync += r1.wb.stb.eq(0)
                        comb += st_stbs_done.eq(1)

                # Got ack ? See if complete.
                with m.If(wb_in.ack):
                    with m.If(st_stbs_done & (adjust_acks == 1)):
                        sync += r1.state.eq(State.IDLE)
                        sync += r1.wb.cyc.eq(0)
                        sync += r1.wb.stb.eq(0)
                    sync += r1.dec_acks.eq(1)
.Case(State
.NC_LOAD_WAIT_ACK
):
1491 # Clear stb when slave accepted request
1492 with m
.If(~wb_in
.stall
):
1493 sync
+= r1
.wb
.stb
.eq(0)
1495 # Got ack ? complete.
1496 with m
.If(wb_in
.ack
):
1497 sync
+= r1
.state
.eq(State
.IDLE
)
1498 sync
+= r1
.full
.eq(0)
1499 sync
+= r1
.slow_valid
.eq(1)
1501 with m
.If(~r1
.mmu_req
):
1502 sync
+= r1
.ls_valid
.eq(1)
1504 sync
+= r1
.mmu_done
.eq(1)
1506 sync
+= r1
.forward_sel
.eq(~
0) # all 1s
1507 sync
+= r1
.use_forward1
.eq(1)
1508 sync
+= r1
.wb
.cyc
.eq(0)
1509 sync
+= r1
.wb
.stb
.eq(0)
    def dcache_log(self, m, r1, valid_ra, tlb_hit_way, stall_out):

        sync = m.d.sync
        d_out, wb_in, log_out = self.d_out, self.wb_in, self.log_out

        # note: req_op is not in scope here, which is why the call to
        # dcache_log in elaborate() below is commented out
        sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit_way[:3],
                               stall_out, req_op[:3], d_out.valid, d_out.error,
                               r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
                               r1.wb.adr[3:6]))
    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags       = CacheTagArray()
        cache_tag_set    = Signal(TAG_RAM_WIDTH)
        cache_valid_bits = CacheValidBitsArray()

        # TODO attribute ram_style : string;
        # TODO attribute ram_style of cache_tags : signal is "distributed";

        """note: these are passed to nmigen.hdl.Memory as "attributes".
           don't know how, just that they are.
        """
        dtlb_valid_bits = TLBValidBitsArray()
        dtlb_tags       = TLBTagsArray()
        dtlb_ptes       = TLBPtesArray()
        # TODO attribute ram_style of
        #  dtlb_tags : signal is "distributed";
        # TODO attribute ram_style of
        #  dtlb_ptes : signal is "distributed";

        r0      = RegStage0("r0")
        r0_full = Signal()

        r1 = RegStage1("r1")

        reservation = Reservation()

        # Async signals on incoming request
        req_index    = Signal(INDEX_BITS)
        req_row      = Signal(ROW_BITS)
        req_hit_way  = Signal(WAY_BITS)
        req_tag      = Signal(TAG_BITS)
        req_op       = Signal(Op)
        req_data     = Signal(64)
        req_same_tag = Signal()
        req_go       = Signal()

        early_req_row = Signal(ROW_BITS)

        cancel_store = Signal()
        set_rsrv     = Signal()
        clear_rsrv   = Signal()

        r0_valid = Signal()
        r0_stall = Signal()

        use_forward1_next = Signal()
        use_forward2_next = Signal()

        cache_out = CacheRamOut()

        plru_victim = PLRUOut()
        replace_way = Signal(WAY_BITS)

        # Wishbone read/write/cache write formatting signals

        # TLB signals
        tlb_tag_way   = Signal(TLB_TAG_WAY_BITS)
        tlb_pte_way   = Signal(TLB_PTE_WAY_BITS)
        tlb_valid_way = Signal(TLB_NUM_WAYS)
        tlb_req_index = Signal(TLB_SET_BITS)
        tlb_hit       = Signal()
        tlb_hit_way   = Signal(TLB_WAY_BITS)
        pte           = Signal(TLB_PTE_BITS)
        ra            = Signal(REAL_ADDR_BITS)
        valid_ra      = Signal()
        perm_attr     = PermAttr("dc_perms")
        rc_ok         = Signal()
        perm_ok       = Signal()
        access_ok     = Signal()

        tlb_plru_victim = TLBPLRUOut()
        # we don't yet handle collisions between loadstore1 requests
        # and MMU requests
        comb += self.m_out.stall.eq(0)

        # Hold off the request in r0 when r1 has an uncompleted request
        comb += r0_stall.eq(r0_full & r1.full)
        comb += r0_valid.eq(r0_full & ~r1.full)
        comb += self.stall_out.eq(r0_stall)

        # Wire up wishbone request latch out of stage 1
        comb += self.wb_out.eq(r1.wb)
        # call sub-functions putting everything together, using shared
        # signals established above
        self.stage_0(m, r0, r1, r0_full)
        self.tlb_read(m, r0_stall, tlb_valid_way,
                      tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
                      dtlb_tags, dtlb_ptes)
        self.tlb_search(m, tlb_req_index, r0, r0_valid,
                        tlb_valid_way, tlb_tag_way, tlb_hit_way,
                        tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra)
        self.tlb_update(m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
                        tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
                        dtlb_tags, tlb_pte_way, dtlb_ptes)
        self.maybe_plrus(m, r1, plru_victim)
        self.maybe_tlb_plrus(m, r1, tlb_plru_victim)
        self.cache_tag_read(m, r0_stall, req_index, cache_tag_set, cache_tags)
        self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
                            r0_valid, r1, cache_valid_bits, replace_way,
                            use_forward1_next, use_forward2_next,
                            req_hit_way, plru_victim, rc_ok, perm_attr,
                            valid_ra, perm_ok, access_ok, req_op, req_go,
                            tlb_hit, tlb_hit_way, tlb_valid_way, cache_tag_set,
                            cancel_store, req_same_tag, r0_stall,
                            early_req_row)
        self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
                              r0_valid, r0, reservation)
        self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                             reservation, r0)
        self.writeback_control(m, r1, cache_out)
        self.rams(m, r1, early_req_row, cache_out, replace_way)
        self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
                             req_hit_way, req_index, req_tag, access_ok,
                             tlb_hit, tlb_hit_way, tlb_req_index)
        self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
                         cache_valid_bits, r0, replace_way,
                         req_hit_way, req_same_tag,
                         r0_valid, req_op, cache_tags, req_go, ra)
        #self.dcache_log(m, r1, valid_ra, tlb_hit_way, stall_out)

        return m
def dcache_load(dut, addr, nc=0):
    yield dut.d_in.load.eq(1)
    yield dut.d_in.nc.eq(nc)
    yield dut.d_in.addr.eq(addr)
    yield dut.d_in.byte_sel.eq(~0)
    yield dut.d_in.valid.eq(1)
    yield
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.byte_sel.eq(0)
    yield
    while not (yield dut.d_out.valid):
        yield
    data = yield dut.d_out.data
    return data
def dcache_store(dut, addr, data, nc=0):
    yield dut.d_in.load.eq(0)
    yield dut.d_in.nc.eq(nc)
    yield dut.d_in.data.eq(data)
    yield dut.d_in.byte_sel.eq(~0)
    yield dut.d_in.addr.eq(addr)
    yield dut.d_in.valid.eq(1)
    yield
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.byte_sel.eq(0)
    yield
    while not (yield dut.d_out.valid):
        yield
def dcache_random_sim(dut):

    # start with stack of zeros
    sim_mem = [0] * 512

    # clear stuff
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.load.eq(0)
    yield dut.d_in.priv_mode.eq(1)
    yield dut.d_in.nc.eq(0)
    yield dut.d_in.addr.eq(0)
    yield dut.d_in.data.eq(0)
    yield dut.m_in.valid.eq(0)
    yield dut.m_in.addr.eq(0)
    yield dut.m_in.pte.eq(0)
    # wait 4 * clk_period
    yield
    yield
    yield
    yield

    for i in range(256):
        addr = randint(0, 255)
        data = randint(0, (1<<64)-1)
        sim_mem[addr] = data
        addr *= 8  # convert word index to byte address

        print ("testing %x data %x" % (addr, data))

        yield from dcache_load(dut, addr)
        yield from dcache_store(dut, addr, data)

        addr = randint(0, 255)
        sim_data = sim_mem[addr]
        addr *= 8  # convert word index to byte address

        data = yield from dcache_load(dut, addr)
        assert data == sim_data, \
            "check %x data %x != %x" % (addr, data, sim_data)

    for addr in range(8):
        data = yield from dcache_load(dut, addr*8)
        assert data == sim_mem[addr], \
            "final check %x data %x != %x" % (addr*8, data, sim_mem[addr])
def dcache_sim(dut):
    # clear stuff
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.load.eq(0)
    yield dut.d_in.priv_mode.eq(1)
    yield dut.d_in.nc.eq(0)
    yield dut.d_in.addr.eq(0)
    yield dut.d_in.data.eq(0)
    yield dut.m_in.valid.eq(0)
    yield dut.m_in.addr.eq(0)
    yield dut.m_in.pte.eq(0)
    # wait 4 * clk_period
    yield
    yield
    yield
    yield

    # Cacheable read of address 4
    data = yield from dcache_load(dut, 0x4)
    addr = yield dut.d_in.addr
    assert data == 0x0000000100000000, \
        "data @%x=%x expected 0x0000000100000000" % (addr, data)

    # Cacheable read of address 0x20
    data = yield from dcache_load(dut, 0x20)
    addr = yield dut.d_in.addr
    assert data == 0x0000000100000000, \
        "data @%x=%x expected 0x0000000100000000" % (addr, data)

    # Cacheable read of address 0x530
    data = yield from dcache_load(dut, 0x530)
    addr = yield dut.d_in.addr
    assert data == 0x0000014D0000014C, \
        "data @%x=%x expected 0000014D0000014C" % (addr, data)

    # 2nd Cacheable read of address 0x530
    data = yield from dcache_load(dut, 0x530)
    addr = yield dut.d_in.addr
    assert data == 0x0000014D0000014C, \
        "data @%x=%x expected 0000014D0000014C" % (addr, data)

    # Non-cacheable read of address 0x100
    data = yield from dcache_load(dut, 0x100, nc=1)
    addr = yield dut.d_in.addr
    assert data == 0x0000004100000040, \
        "data @%x=%x expected 0000004100000040" % (addr, data)

    # Store at address 0x530
    yield from dcache_store(dut, 0x530, 0x121)

    # Store at address 0x530 (again)
    yield from dcache_store(dut, 0x530, 0x12345678)

    # 3rd Cacheable read of address 0x530
    data = yield from dcache_load(dut, 0x530)
    addr = yield dut.d_in.addr
    assert data == 0x12345678, \
        "data @%x=%x expected 0x12345678" % (addr, data)

    # 4th Cacheable read, of address 0x20
    data = yield from dcache_load(dut, 0x20)
    addr = yield dut.d_in.addr
    assert data == 0x12345678, \
        "data @%x=%x expected 0x12345678" % (addr, data)
def test_dcache(mem, test_fn, test_name):
    dut = DCache()

    memory = Memory(width=64, depth=16*64, init=mem)
    sram = SRAM(memory=memory, granularity=8)

    m = Module()
    m.submodules.dcache = dut
    m.submodules.sram = sram

    m.d.comb += sram.bus.cyc.eq(dut.wb_out.cyc)
    m.d.comb += sram.bus.stb.eq(dut.wb_out.stb)
    m.d.comb += sram.bus.we.eq(dut.wb_out.we)
    m.d.comb += sram.bus.sel.eq(dut.wb_out.sel)
    m.d.comb += sram.bus.adr.eq(dut.wb_out.adr[3:])
    m.d.comb += sram.bus.dat_w.eq(dut.wb_out.dat)

    m.d.comb += dut.wb_in.ack.eq(sram.bus.ack)
    m.d.comb += dut.wb_in.dat.eq(sram.bus.dat_r)

    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(test_fn(dut)))
    with sim.write_vcd('test_dcache%s.vcd' % test_name):
        sim.run()
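
# Test-bench note (added): the SRAM model above is 64-bit-word addressed,
# while wb_out.adr carries a byte address, hence the [3:] slice when wiring
# sram.bus.adr.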
if __name__ == '__main__':
    dut = DCache()
    vl = rtlil.convert(dut, ports=[])
    with open("test_dcache.il", "w") as f:
        f.write(vl)

    mem = []
    for i in range(0,512):
        mem.append((i*2)| ((i*2+1)<<32))

    test_dcache(mem, dcache_sim, "")
    #test_dcache(None, dcache_random_sim, "random")