"""based on Anton Blanchard microwatt dcache.vhdl"""

from enum import Enum, unique

from nmigen import Module, Signal, Elaboratable, Cat, Const, Array
from nmigen.cli import main
from nmigen.iocontrol import RecordObject
from nmigen.utils import log2_int

from experiment.mem_types import (LoadStore1ToDcacheType,
                                  DcacheToLoadStore1Type,
                                  MmuToDcacheType, DcacheToMmuType)

from experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                 WBAddrType, WBDataType, WBSelType,
                                 WBMasterOut, WBSlaveOut,
                                 WBMasterOutVector, WBSlaveOutVector,
                                 WBIOMasterOut, WBIOSlaveOut)
# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
    def __init__(self):
        super().__init__()
        self.reference = Signal()
        self.changed   = Signal()
        self.nocache   = Signal()
        self.priv      = Signal()
        self.rd_perm   = Signal()
        self.wr_perm   = Signal()


def extract_perm_attr(pte):
    pa = PermAttr()
    # (PTE field extraction elided in this copy)
    return pa
# Type of operation on a "valid" input
@unique
class Op(Enum):
    OP_NONE       = 0
    OP_BAD        = 1 # NC cache hit, TLB miss, prot/RC failure
    OP_STCX_FAIL  = 2 # conditional store w/o reservation
    OP_LOAD_HIT   = 3 # Cache hit on load
    OP_LOAD_MISS  = 4 # Load missing cache
    OP_LOAD_NC    = 5 # Non-cachable load
    OP_STORE_HIT  = 6 # Store hitting cache
    OP_STORE_MISS = 7 # Store missing cache


# Cache state machine
@unique
class State(Enum):
    IDLE             = 0 # Normal load hit processing
    RELOAD_WAIT_ACK  = 1 # Cache reload wait ack
    STORE_WAIT_ACK   = 2 # Store wait ack
    NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
# In order to make timing, we use the BRAMs with
# an output buffer, which means that the BRAM
# output is delayed by an extra cycle.
#
# Thus, the dcache has a 2-stage internal pipeline
# for cache hits with no stalls.
#
# All other operations are handled via stalling
# in the first stage.
#
# The second stage can thus complete a hit at the same
# time as the first stage emits a stall for a complex op.
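# A rough timing sketch of the hit path implied by the comments above
# (illustrative only; the cycle labels are an assumption, not from the
# original VHDL):
#
#   cycle N   : request latched into r0 (stage 0)
#   cycle N+1 : tag compare / hit detection, BRAM read issued (stage 1)
#   cycle N+2 : buffered BRAM output available, load data written back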
# Stage 0 register, basically contains just the latched request
class RegStage0(RecordObject):
    def __init__(self):
        super().__init__()
        self.req     = LoadStore1ToDcacheType()
        self.tlbie   = Signal()
        self.doall   = Signal()
        self.tlbld   = Signal()
        self.mmu_req = Signal() # indicates source of request


class MemAccessRequest(RecordObject):
    def __init__(self):
        super().__init__()
        self.valid     = Signal()
        self.real_addr = Signal(REAL_ADDR_BITS)
        self.data      = Signal(64)
        self.byte_sel  = Signal(8)
        self.hit_way   = Signal(WAY_BITS)
        self.same_tag  = Signal()
        self.mmu_req   = Signal()
# Set associative dcache write-through
#
# TODO (in no specific order):
#
# * See list in icache.vhdl
# * Complete load misses on the cycle when WB data comes instead of
#   at the end of line (this requires dealing with requests coming in
#   while not idle...)
class Dcache(Elaboratable):
    def __init__(self):
        # TODO: make these parameters of Dcache at some point
        self.LINE_SIZE    = 64 # Line size in bytes
        self.NUM_LINES    = 32 # Number of lines in a set
        self.NUM_WAYS     = 4  # Number of ways
        self.TLB_SET_SIZE = 64 # L1 DTLB entries per set
        self.TLB_NUM_WAYS = 2  # L1 DTLB number of sets
        self.TLB_LG_PGSZ  = 12 # L1 DTLB log_2(page_size)
        self.LOG_LENGTH   = 0  # Non-zero to enable log data collection

        self.d_in  = LoadStore1ToDcacheType()
        self.d_out = DcacheToLoadStore1Type()

        self.m_in  = MmuToDcacheType()
        self.m_out = DcacheToMmuType()

        self.stall_out = Signal()

        self.wb_out = WBMasterOut()
        self.wb_in  = WBSlaveOut()

        self.log_out = Signal(20)
    def elaborate(self, platform):
        LINE_SIZE    = self.LINE_SIZE
        NUM_LINES    = self.NUM_LINES
        NUM_WAYS     = self.NUM_WAYS
        TLB_SET_SIZE = self.TLB_SET_SIZE
        TLB_NUM_WAYS = self.TLB_NUM_WAYS
        TLB_LG_PGSZ  = self.TLB_LG_PGSZ
        LOG_LENGTH   = self.LOG_LENGTH

        # BRAM organisation: We never access more than
        # wishbone_data_bits at a time so to save
        # resources we make the array only that wide, and
        # use consecutive indices to make a cache "line"
        #
        # ROW_SIZE is the width in bytes of the BRAM
        # (based on WB, so 64-bits)
        ROW_SIZE = WB_DATA_BITS // 8

        # ROW_PER_LINE is the number of row (wishbone
        # transactions) in a line
        ROW_PER_LINE = LINE_SIZE // ROW_SIZE

        # BRAM_ROWS is the number of rows in BRAM needed
        # to represent the full dcache
        BRAM_ROWS = NUM_LINES * ROW_PER_LINE
        # Bit fields counts in the address
        #
        # REAL_ADDR_BITS is the number of real address
        # bits that we store
        REAL_ADDR_BITS = 56

        # ROW_BITS is the number of bits to select a row
        ROW_BITS = log2_int(BRAM_ROWS)

        # ROW_LINE_BITS is the number of bits to select
        # a row within a line
        ROW_LINE_BITS = log2_int(ROW_PER_LINE)

        # LINE_OFF_BITS is the number of bits for
        # the offset in a cache line
        LINE_OFF_BITS = log2_int(LINE_SIZE)

        # ROW_OFF_BITS is the number of bits for
        # the offset in a row
        ROW_OFF_BITS = log2_int(ROW_SIZE)

        # INDEX_BITS is the number of bits to
        # select a cache line
        INDEX_BITS = log2_int(NUM_LINES)

        # SET_SIZE_BITS is the log base 2 of the set size
        SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS

        # TAG_BITS is the number of bits of
        # the tag part of the address
        TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS

        # TAG_WIDTH is the width in bits of each way of the tag RAM
        TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)

        # WAY_BITS is the number of bits to select a way
        WAY_BITS = log2_int(NUM_WAYS)
        # Example of layout for 32 lines of 64 bytes:
        #
        # ..  tag    |index| line  |
        # ..         |   row   |   |
        # ..         |     |---|   | ROW_LINE_BITS  (3)
        # ..         |     |--- - --| LINE_OFF_BITS (6)
        # ..         |         |- --| ROW_OFF_BITS  (3)
        # ..         |----- ---|    | ROW_BITS      (8)
        # ..         |-----|        | INDEX_BITS    (5)
        # .. --------|              | TAG_BITS      (45)
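        # Worked numbers for the default geometry above (LINE_SIZE=64,
        # NUM_LINES=32, NUM_WAYS=4, 64-bit wishbone data, 56-bit real
        # address), matching the figures in the diagram:
        #
        #   ROW_SIZE      = 64 // 8       = 8 bytes
        #   ROW_PER_LINE  = 64 // 8       = 8 rows per line
        #   BRAM_ROWS     = 32 * 8        = 256 rows
        #   ROW_BITS      = log2_int(256) = 8
        #   ROW_LINE_BITS = log2_int(8)   = 3
        #   LINE_OFF_BITS = log2_int(64)  = 6
        #   ROW_OFF_BITS  = log2_int(8)   = 3
        #   INDEX_BITS    = log2_int(32)  = 5
        #   SET_SIZE_BITS = 6 + 5         = 11
        #   TAG_BITS      = 56 - 11       = 45
        #   WAY_BITS      = log2_int(4)   = 2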
        # subtype row_t is integer range 0 to BRAM_ROWS-1;
        # subtype index_t is integer range 0 to NUM_LINES-1;
        """wherever way_t is used to make a Signal it must be substituted with
        log2_int(NUM_WAYS) i.e. WAY_BITS.  this is because whilst the *range*
        of the number is 0..NUM_WAYS-1 it requires log2_int(NUM_WAYS) i.e.
        WAY_BITS of space to store it
        """
        # subtype way_t is integer range 0 to NUM_WAYS-1;
        # subtype row_in_line_t is unsigned(ROW_LINE_BITS-1 downto 0);
        ROW         = BRAM_ROWS     # yyyeah not really necessary, delete
        INDEX       = NUM_LINES     # yyyeah not really necessary, delete
        WAY         = NUM_WAYS      # yyyeah not really necessary, delete
        ROW_IN_LINE = ROW_LINE_BITS # yyyeah not really necessary, delete
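        # For example (with the default NUM_WAYS=4): Signal(WAY_BITS) is a
        # 2-bit signal that can hold the way numbers 0..3, whereas
        # Signal(WAY) would create a 4-bit signal, wider than needed.
        # Several Signal(...) widths below still use the *count* aliases
        # (ROW, INDEX, WAY) rather than the *bit* constants and are
        # candidates for the substitution described in the docstring above.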
        # -- The cache data BRAM organized as described above for each way
        # subtype cache_row_t is
        #     std_ulogic_vector(wishbone_data_bits-1 downto 0);
        # The cache data BRAM organized as described above for each way
        CACHE_ROW = WB_DATA_BITS

        # -- The cache tags LUTRAM has a row per set.
        # -- Vivado is a pain and will not handle a
        # -- clean (commented) definition of the cache
        # -- tags as a 3d memory. For now, work around
        # -- it by putting all the tags
        # subtype cache_tag_t is std_logic_vector(TAG_BITS-1 downto 0);
        # The cache tags LUTRAM has a row per set.
        # Vivado is a pain and will not handle a
        # clean (commented) definition of the cache
        # tags as a 3d memory. For now, work around
        # it by putting all the tags
        CACHE_TAG = TAG_BITS

        # -- type cache_tags_set_t is array(way_t) of cache_tag_t;
        # -- type cache_tags_array_t is array(index_t) of cache_tags_set_t;
        # constant TAG_RAM_WIDTH : natural := TAG_WIDTH * NUM_WAYS;
        # subtype cache_tags_set_t is
        #     std_logic_vector(TAG_RAM_WIDTH-1 downto 0);
        # type cache_tags_array_t is array(index_t) of cache_tags_set_t;
        # type cache_tags_set_t is array(way_t) of cache_tag_t;
        # type cache_tags_array_t is array(index_t) of cache_tags_set_t;
        TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

        CACHE_TAG_SET = TAG_RAM_WIDTH

        def CacheTagArray():
            return Array(CacheTagSet() for x in range(INDEX))
        # -- The cache valid bits
        # subtype cache_way_valids_t is
        #     std_ulogic_vector(NUM_WAYS-1 downto 0);
        # type cache_valids_t is array(index_t) of cache_way_valids_t;
        # type row_per_line_valid_t is
        #     array(0 to ROW_PER_LINE - 1) of std_ulogic;
        # The cache valid bits
        CACHE_WAY_VALID_BITS = NUM_WAYS

        def CacheValidBitsArray():
            return Array(CacheWayValidBits() for x in range(INDEX))

        def RowPerLineValidArray():
            return Array(Signal() for x in range(ROW_PER_LINE))
        # -- Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        # signal cache_tags    : cache_tags_array_t;
        # signal cache_tag_set : cache_tags_set_t;
        # signal cache_valids  : cache_valids_t;
        #
        # attribute ram_style : string;
        # attribute ram_style of cache_tags : signal is "distributed";
        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags       = CacheTagArray()
        cache_tag_set    = Signal(CACHE_TAG_SET)
        cache_valid_bits = CacheValidBitsArray()

        # TODO attribute ram_style : string;
        # TODO attribute ram_style of cache_tags : signal is "distributed";
        # constant TLB_SET_BITS : natural := log2(TLB_SET_SIZE);
        # constant TLB_WAY_BITS : natural := log2(TLB_NUM_WAYS);
        # constant TLB_EA_TAG_BITS : natural :=
        #     64 - (TLB_LG_PGSZ + TLB_SET_BITS);
        # constant TLB_TAG_WAY_BITS : natural :=
        #     TLB_NUM_WAYS * TLB_EA_TAG_BITS;
        # constant TLB_PTE_BITS : natural := 64;
        # constant TLB_PTE_WAY_BITS : natural :=
        #     TLB_NUM_WAYS * TLB_PTE_BITS;
        TLB_SET_BITS     = log2_int(TLB_SET_SIZE)
        TLB_WAY_BITS     = log2_int(TLB_NUM_WAYS)
        TLB_EA_TAG_BITS  = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
        TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
        TLB_PTE_BITS     = 64
        TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS
        # subtype tlb_way_t is integer range 0 to TLB_NUM_WAYS - 1;
        # subtype tlb_index_t is integer range 0 to TLB_SET_SIZE - 1;
        # subtype tlb_way_valids_t is
        #     std_ulogic_vector(TLB_NUM_WAYS-1 downto 0);
        # type tlb_valids_t is
        #     array(tlb_index_t) of tlb_way_valids_t;
        # subtype tlb_tag_t is
        #     std_ulogic_vector(TLB_EA_TAG_BITS - 1 downto 0);
        # subtype tlb_way_tags_t is
        #     std_ulogic_vector(TLB_TAG_WAY_BITS-1 downto 0);
        # type tlb_tags_t is
        #     array(tlb_index_t) of tlb_way_tags_t;
        # subtype tlb_pte_t is
        #     std_ulogic_vector(TLB_PTE_BITS - 1 downto 0);
        # subtype tlb_way_ptes_t is
        #     std_ulogic_vector(TLB_PTE_WAY_BITS-1 downto 0);
        # type tlb_ptes_t is array(tlb_index_t) of tlb_way_ptes_t;
        # type hit_way_set_t is array(tlb_way_t) of way_t;
        TLB_WAY = TLB_NUM_WAYS

        TLB_INDEX = TLB_SET_SIZE

        TLB_WAY_VALID_BITS = TLB_NUM_WAYS

        def TLBValidBitsArray():
            return Array(
                Signal(TLB_WAY_VALID_BITS) for x in range(TLB_SET_SIZE)
            )

        TLB_TAG = TLB_EA_TAG_BITS

        TLB_WAY_TAGS = TLB_TAG_WAY_BITS

        def TLBTagsArray():
            return Array(
                Signal(TLB_WAY_TAGS) for x in range(TLB_SET_SIZE)
            )

        TLB_PTE = TLB_PTE_BITS

        TLB_WAY_PTES = TLB_PTE_WAY_BITS

        def TLBPtesArray():
            return Array(
                Signal(TLB_WAY_PTES) for x in range(TLB_SET_SIZE)
            )

        def HitWaySet():
            return Array(Signal(NUM_WAYS) for x in range(TLB_NUM_WAYS))
        # signal dtlb_valids : tlb_valids_t;
        # signal dtlb_tags   : tlb_tags_t;
        # signal dtlb_ptes   : tlb_ptes_t;

        """note: these are passed to nmigen.hdl.Memory as "attributes".
           don't know how, just that they are.
        """
        # attribute ram_style of dtlb_tags : signal is "distributed";
        # attribute ram_style of dtlb_ptes : signal is "distributed";
        dtlb_valids = TLBValidBitsArray()
        dtlb_tags   = TLBTagsArray()
        dtlb_ptes   = TLBPtesArray()
        # TODO attribute ram_style of dtlb_tags : signal is "distributed";
        # TODO attribute ram_style of dtlb_ptes : signal is "distributed";

        # signal r0      : reg_stage_0_t;
        # signal r0_full : std_ulogic;
        r0      = RegStage0()
        r0_full = Signal()
        # -- First stage register, contains state for stage 1 of load hits
        # -- and for the state machine used by all other operations
        # type reg_stage_1_t is record
        #     -- Info about the request
        #     full    : std_ulogic; -- have uncompleted request
        #     mmu_req : std_ulogic; -- request is from MMU
        #     req     : mem_access_request_t;
        #     hit_load_valid : std_ulogic;
        #     hit_index      : index_t;
        #     cache_hit      : std_ulogic;
        #     tlb_hit        : std_ulogic;
        #     tlb_hit_way    : tlb_way_t;
        #     tlb_hit_index  : tlb_index_t;
        #     -- 2-stage data buffer for data forwarded from writes to reads
        #     forward_data1  : std_ulogic_vector(63 downto 0);
        #     forward_data2  : std_ulogic_vector(63 downto 0);
        #     forward_sel1   : std_ulogic_vector(7 downto 0);
        #     forward_valid1 : std_ulogic;
        #     forward_way1   : way_t;
        #     forward_row1   : row_t;
        #     use_forward1   : std_ulogic;
        #     forward_sel    : std_ulogic_vector(7 downto 0);
        #     -- Cache miss state (reload state machine)
        #     write_bram   : std_ulogic;
        #     write_tag    : std_ulogic;
        #     slow_valid   : std_ulogic;
        #     wb           : wishbone_master_out;
        #     reload_tag   : cache_tag_t;
        #     store_index  : index_t;
        #     end_row_ix   : row_in_line_t;
        #     rows_valid   : row_per_line_valid_t;
        #     acks_pending : unsigned(2 downto 0);
        #     inc_acks     : std_ulogic;
        #     dec_acks     : std_ulogic;
        #     -- Signals to complete (possibly with error)
        #     ls_valid      : std_ulogic;
        #     ls_error      : std_ulogic;
        #     mmu_done      : std_ulogic;
        #     mmu_error     : std_ulogic;
        #     cache_paradox : std_ulogic;
        #     -- Signal to complete a failed stcx.
        #     stcx_fail : std_ulogic;
        # end record;
        # First stage register, contains state for stage 1 of load hits
        # and for the state machine used by all other operations
        class RegStage1(RecordObject):
            def __init__(self):
                super().__init__()
                # Info about the request
                self.full    = Signal() # have uncompleted request
                self.mmu_req = Signal() # request is from MMU
                self.req     = MemAccessRequest()

                # Cache hit state
                self.hit_way        = Signal(WAY_BITS)
                self.hit_load_valid = Signal()
                self.hit_index      = Signal(INDEX)
                self.cache_hit      = Signal()

                # TLB hit state
                self.tlb_hit       = Signal()
                self.tlb_hit_way   = Signal(TLB_WAY)
                self.tlb_hit_index = Signal(TLB_SET_SIZE)

                # 2-stage data buffer for data forwarded from writes to reads
                self.forward_data1  = Signal(64)
                self.forward_data2  = Signal(64)
                self.forward_sel1   = Signal(8)
                self.forward_valid1 = Signal()
                self.forward_way1   = Signal(WAY_BITS)
                self.forward_row1   = Signal(ROW)
                self.use_forward1   = Signal()
                self.forward_sel    = Signal(8)

                # Cache miss state (reload state machine)
                self.write_bram   = Signal()
                self.write_tag    = Signal()
                self.slow_valid   = Signal()
                self.wb           = WBMasterOut()
                self.reload_tag   = Signal(CACHE_TAG)
                self.store_way    = Signal(WAY_BITS)
                self.store_row    = Signal(ROW)
                self.store_index  = Signal(INDEX)
                self.end_row_ix   = Signal(ROW_IN_LINE)
                self.rows_valid   = RowPerLineValidArray()
                self.acks_pending = Signal(3)
                self.inc_acks     = Signal()
                self.dec_acks     = Signal()

                # Signals to complete (possibly with error)
                self.ls_valid      = Signal()
                self.ls_error      = Signal()
                self.mmu_done      = Signal()
                self.mmu_error     = Signal()
                self.cache_paradox = Signal()

                # Signal to complete a failed stcx.
                self.stcx_fail = Signal()
        # signal r1 : reg_stage_1_t;
        r1 = RegStage1()

        # -- Reservation information
        # type reservation_t is record
        #     valid : std_ulogic;
        #     addr  : std_ulogic_vector(63 downto LINE_OFF_BITS);
        # end record;
        # Reservation information
        class Reservation(RecordObject):
            def __init__(self):
                super().__init__()
                self.valid = Signal()
                # TODO LINE_OFF_BITS is 6
                self.addr  = Signal(64 - LINE_OFF_BITS)

        # signal reservation : reservation_t;
        reservation = Reservation()
        # -- Async signals on incoming request
        # signal req_index    : index_t;
        # signal req_row      : row_t;
        # signal req_hit_way  : way_t;
        # signal req_tag      : cache_tag_t;
        # signal req_op       : op_t;
        # signal req_data     : std_ulogic_vector(63 downto 0);
        # signal req_same_tag : std_ulogic;
        # signal req_go       : std_ulogic;
        # Async signals on incoming request
        req_index    = Signal(INDEX)
        req_row      = Signal(ROW)
        req_hit_way  = Signal(WAY_BITS)
        req_tag      = Signal(CACHE_TAG)
        req_op       = Signal(Op)
        req_data     = Signal(64)
        req_same_tag = Signal()
        req_go       = Signal()
        # signal early_req_row : row_t;
        #
        # signal cancel_store : std_ulogic;
        # signal set_rsrv     : std_ulogic;
        # signal clear_rsrv   : std_ulogic;
        #
        # signal r0_valid : std_ulogic;
        # signal r0_stall : std_ulogic;
        #
        # signal use_forward1_next : std_ulogic;
        # signal use_forward2_next : std_ulogic;
        early_req_row = Signal(ROW)

        cancel_store = Signal()
        set_rsrv     = Signal()
        clear_rsrv   = Signal()

        r0_valid = Signal()
        r0_stall = Signal()

        use_forward1_next = Signal()
        use_forward2_next = Signal()
        # -- Cache RAM interface
        # type cache_ram_out_t is array(way_t) of cache_row_t;
        # signal cache_out : cache_ram_out_t;
        # Cache RAM interface
        def CacheRamOut():
            return Array(Signal(CACHE_ROW) for x in range(NUM_WAYS))

        cache_out = CacheRamOut()
        # -- PLRU output interface
        # type plru_out_t is array(index_t) of
        #     std_ulogic_vector(WAY_BITS-1 downto 0);
        # signal plru_victim : plru_out_t;
        # signal replace_way : way_t;
        # PLRU output interface
        def PLRUOut():
            return Array(Signal(WAY_BITS) for x in range(INDEX))

        plru_victim = PLRUOut()
        replace_way = Signal(WAY_BITS)
        # -- Wishbone read/write/cache write formatting signals
        # signal bus_sel : std_ulogic_vector(7 downto 0);
        # Wishbone read/write/cache write formatting signals
        bus_sel = Signal(8)

        # signal tlb_tag_way   : tlb_way_tags_t;
        # signal tlb_pte_way   : tlb_way_ptes_t;
        # signal tlb_valid_way : tlb_way_valids_t;
        # signal tlb_req_index : tlb_index_t;
        # signal tlb_hit       : std_ulogic;
        # signal tlb_hit_way   : tlb_way_t;
        # signal pte           : tlb_pte_t;
        # signal ra            : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
        # signal valid_ra      : std_ulogic;
        # signal perm_attr     : perm_attr_t;
        # signal rc_ok         : std_ulogic;
        # signal perm_ok       : std_ulogic;
        # signal access_ok     : std_ulogic;
        tlb_tag_way   = Signal(TLB_WAY_TAGS)
        tlb_pte_way   = Signal(TLB_WAY_PTES)
        tlb_valid_way = Signal(TLB_WAY_VALID_BITS)
        tlb_req_index = Signal(TLB_SET_SIZE)
        tlb_hit       = Signal()
        tlb_hit_way   = Signal(TLB_WAY)
        pte           = Signal(TLB_PTE)
        ra            = Signal(REAL_ADDR_BITS)
        valid_ra      = Signal()
        perm_attr     = PermAttr()
        rc_ok         = Signal()
        perm_ok       = Signal()
        access_ok     = Signal()
        # -- TLB PLRU output interface
        # type tlb_plru_out_t is array(tlb_index_t) of
        #     std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
        # signal tlb_plru_victim : tlb_plru_out_t;
        # TLB PLRU output interface
        def TLBPLRUOut():
            return Array(Signal(TLB_WAY_BITS) for x in range(TLB_SET_SIZE))

        tlb_plru_victim = TLBPLRUOut()
        # -- Helper functions to decode incoming requests
        #
        # -- Return the cache line index (tag index) for an address
        # function get_index(addr: std_ulogic_vector) return index_t is
        #     unsigned(addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS))
        # Helper functions to decode incoming requests
        #
        # Return the cache line index (tag index) for an address
        def get_index(addr):
            return addr[LINE_OFF_BITS:SET_SIZE_BITS]

        # -- Return the cache row index (data memory) for an address
        # function get_row(addr: std_ulogic_vector) return row_t is
        #     unsigned(addr(SET_SIZE_BITS - 1 downto ROW_OFF_BITS))
        # Return the cache row index (data memory) for an address
        def get_row(addr):
            return addr[ROW_OFF_BITS:SET_SIZE_BITS]
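        # Note on the translation convention used for the helpers above and
        # below (a sketch, not part of the original VHDL): a VHDL slice
        # written addr(HI - 1 downto LO) becomes the Python/nmigen slice
        # addr[LO:HI].  For example the index extraction above,
        # addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS), is addr[6:11] with
        # the default geometry, i.e. address bits 6..10.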
        # -- Return the index of a row within a line
        # function get_row_of_line(row: row_t) return row_in_line_t is
        #     variable row_v : unsigned(ROW_BITS-1 downto 0);
        # begin
        #     row_v := to_unsigned(row, ROW_BITS);
        #     return row_v(ROW_LINEBITS-1 downto 0);
        # Return the index of a row within a line
        def get_row_of_line(row):
            row_v = Signal(ROW_BITS)
            row_v = row
            return row_v[0:ROW_LINE_BITS]
        # -- Returns whether this is the last row of a line
        # function is_last_row_addr(addr: wishbone_addr_type;
        #     last: row_in_line_t) return boolean is
        #     unsigned(addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS)) = last;
        # Returns whether this is the last row of a line
        def is_last_row_addr(addr, last):
            return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

        # -- Returns whether this is the last row of a line
        # function is_last_row(row: row_t; last: row_in_line_t)
        #     return get_row_of_line(row) = last;
        # Returns whether this is the last row of a line
        def is_last_row(row, last):
            return get_row_of_line(row) == last
        # -- Return the address of the next row in the current cache line
        # function next_row_addr(addr: wishbone_addr_type)
        #     return std_ulogic_vector is
        #     variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
        #     variable result  : wishbone_addr_type;
        # begin
        #     -- Is there no simpler way in VHDL to
        #     -- generate that 3 bits adder ?
        #     row_idx := addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS);
        #     row_idx := std_ulogic_vector(unsigned(row_idx) + 1);
        #     result(LINE_OFF_BITS-1 downto ROW_OFF_BITS) := row_idx;
        # Return the address of the next row in the current cache line
        def next_row_addr(addr):
            row_idx = Signal(ROW_LINE_BITS)
            result  = WBAddrType()
            # Is there no simpler way in VHDL to
            # generate that 3 bits adder ?
            row_idx = addr[ROW_OFF_BITS:LINE_OFF_BITS]
            row_idx = Signal(row_idx + 1)
            result[ROW_OFF_BITS:LINE_OFF_BITS] = row_idx
            return result
        # -- Return the next row in the current cache line. We use a
        # -- dedicated function in order to limit the size of the
        # -- generated adder to be only the bits within a cache line
        # -- (3 bits with default settings)
        # function next_row(row: row_t) return row_t is
        #     variable row_v   : std_ulogic_vector(ROW_BITS-1 downto 0);
        #     variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
        #     variable result  : std_ulogic_vector(ROW_BITS-1 downto 0);
        # begin
        #     row_v := std_ulogic_vector(to_unsigned(row, ROW_BITS));
        #     row_idx := row_v(ROW_LINEBITS-1 downto 0);
        #     row_v(ROW_LINEBITS-1 downto 0) :=
        #         std_ulogic_vector(unsigned(row_idx) + 1);
        #     return to_integer(unsigned(row_v));
        # Return the next row in the current cache line. We use a
        # dedicated function in order to limit the size of the
        # generated adder to be only the bits within a cache line
        # (3 bits with default settings)
        def next_row(row):
            row_v   = Signal(ROW_BITS)
            row_idx = Signal(ROW_LINE_BITS)
            result  = Signal(ROW_BITS)
            row_v   = row
            row_idx = row_v[0:ROW_LINE_BITS]
            row_v[0:ROW_LINE_BITS] = Signal(row_idx + 1)
            return row_v
        # -- Get the tag value from the address
        # function get_tag(addr: std_ulogic_vector) return cache_tag_t is
        #     return addr(REAL_ADDR_BITS - 1 downto SET_SIZE_BITS);
        # Get the tag value from the address
        def get_tag(addr):
            return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
        # -- Read a tag from a tag memory row
        # function read_tag(way: way_t; tagset: cache_tags_set_t)
        #     return cache_tag_t is
        #     return tagset(way * TAG_WIDTH + TAG_BITS
        #                   - 1 downto way * TAG_WIDTH);
        # Read a tag from a tag memory row
        def read_tag(way, tagset):
            return tagset[way * TAG_WIDTH:way * TAG_WIDTH + TAG_BITS]
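        # A worked illustration of the flattened tag-RAM layout assumed by
        # read_tag() (using the default geometry, TAG_WIDTH=48, TAG_BITS=45):
        # each row of the tag LUTRAM holds all NUM_WAYS tags side by side,
        # with way w occupying bits [w*48 : w*48+45] and the remaining
        # 3 bits of each 48-bit slot left as padding so every way starts
        # on a byte boundary.  The TLB tag/PTE helpers below use the same
        # scheme with TLB_EA_TAG_BITS and TLB_PTE_BITS wide slots.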
        # -- Read a TLB tag from a TLB tag memory row
        # function read_tlb_tag(way: tlb_way_t; tags: tlb_way_tags_t)
        #     return tlb_tag_t is
        #     variable j : integer;
        #     j := way * TLB_EA_TAG_BITS;
        #     return tags(j + TLB_EA_TAG_BITS - 1 downto j);
        # Read a TLB tag from a TLB tag memory row
        def read_tlb_tag(way, tags):
            j = way * TLB_EA_TAG_BITS
            return tags[j:j + TLB_EA_TAG_BITS]

        # -- Write a TLB tag to a TLB tag memory row
        # procedure write_tlb_tag(way: tlb_way_t; tags: inout tlb_way_tags_t;
        #     variable j : integer;
        #     j := way * TLB_EA_TAG_BITS;
        #     tags(j + TLB_EA_TAG_BITS - 1 downto j) := tag;
        # Write a TLB tag to a TLB tag memory row
        def write_tlb_tag(way, tags, tag):
            j = way * TLB_EA_TAG_BITS
            tags[j:j + TLB_EA_TAG_BITS] = tag
        # -- Read a PTE from a TLB PTE memory row
        # function read_tlb_pte(way: tlb_way_t; ptes: tlb_way_ptes_t)
        #     return tlb_pte_t is
        #     variable j : integer;
        #     j := way * TLB_PTE_BITS;
        #     return ptes(j + TLB_PTE_BITS - 1 downto j);
        # Read a PTE from a TLB PTE memory row
        def read_tlb_pte(way, ptes):
            j = way * TLB_PTE_BITS
            return ptes[j:j + TLB_PTE_BITS]

        # procedure write_tlb_pte(way: tlb_way_t;
        #     ptes: inout tlb_way_ptes_t; newpte: tlb_pte_t) is
        #     variable j : integer;
        #     j := way * TLB_PTE_BITS;
        #     ptes(j + TLB_PTE_BITS - 1 downto j) := newpte;
        def write_tlb_pte(way, ptes, newpte):
            j = way * TLB_PTE_BITS
            ptes[j:j + TLB_PTE_BITS] = newpte
825 """these, because they are constants, can actually be done *as*
827 assert LINE_SIZE % ROWSIZE == 0, "line size not ...."
829 # assert LINE_SIZE mod ROW_SIZE = 0
830 # report "LINE_SIZE not multiple of ROW_SIZE" severity FAILURE;
831 # assert ispow2(LINE_SIZE)
832 # report "LINE_SIZE not power of 2" severity FAILURE;
833 # assert ispow2(NUM_LINES)
834 # report "NUM_LINES not power of 2" severity FAILURE;
835 # assert ispow2(ROW_PER_LINE)
836 # report "ROW_PER_LINE not power of 2" severity FAILURE;
837 # assert (ROW_BITS = INDEX_BITS + ROW_LINEBITS)
838 # report "geometry bits don't add up" severity FAILURE;
839 # assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS)
840 # report "geometry bits don't add up" severity FAILURE;
841 # assert (REAL_ADDR_BITS = TAG_BITS + INDEX_BITS + LINE_OFF_BITS)
842 # report "geometry bits don't add up" severity FAILURE;
843 # assert (REAL_ADDR_BITS = TAG_BITS + ROW_BITS + ROW_OFF_BITS)
844 # report "geometry bits don't add up" severity FAILURE;
845 # assert (64 = wishbone_data_bits)
846 # report "Can't yet handle a wishbone width that isn't 64-bits"
848 # assert SET_SIZE_BITS <= TLB_LG_PGSZ
849 # report "Set indexed by virtual address" severity FAILURE;
850 assert (LINE_SIZE
% ROW_SIZE
) == 0 "LINE_SIZE not " \
851 "multiple of ROW_SIZE"
853 assert (LINE_SIZE
% 2) == 0 "LINE_SIZE not power of 2"
855 assert (NUM_LINES
% 2) == 0 "NUM_LINES not power of 2"
857 assert (ROW_PER_LINE
% 2) == 0 "ROW_PER_LINE not" \
860 assert ROW_BITS
== (INDEX_BITS
+ ROW_LINE_BITS
) \
861 "geometry bits don't add up"
863 assert (LINE_OFF_BITS
= ROW_OFF_BITS
+ ROW_LINEBITS
) \
864 "geometry bits don't add up"
866 assert REAL_ADDR_BITS
== (TAG_BITS
+ INDEX_BITS \
867 + LINE_OFF_BITS
) "geometry bits don't add up"
869 assert REAL_ADDR_BITS
== (TAG_BITS
+ ROW_BITS
+ ROW_OFF_BITS
) \
870 "geometry bits don't add up"
872 assert 64 == wishbone_data_bits
"Can't yet handle a" \
873 "wishbone width that isn't 64-bits"
875 assert SET_SIZE_BITS
<= TLB_LG_PGSZ
"Set indexed by" \
        # -- Latch the request in r0.req as long as we're not stalling
        # stage_0 : process(clk)
        # Latch the request in r0.req as long as we're not stalling
        class Stage0(Elaboratable):
            def elaborate(self, platform):
                # variable r : reg_stage_0_t;
                r = RegStage0()

                # if rising_edge(clk) then
                #     assert (d_in.valid and m_in.valid) = '0'
                #         report "request collision loadstore vs MMU";
                assert ~(d_in.valid & m_in.valid), \
                    "request collision loadstore vs MMU"

                # if m_in.valid = '1' then
                with m.If(m_in.valid):
                    # r.req.valid := '1';
                    # r.req.load := not (m_in.tlbie or m_in.tlbld);
                    # r.req.reserve := '0';
                    # r.req.virt_mode := '0';
                    # r.req.priv_mode := '1';
                    # r.req.addr := m_in.addr;
                    # r.req.data := m_in.pte;
                    # r.req.byte_sel := (others => '1');
                    # r.tlbie := m_in.tlbie;
                    # r.doall := m_in.doall;
                    # r.tlbld := m_in.tlbld;
                    sync += r.req.valid.eq(1)
                    sync += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))
                    sync += r.req.priv_mode.eq(1)
                    sync += r.req.addr.eq(m_in.addr)
                    sync += r.req.data.eq(m_in.pte)
                    sync += r.req.byte_sel.eq(-1)
                    sync += r.tlbie.eq(m_in.tlbie)
                    sync += r.doall.eq(m_in.doall)
                    sync += r.tlbld.eq(m_in.tlbld)
                    sync += r.mmu_req.eq(1)
                with m.Else():
                    sync += r.req.eq(d_in)

                # elsif r1.full = '0' or r0_full = '0' then
                with m.If(~r1.full | ~r0_full):
                    # r0_full <= r.req.valid;
                    sync += r0_full.eq(r.req.valid)
        # -- we don't yet handle collisions between loadstore1 requests
        # -- and MMU requests
        # m_out.stall <= '0';
        # we don't yet handle collisions between loadstore1 requests
        # and MMU requests
        comb += m_out.stall.eq(0)

        # -- Hold off the request in r0 when r1 has an uncompleted request
        # r0_stall <= r0_full and r1.full;
        # r0_valid <= r0_full and not r1.full;
        # stall_out <= r0_stall;
        # Hold off the request in r0 when r1 has an uncompleted request
        comb += r0_stall.eq(r0_full & r1.full)
        comb += r0_valid.eq(r0_full & ~r1.full)
        comb += stall_out.eq(r0_stall)
        # -- Operates in the second cycle on the request latched in r0.req.
        # -- TLB updates write the entry at the end of the second cycle.
        # tlb_read : process(clk)
        # Operates in the second cycle on the request latched in r0.req.
        # TLB updates write the entry at the end of the second cycle.
        class TLBRead(Elaboratable):
            def elaborate(self, platform):
                # variable index : tlb_index_t;
                # variable addrbits :
                #     std_ulogic_vector(TLB_SET_BITS - 1 downto 0);
                index    = Signal(TLB_SET_BITS)
                addrbits = Signal(TLB_SET_BITS)

                # if rising_edge(clk) then
                #     if m_in.valid = '1' then
                with m.If(m_in.valid):
                    # addrbits := m_in.addr(TLB_LG_PGSZ + TLB_SET_BITS
                    #                       - 1 downto TLB_LG_PGSZ);
                    sync += addrbits.eq(m_in.addr[
                        TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS
                    ])
                with m.Else():
                    # addrbits := d_in.addr(TLB_LG_PGSZ + TLB_SET_BITS
                    #                       - 1 downto TLB_LG_PGSZ);
                    sync += addrbits.eq(d_in.addr[
                        TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS
                    ])

                # index := to_integer(unsigned(addrbits));
                sync += index.eq(addrbits)

                # -- If we have any op and the previous op isn't finished,
                # -- then keep the same output for next cycle.
                # if r0_stall = '0' then
                # If we have any op and the previous op isn't finished,
                # then keep the same output for next cycle.
                with m.If(~r0_stall):
                    sync += tlb_valid_way.eq(dtlb_valids[index])
                    sync += tlb_tag_way.eq(dtlb_tags[index])
                    sync += tlb_pte_way.eq(dtlb_ptes[index])
        # -- Generate TLB PLRUs
        # maybe_tlb_plrus: if TLB_NUM_WAYS > 1 generate
        # Generate TLB PLRUs
        class MaybeTLBPLRUs(Elaboratable):
            def elaborate(self, platform):
                with m.If(TLB_NUM_WAYS > 1):
                    # TODO understand how to convert generate statements
                    # tlb_plrus: for i in 0 to TLB_SET_SIZE - 1 generate
                    #     -- TLB PLRU interface
                    #     signal tlb_plru_acc :
                    #         std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
                    #     signal tlb_plru_acc_en : std_ulogic;
                    #     signal tlb_plru_out :
                    #         std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
                    #
                    #     tlb_plru : entity work.plru
                    #         BITS => TLB_WAY_BITS
                    #         acc    => tlb_plru_acc,
                    #         acc_en => tlb_plru_acc_en,
                    #         lru    => tlb_plru_out
                    #
                    #     if r1.tlb_hit_index = i then
                    #         tlb_plru_acc_en <= r1.tlb_hit;
                    #     else
                    #         tlb_plru_acc_en <= '0';
                    #     tlb_plru_acc <= std_ulogic_vector(to_unsigned(
                    #         r1.tlb_hit_way, TLB_WAY_BITS
                    #     ));
                    #     tlb_plru_victim(i) <= tlb_plru_out;
                    pass
        # tlb_search : process(all)
        class TLBSearch(Elaboratable):
            def elaborate(self, platform):
                # variable hitway : tlb_way_t;
                # variable hit    : std_ulogic;
                # variable eatag  : tlb_tag_t;
                hitway = Signal(TLB_WAY)
                hit    = Signal()
                eatag  = Signal(TLB_TAG)

                # tlb_req_index <=
                #     to_integer(unsigned(r0.req.addr(
                #         TLB_LG_PGSZ + TLB_SET_BITS - 1 downto TLB_LG_PGSZ
                #     )));
                # eatag := r0.req.addr(63 downto TLB_LG_PGSZ + TLB_SET_BITS);
                # for i in tlb_way_t loop
                #     if tlb_valid_way(i) = '1' and
                #         read_tlb_tag(i, tlb_tag_way) = eatag then
                # tlb_hit <= hit and r0_valid;
                # tlb_hit_way <= hitway;
                comb += tlb_req_index.eq(r0.req.addr[
                    TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS
                ])

                comb += eatag.eq(r0.req.addr[
                    TLB_LG_PGSZ + TLB_SET_BITS:64
                ])

                for i in range(TLB_WAY):
                    with m.If(tlb_valid_way[i]
                              & (read_tlb_tag(i, tlb_tag_way) == eatag)):
                        comb += hitway.eq(i)
                        comb += hit.eq(1)

                comb += tlb_hit.eq(hit & r0_valid)
                comb += tlb_hit_way.eq(hitway)

                # if tlb_hit = '1' then
                with m.If(tlb_hit):
                    # pte <= read_tlb_pte(hitway, tlb_pte_way);
                    comb += pte.eq(read_tlb_pte(hitway, tlb_pte_way))
                # else
                #     pte <= (others => '0');

                # valid_ra <= tlb_hit or not r0.req.virt_mode;
                comb += valid_ra.eq(tlb_hit | ~r0.req.virt_mode)

                # if r0.req.virt_mode = '1' then
                with m.If(r0.req.virt_mode):
                    # ra <= pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ) &
                    #       r0.req.addr(TLB_LG_PGSZ - 1 downto ROW_OFF_BITS) &
                    #       (ROW_OFF_BITS-1 downto 0 => '0');
                    # perm_attr <= extract_perm_attr(pte);
                    comb += ra.eq(Cat(
                        Const(0, ROW_OFF_BITS),
                        r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
                        pte[TLB_LG_PGSZ:REAL_ADDR_BITS]
                    ))
                    comb += perm_attr.eq(extract_perm_attr(pte))
                with m.Else():
                    # ra <= r0.req.addr(
                    #     REAL_ADDR_BITS - 1 downto ROW_OFF_BITS
                    # ) & (ROW_OFF_BITS-1 downto 0 => '0');
                    comb += ra.eq(Cat(
                        Const(0, ROW_OFF_BITS),
                        r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]
                    ))

                    # perm_attr <= real_mode_perm_attr;
                    comb += perm_attr.reference.eq(1)
                    comb += perm_attr.changed.eq(1)
                    comb += perm_attr.priv.eq(1)
                    comb += perm_attr.nocache.eq(0)
                    comb += perm_attr.rd_perm.eq(1)
                    comb += perm_attr.wr_perm.eq(1)
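                # A worked illustration of the ra composition above (a sketch
                # based on the defaults TLB_LG_PGSZ=12, ROW_OFF_BITS=3,
                # REAL_ADDR_BITS=56): in virtual mode the real address is
                # assembled LSB-first by Cat() as
                #   ra[0:3]   = 0                  (row offset, always zero)
                #   ra[3:12]  = r0.req.addr[3:12]  (offset within the 4K page)
                #   ra[12:56] = pte[12:56]         (real page number from PTE)
                # which mirrors the MSB-first VHDL concatenation in the
                # comment above.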
        # tlb_update : process(clk)
        class TLBUpdate(Elaboratable):
            def elaborate(self, platform):
                # variable tlbie    : std_ulogic;
                # variable tlbwe    : std_ulogic;
                # variable repl_way : tlb_way_t;
                # variable eatag    : tlb_tag_t;
                # variable tagset   : tlb_way_tags_t;
                # variable pteset   : tlb_way_ptes_t;
                tlbie    = Signal()
                tlbwe    = Signal()
                repl_way = Signal(TLB_WAY)
                eatag    = Signal(TLB_TAG)
                tagset   = TLBWayTags()
                pteset   = TLBWayPtes()

                # if rising_edge(clk) then
                # tlbie := r0_valid and r0.tlbie;
                # tlbwe := r0_valid and r0.tlbldoi;
                sync += tlbie.eq(r0_valid & r0.tlbie)
                sync += tlbwe.eq(r0_valid & r0.tlbldoi)

                # if rst = '1' or (tlbie = '1' and r0.doall = '1') then
                # with m.If (TODO understand how signal resets work in nmigen)
                #     -- clear all valid bits at once
                #     for i in tlb_index_t loop
                #         dtlb_valids(i) <= (others => '0');
                # clear all valid bits at once
                for i in range(TLB_SET_SIZE):
                    sync += dtlb_valids[i].eq(0)

                # elsif tlbie = '1' then
                with m.Elif(tlbie):
                    # if tlb_hit = '1' then
                    with m.If(tlb_hit):
                        # dtlb_valids(tlb_req_index)(tlb_hit_way) <= '0';
                        sync += dtlb_valids[tlb_req_index][tlb_hit_way].eq(0)

                # elsif tlbwe = '1' then
                with m.Elif(tlbwe):
                    # if tlb_hit = '1' then
                    with m.If(tlb_hit):
                        # repl_way := tlb_hit_way;
                        sync += repl_way.eq(tlb_hit_way)
                    with m.Else():
                        # repl_way := to_integer(unsigned(
                        #     tlb_plru_victim(tlb_req_index)));
                        sync += repl_way.eq(tlb_plru_victim[tlb_req_index])

                    # eatag := r0.req.addr(
                    #     63 downto TLB_LG_PGSZ + TLB_SET_BITS
                    # );
                    # tagset := tlb_tag_way;
                    # write_tlb_tag(repl_way, tagset, eatag);
                    # dtlb_tags(tlb_req_index) <= tagset;
                    # pteset := tlb_pte_way;
                    # write_tlb_pte(repl_way, pteset, r0.req.data);
                    # dtlb_ptes(tlb_req_index) <= pteset;
                    # dtlb_valids(tlb_req_index)(repl_way) <= '1';
                    sync += eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
                    sync += tagset.eq(tlb_tag_way)
                    sync += write_tlb_tag(repl_way, tagset, eatag)
                    sync += dtlb_tags[tlb_req_index].eq(tagset)
                    sync += pteset.eq(tlb_pte_way)
                    sync += write_tlb_pte(repl_way, pteset, r0.req.data)
                    sync += dtlb_ptes[tlb_req_index].eq(pteset)
                    sync += dtlb_valids[tlb_req_index][repl_way].eq(1)
        # maybe_plrus: if NUM_WAYS > 1 generate
        class MaybePLRUs(Elaboratable):
            def elaborate(self, platform):
                # TODO learn translation of generate into nmigen @lkcl
                # plrus: for i in 0 to NUM_LINES-1 generate
                #     signal plru_acc    : std_ulogic_vector(WAY_BITS-1 downto 0);
                #     signal plru_acc_en : std_ulogic;
                #     signal plru_out    : std_ulogic_vector(WAY_BITS-1 downto 0);
                #
                # TODO learn translation of entity, generic map, port map in
                # nmigen
                #     plru : entity work.plru
                #         acc_en => plru_acc_en,
                #
                #     if r1.hit_index = i then
                with m.If(r1.hit_index == i):
                    # plru_acc_en <= r1.cache_hit;
                    comb += plru_acc_en.eq(r1.cache_hit)
                with m.Else():
                    # plru_acc_en <= '0';
                    comb += plru_acc_en.eq(0)

                # plru_acc <= std_ulogic_vector(to_unsigned(
                #     r1.hit_way, WAY_BITS
                # ));
                # plru_victim(i) <= plru_out;
                comb += plru_acc.eq(r1.hit_way)
                comb += plru_victim[i].eq(plru_out)
        # -- Cache tag RAM read port
        # cache_tag_read : process(clk)
        # Cache tag RAM read port
        class CacheTagRead(Elaboratable):
            def elaborate(self, platform):
                # variable index : index_t;
                index = Signal(INDEX)

                # if rising_edge(clk) then
                #     if r0_stall = '1' then
                with m.If(r0_stall):
                    # index := req_index;
                    sync += index.eq(req_index)
                # elsif m_in.valid = '1' then
                with m.Elif(m_in.valid):
                    # index := get_index(m_in.addr);
                    sync += index.eq(get_index(m_in.addr))
                with m.Else():
                    # index := get_index(d_in.addr);
                    sync += index.eq(get_index(d_in.addr))

                # cache_tag_set <= cache_tags(index);
                sync += cache_tag_set.eq(cache_tags[index])
        # -- Cache request parsing and hit detection
        # dcache_request : process(all)
        # Cache request parsing and hit detection
        class DcacheRequest(Elaboratable):
            def elaborate(self, platform):
                # variable is_hit  : std_ulogic;
                # variable hit_way : way_t;
                # variable op      : op_t;
                # variable opsel   : std_ulogic_vector(2 downto 0);
                # variable go      : std_ulogic;
                # variable nc      : std_ulogic;
                # variable s_hit   : std_ulogic;
                # variable s_tag   : cache_tag_t;
                # variable s_pte   : tlb_pte_t;
                # variable s_ra    : std_ulogic_vector(
                #     REAL_ADDR_BITS - 1 downto 0
                # );
                # variable hit_set : std_ulogic_vector(
                #     TLB_NUM_WAYS - 1 downto 0
                # );
                # variable hit_way_set : hit_way_set_t;
                # variable rel_matches : std_ulogic_vector(
                #     TLB_NUM_WAYS - 1 downto 0
                # );
                is_hit      = Signal()
                hit_way     = Signal(WAY_BITS)
                op          = Signal(Op)
                opsel       = Signal(3)
                go          = Signal()
                nc          = Signal()
                s_hit       = Signal()
                s_tag       = Signal(CACHE_TAG)
                s_pte       = Signal(TLB_PTE)
                s_ra        = Signal(REAL_ADDR_BITS)
                hit_set     = Signal(TLB_NUM_WAYS)
                hit_way_set = HitWaySet()
                rel_matches = Signal(TLB_NUM_WAYS)
                rel_match   = Signal()
                # -- Extract line, row and tag from request
                # req_index <= get_index(r0.req.addr);
                # req_row <= get_row(r0.req.addr);
                # req_tag <= get_tag(ra);
                #
                # go := r0_valid and not (r0.tlbie or r0.tlbld)
                #       and not r1.ls_error;
                # Extract line, row and tag from request
                comb += req_index.eq(get_index(r0.req.addr))
                comb += req_row.eq(get_row(r0.req.addr))
                comb += req_tag.eq(get_tag(ra))

                comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
                # -- Test if pending request is a hit on any way
                # -- In order to make timing in virtual mode,
                # -- when we are using the TLB, we compare each
                # -- way with each of the real addresses from each way of
                # -- the TLB, and then decide later which match to use.
                #
                # Test if pending request is a hit on any way
                # In order to make timing in virtual mode,
                # when we are using the TLB, we compare each
                # way with each of the real addresses from each way of
                # the TLB, and then decide later which match to use.
                comb += hit_way.eq(0)
                comb += is_hit.eq(0)
                comb += rel_match.eq(0)
                # if r0.req.virt_mode = '1' then
                with m.If(r0.req.virt_mode):
                    # rel_matches := (others => '0');
                    comb += rel_matches.eq(0)
                    # for j in tlb_way_t loop
                    for j in range(TLB_WAY):
                        # hit_way_set(j) := 0;
                        # s_pte := read_tlb_pte(j, tlb_pte_way);
                        # s_ra := s_pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ)
                        #         & r0.req.addr(TLB_LG_PGSZ - 1 downto 0);
                        # s_tag := get_tag(s_ra);
                        comb += hit_way_set[j].eq(0)
                        comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
                        comb += s_ra.eq(Cat(
                            r0.req.addr[0:TLB_LG_PGSZ],
                            s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]
                        ))
                        comb += s_tag.eq(get_tag(s_ra))

                        # for i in way_t loop
                        for i in range(NUM_WAYS):
                            # if go = '1' and cache_valids(req_index)(i) = '1'
                            #     and read_tag(i, cache_tag_set) = s_tag
                            #     and tlb_valid_way(j) = '1' then
                            with m.If(go & cache_valid_bits[req_index][i]
                                      & (read_tag(i, cache_tag_set) == s_tag)
                                      & tlb_valid_way[j]):
                                # hit_way_set(j) := i;
                                comb += hit_way_set[j].eq(i)

                        # hit_set(j) := s_hit;
                        comb += hit_set[j].eq(s_hit)
                        # if s_tag = r1.reload_tag then
                        with m.If(s_tag == r1.reload_tag):
                            # rel_matches(j) := '1';
                            comb += rel_matches[j].eq(1)
                    # if tlb_hit = '1' then
                    with m.If(tlb_hit):
                        # is_hit := hit_set(tlb_hit_way);
                        # hit_way := hit_way_set(tlb_hit_way);
                        # rel_match := rel_matches(tlb_hit_way);
                        comb += is_hit.eq(hit_set[tlb_hit_way])
                        comb += hit_way.eq(hit_way_set[tlb_hit_way])
                        comb += rel_match.eq(rel_matches[tlb_hit_way])
                with m.Else():
                    # s_tag := get_tag(r0.req.addr);
                    comb += s_tag.eq(get_tag(r0.req.addr))
                    # for i in way_t loop
                    for i in range(NUM_WAYS):
                        # if go = '1' and cache_valids(req_index)(i) = '1' and
                        #     read_tag(i, cache_tag_set) = s_tag then
                        with m.If(go & cache_valid_bits[req_index][i]
                                  & (read_tag(i, cache_tag_set) == s_tag)):
                            comb += hit_way.eq(i)
                            comb += is_hit.eq(1)

                    # if s_tag = r1.reload_tag then
                    with m.If(s_tag == r1.reload_tag):
                        comb += rel_match.eq(1)

                # req_same_tag <= rel_match;
                comb += req_same_tag.eq(rel_match)
                # -- See if the request matches the line currently being reloaded
                # if r1.state = RELOAD_WAIT_ACK and req_index = r1.store_index
                #     and rel_match = '1' then
                # See if the request matches the line currently being reloaded
                with m.If((r1.state == State.RELOAD_WAIT_ACK)
                          & (req_index == r1.store_index) & rel_match):
                    # -- For a store, consider this a hit even if the row isn't
                    # -- valid since it will be by the time we perform the store.
                    # -- For a load, check the appropriate row valid bit.
                    # For a store, consider this a hit even if the row isn't
                    # valid since it will be by the time we perform the store.
                    # For a load, check the appropriate row valid bit.
                    # is_hit :=
                    #     not r0.req.load or r1.rows_valid(req_row mod ROW_PER_LINE);
                    # hit_way := replace_way;
                    comb += is_hit.eq(~r0.req.load
                                      | r1.rows_valid[req_row % ROW_PER_LINE])
                    comb += hit_way.eq(replace_way)
                # -- Whether to use forwarded data for a load or not
                # Whether to use forwarded data for a load or not
                # use_forward1_next <= '0';
                comb += use_forward1_next.eq(0)
                # if get_row(r1.req.real_addr) = req_row
                #     and r1.req.hit_way = hit_way then
                with m.If((get_row(r1.req.real_addr) == req_row)
                          & (r1.req.hit_way == hit_way)):
                    # -- Only need to consider r1.write_bram here, since if we
                    # -- are writing refill data here, then we don't have a
                    # -- cache hit this cycle on the line being refilled.
                    # -- (There is the possibility that the load following the
                    # -- load miss that started the refill could be to the old
                    # -- contents of the victim line, since it is a couple of
                    # -- cycles after the refill starts before we see the updated
                    # -- cache tag. In that case we don't use the bypass.)
                    # Only need to consider r1.write_bram here, since if we
                    # are writing refill data here, then we don't have a
                    # cache hit this cycle on the line being refilled.
                    # (There is the possibility that the load following the
                    # load miss that started the refill could be to the old
                    # contents of the victim line, since it is a couple of
                    # cycles after the refill starts before we see the updated
                    # cache tag. In that case we don't use the bypass.)
                    # use_forward1_next <= r1.write_bram;
                    comb += use_forward1_next.eq(r1.write_bram)

                # use_forward2_next <= '0';
                comb += use_forward2_next.eq(0)
                # if r1.forward_row1 = req_row and r1.forward_way1 = hit_way then
                with m.If((r1.forward_row1 == req_row)
                          & (r1.forward_way1 == hit_way)):
                    # use_forward2_next <= r1.forward_valid1;
                    comb += use_forward2_next.eq(r1.forward_valid1)
                # -- The way that matched on a hit
                # The way that matched on a hit
                # req_hit_way <= hit_way;
                comb += req_hit_way.eq(hit_way)

                # -- The way to replace on a miss
                # The way to replace on a miss
                # if r1.write_tag = '1' then
                with m.If(r1.write_tag):
                    # replace_way <= to_integer(unsigned(
                    #     plru_victim(r1.store_index)
                    # ));
                    comb += replace_way.eq(plru_victim[r1.store_index])
                with m.Else():
                    # replace_way <= r1.store_way;
                    comb += replace_way.eq(r1.store_way)
                # -- work out whether we have permission for this access
                # -- NB we don't yet implement AMR, thus no KUAP
                # work out whether we have permission for this access
                # NB we don't yet implement AMR, thus no KUAP
                # rc_ok <= perm_attr.reference and
                #          (r0.req.load or perm_attr.changed);
                # perm_ok <= (r0.req.priv_mode or not perm_attr.priv) and
                #            (perm_attr.wr_perm or (r0.req.load
                #             and perm_attr.rd_perm));
                # access_ok <= valid_ra and perm_ok and rc_ok;
                comb += rc_ok.eq(
                    perm_attr.reference & (r0.req.load | perm_attr.changed)
                )
                comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv)
                                   & (perm_attr.wr_perm
                                      | (r0.req.load & perm_attr.rd_perm)))
                comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
                # -- Combine the request and cache hit status to decide what
                # -- operation needs to be done
                # nc := r0.req.nc or perm_attr.nocache;
                # Combine the request and cache hit status to decide what
                # operation needs to be done
                comb += nc.eq(r0.req.nc | perm_attr.nocache)
                comb += op.eq(Op.OP_NONE)
                # if access_ok = '0' then
                with m.If(~access_ok):
                    comb += op.eq(Op.OP_BAD)
                # elsif cancel_store = '1' then
                with m.Elif(cancel_store):
                    # op := OP_STCX_FAIL;
                    comb += op.eq(Op.OP_STCX_FAIL)
                with m.Else():
                    # opsel := r0.req.load & nc & is_hit;
                    comb += opsel.eq(Cat(is_hit, nc, r0.req.load))

                    with m.Switch(opsel):
                        # when "101" => op := OP_LOAD_HIT;
                        # when "100" => op := OP_LOAD_MISS;
                        # when "110" => op := OP_LOAD_NC;
                        # when "001" => op := OP_STORE_HIT;
                        # when "000" => op := OP_STORE_MISS;
                        # when "010" => op := OP_STORE_MISS;
                        # when "011" => op := OP_BAD;
                        # when "111" => op := OP_BAD;
                        # when others => op := OP_NONE;
                        with m.Case(Const(0b101, 3)):
                            comb += op.eq(Op.OP_LOAD_HIT)
                        with m.Case(Const(0b100, 3)):
                            comb += op.eq(Op.OP_LOAD_MISS)
                        with m.Case(Const(0b110, 3)):
                            comb += op.eq(Op.OP_LOAD_NC)
                        with m.Case(Const(0b001, 3)):
                            comb += op.eq(Op.OP_STORE_HIT)
                        with m.Case(Const(0b000, 3)):
                            comb += op.eq(Op.OP_STORE_MISS)
                        with m.Case(Const(0b010, 3)):
                            comb += op.eq(Op.OP_STORE_MISS)
                        with m.Case(Const(0b011, 3)):
                            comb += op.eq(Op.OP_BAD)
                        with m.Case(Const(0b111, 3)):
                            comb += op.eq(Op.OP_BAD)
                        with m.Default():
                            comb += op.eq(Op.OP_NONE)

                comb += req_op.eq(op)
                comb += req_go.eq(go)
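                # Decode summary (added for illustration, derived from the
                # VHDL cases above) for opsel = Cat(is_hit, nc, r0.req.load),
                # i.e. is_hit is bit 0, nc is bit 1, load is bit 2:
                #
                #   load nc hit -> op
                #    1    0   1    OP_LOAD_HIT
                #    1    0   0    OP_LOAD_MISS
                #    1    1   0    OP_LOAD_NC
                #    0    0   1    OP_STORE_HIT
                #    0    0   0    OP_STORE_MISS
                #    0    1   0    OP_STORE_MISS
                #    x    1   1    OP_BAD  (cache hit on a non-cacheable access)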
                # -- Version of the row number that is valid one cycle earlier
                # -- in the cases where we need to read the cache data BRAM.
                # -- If we're stalling then we need to keep reading the last
                # -- row requested.
                # Version of the row number that is valid one cycle earlier
                # in the cases where we need to read the cache data BRAM.
                # If we're stalling then we need to keep reading the last
                # row requested.
                # if r0_stall = '0' then
                with m.If(~r0_stall):
                    # if m_in.valid = '1' then
                    with m.If(m_in.valid):
                        # early_req_row <= get_row(m_in.addr);
                        comb += early_req_row.eq(get_row(m_in.addr))
                    with m.Else():
                        # early_req_row <= get_row(d_in.addr);
                        comb += early_req_row.eq(get_row(d_in.addr))
                with m.Else():
                    # early_req_row <= req_row;
                    comb += early_req_row.eq(req_row)
        # -- Wire up wishbone request latch out of stage 1
        # wishbone_out <= r1.wb;
        # Wire up wishbone request latch out of stage 1
        comb += wishbone_out.eq(r1.wb)
        # -- Handle load-with-reservation and store-conditional instructions
        # reservation_comb: process(all)
        # Handle load-with-reservation and store-conditional instructions
        class ReservationComb(Elaboratable):
            def elaborate(self, platform):
                # cancel_store <= '0';
                # clear_rsrv <= '0';
                # if r0_valid = '1' and r0.req.reserve = '1' then
                with m.If(r0_valid & r0.req.reserve):
                    # -- XXX generate alignment interrupt if address
                    # -- is not aligned XXX or if r0.req.nc = '1'
                    # if r0.req.load = '1' then
                    # XXX generate alignment interrupt if address
                    # is not aligned XXX or if r0.req.nc = '1'
                    with m.If(r0.req.load):
                        # -- load with reservation
                        # load with reservation
                        comb += set_rsrv.eq(1)
                    with m.Else():
                        # -- store conditional
                        # clear_rsrv <= '1';
                        comb += clear_rsrv.eq(1)
                        # if reservation.valid = '0' or r0.req.addr(63
                        #     downto LINE_OFF_BITS) /= reservation.addr then
                        with m.If(~reservation.valid
                                  | (r0.req.addr[LINE_OFF_BITS:64]
                                     != reservation.addr)):
                            # cancel_store <= '1';
                            comb += cancel_store.eq(1)
        # reservation_reg: process(clk)
        class ReservationReg(Elaboratable):
            def elaborate(self, platform):
                # if rising_edge(clk) then
                #     if rst = '1' then
                #         reservation.valid <= '0';
                # TODO understand how resets work in nmigen
                # elsif r0_valid = '1' and access_ok = '1' then
                with m.Elif(r0_valid & access_ok):
                    # if clear_rsrv = '1' then
                    with m.If(clear_rsrv):
                        # reservation.valid <= '0';
                        sync += reservation.valid.eq(0)
                    # elsif set_rsrv = '1' then
                    with m.Elif(set_rsrv):
                        # reservation.valid <= '1';
                        # reservation.addr <=
                        #     r0.req.addr(63 downto LINE_OFF_BITS);
                        sync += reservation.valid.eq(1)
                        sync += reservation.addr.eq(
                            r0.req.addr[LINE_OFF_BITS:64]
                        )
        # -- Return data for loads & completion control logic
        # writeback_control: process(all)
        # Return data for loads & completion control logic
        class WriteBackControl(Elaboratable):
            def elaborate(self, platform):
                # variable data_out : std_ulogic_vector(63 downto 0);
                # variable data_fwd : std_ulogic_vector(63 downto 0);
                # variable j        : integer;
                data_out = Signal(64)
                data_fwd = Signal(64)

                # -- Use the bypass if are reading the row that was
                # -- written 1 or 2 cycles ago, including for the
                # -- slow_valid = 1 case (i.e. completing a load
                # -- miss or a non-cacheable load).
                # if r1.use_forward1 = '1' then
                # Use the bypass if are reading the row that was
                # written 1 or 2 cycles ago, including for the
                # slow_valid = 1 case (i.e. completing a load
                # miss or a non-cacheable load).
                with m.If(r1.use_forward1):
                    # data_fwd := r1.forward_data1;
                    comb += data_fwd.eq(r1.forward_data1)
                with m.Else():
                    # data_fwd := r1.forward_data2;
                    comb += data_fwd.eq(r1.forward_data2)

                # data_out := cache_out(r1.hit_way);
                comb += data_out.eq(cache_out[r1.hit_way])

                # for i in 0 to 7 loop
                for i in range(8):
                    j = i * 8
                    # if r1.forward_sel(i) = '1' then
                    with m.If(r1.forward_sel[i]):
                        # data_out(j + 7 downto j) := data_fwd(j + 7 downto j);
                        comb += data_out[j:j+8].eq(data_fwd[j:j+8])

                # d_out.valid <= r1.ls_valid;
                # d_out.data <= data_out;
                # d_out.store_done <= not r1.stcx_fail;
                # d_out.error <= r1.ls_error;
                # d_out.cache_paradox <= r1.cache_paradox;
                comb += d_out.valid.eq(r1.ls_valid)
                comb += d_out.data.eq(data_out)
                comb += d_out.store_done.eq(~r1.stcx_fail)
                comb += d_out.error.eq(r1.ls_error)
                comb += d_out.cache_paradox.eq(r1.cache_paradox)

                # m_out.done <= r1.mmu_done;
                # m_out.err <= r1.mmu_error;
                # m_out.data <= data_out;
                comb += m_out.done.eq(r1.mmu_done)
                comb += m_out.err.eq(r1.mmu_error)
                comb += m_out.data.eq(data_out)
                # -- We have a valid load or store hit or we just completed
                # -- a slow op such as a load miss, a NC load or a store
                # --
                # -- Note: the load hit is delayed by one cycle. However it
                # -- can still not collide with r.slow_valid (well unless I
                # -- miscalculated) because slow_valid can only be set on a
                # -- subsequent request and not on its first cycle (the state
                # -- machine must have advanced), which makes slow_valid
                # -- at least 2 cycles from the previous hit_load_valid.
                # --
                # -- Sanity: Only one of these must be set in any given cycle
                # assert (r1.slow_valid and r1.stcx_fail) /= '1'
                #     report "unexpected slow_valid collision with stcx_fail"
                # assert ((r1.slow_valid or r1.stcx_fail) and r1.hit_load_valid)
                #     /= '1' report "unexpected hit_load_delayed collision with
                #     slow_valid" severity FAILURE;
                # We have a valid load or store hit or we just completed
                # a slow op such as a load miss, a NC load or a store
                #
                # Note: the load hit is delayed by one cycle. However it
                # can still not collide with r.slow_valid (well unless I
                # miscalculated) because slow_valid can only be set on a
                # subsequent request and not on its first cycle (the state
                # machine must have advanced), which makes slow_valid
                # at least 2 cycles from the previous hit_load_valid.
                #
                # Sanity: Only one of these must be set in any given cycle
                assert (r1.slow_valid & r1.stcx_fail) != 1, \
                    "unexpected slow_valid collision with stcx_fail " \
                    "-!- severity FAILURE"

                assert ((r1.slow_valid | r1.stcx_fail)
                        & r1.hit_load_valid) != 1, \
                    "unexpected hit_load_delayed collision with slow_valid " \
                    "-!- severity FAILURE"

                # if r1.mmu_req = '0' then
                with m.If(~r1.mmu_req):
                    # -- Request came from loadstore1...
                    # -- Load hit case is the standard path
                    # if r1.hit_load_valid = '1' then
                    # Request came from loadstore1...
                    # Load hit case is the standard path
                    with m.If(r1.hit_load_valid):
                        # report
                        #     "completing load hit data=" & to_hstring(data_out);
                        print(f"completing load hit data={data_out}")

                    # -- error cases complete without stalling
                    # if r1.ls_error = '1' then
                    # error cases complete without stalling
                    with m.If(r1.ls_error):
                        # report "completing ld/st with error";
                        print("completing ld/st with error")

                    # -- Slow ops (load miss, NC, stores)
                    # if r1.slow_valid = '1' then
                    # Slow ops (load miss, NC, stores)
                    with m.If(r1.slow_valid):
                        # report
                        #     "completing store or load miss data="
                        #     & to_hstring(data_out);
                        print(f"completing store or load miss data={data_out}")

                with m.Else():
                    # -- Request came from MMU
                    # if r1.hit_load_valid = '1' then
                    # Request came from MMU
                    with m.If(r1.hit_load_valid):
                        # report "completing load hit to MMU, data="
                        #     & to_hstring(m_out.data);
                        print(f"completing load hit to MMU, data={m_out.data}")

                    # -- error cases complete without stalling
                    # if r1.mmu_error = '1' then
                    #     report "completing MMU ld with error";
                    # error cases complete without stalling
                    with m.If(r1.mmu_error):
                        print("completing MMU ld with error")

                    # -- Slow ops (i.e. load miss)
                    # if r1.slow_valid = '1' then
                    # Slow ops (i.e. load miss)
                    with m.If(r1.slow_valid):
                        # report "completing MMU load miss, data="
                        #     & to_hstring(m_out.data);
                        print(f"completing MMU load miss, data={m_out.data}")
        # -- Generate a cache RAM for each way. This handles the normal
        # -- reads, writes from reloads and the special store-hit update
        # -- path as well.
        # --
        # -- Note: the BRAMs have an extra read buffer, meaning the output
        # -- is pipelined an extra cycle. This differs from the
        # -- icache. The writeback logic needs to take that into
        # -- account by using 1-cycle delayed signals for load hits.
        #
        # rams: for i in 0 to NUM_WAYS-1 generate
        #     signal do_read  : std_ulogic;
        #     signal rd_addr  : std_ulogic_vector(ROW_BITS-1 downto 0);
        #     signal do_write : std_ulogic;
        #     signal wr_addr  : std_ulogic_vector(ROW_BITS-1 downto 0);
        #     signal wr_data  :
        #         std_ulogic_vector(wishbone_data_bits-1 downto 0);
        #     signal wr_sel   : std_ulogic_vector(ROW_SIZE-1 downto 0);
        #     signal wr_sel_m : std_ulogic_vector(ROW_SIZE-1 downto 0);
        #     signal dout     : cache_row_t;
        #
        #     way: entity work.cache_ram
        #         ROW_BITS => ROW_BITS,
        #         WIDTH    => wishbone_data_bits,
        #         rd_addr  => rd_addr,
        #         wr_sel   => wr_sel_m,
        #         wr_addr  => wr_addr,
        #         wr_data  => wr_data
        class TODO(Elaboratable):
            def elaborate(self, platform):
                # -- Cache hit reads
                # do_read <= '1';
                # rd_addr <=
                #     std_ulogic_vector(to_unsigned(early_req_row, ROW_BITS));
                # cache_out(i) <= dout;
                comb += do_read.eq(1)
                comb += rd_addr.eq(early_req_row)
                comb += cache_out[i].eq(dout)
                # -- Write mux:
                # --
                # -- Defaults to wishbone read responses (cache refill)
                # --
                # -- For timing, the mux on wr_data/sel/addr is not
                # -- dependent on anything other than the current state.
                #
                # Write mux:
                #
                # Defaults to wishbone read responses (cache refill)
                #
                # For timing, the mux on wr_data/sel/addr is not
                # dependent on anything other than the current state.
                # wr_sel_m <= (others => '0');
                comb += wr_sel_m.eq(0)

                comb += do_write.eq(0)
                # if r1.write_bram = '1' then
                with m.If(r1.write_bram):
                    # -- Write store data to BRAM.  This happens one
                    # -- cycle after the store is in r0.
                    # Write store data to BRAM.  This happens one
                    # cycle after the store is in r0.
                    # wr_data <= r1.req.data;
                    # wr_sel  <= r1.req.byte_sel;
                    # wr_addr <= std_ulogic_vector(to_unsigned(
                    #     get_row(r1.req.real_addr), ROW_BITS
                    # ));
                    comb += wr_data.eq(r1.req.data)
                    comb += wr_sel.eq(r1.req.byte_sel)
                    comb += wr_addr.eq(get_row(r1.req.real_addr))

                    # if i = r1.req.hit_way then
                    with m.If(i == r1.req.hit_way):
                        comb += do_write.eq(1)
                with m.Else():
                    # -- Otherwise, we might be doing a reload or a DCBZ
                    # if r1.dcbz = '1' then
                    # Otherwise, we might be doing a reload or a DCBZ
                    with m.If(r1.dcbz):
                        # wr_data <= (others => '0');
                        comb += wr_data.eq(0)
                    with m.Else():
                        # wr_data <= wishbone_in.dat;
                        comb += wr_data.eq(wishbone_in.dat)

                    # wr_addr <= std_ulogic_vector(to_unsigned(
                    #     r1.store_row, ROW_BITS
                    # ));
                    # wr_sel <= (others => '1');
                    comb += wr_addr.eq(r1.store_row)
                    comb += wr_sel.eq(-1)
2089 # if r1.state = RELOAD_WAIT_ACK and
2090 # wishbone_in.ack = '1' and replace_way = i then
2091 with m
.If(r1
.state
== State
.RELOAD_WAIT_ACK
& wishbone_in
.ack
2092 & relpace_way
== i
):
2094 comb
+= do_write
.eq(1)
2098 # -- Mask write selects with do_write since BRAM
2099 # -- doesn't have a global write-enable
2100 # if do_write = '1' then
2101 # -- Mask write selects with do_write since BRAM
2102 # -- doesn't have a global write-enable
2103 with m
.If(do_write
):
2104 # wr_sel_m <= wr_sel;
2105 comb
+= wr_sel_m
.eq(wr_sel
)

# -- Cache hit synchronous machine for the easy case.
# -- This handles load hits.
# -- It also handles error cases (TLB miss, cache paradox)
# dcache_fast_hit : process(clk)
# Cache hit synchronous machine for the easy case.
# This handles load hits.
# It also handles error cases (TLB miss, cache paradox)
class DcacheFastHit(Elaboratable):

    def elaborate(self, platform):
        # if rising_edge(clk) then
        # if req_op /= OP_NONE then
        with m.If(req_op != Op.OP_NONE):
            # report "op:" & op_t'image(req_op) &
            #     " addr:" & to_hstring(r0.req.addr) &
            #     " nc:" & std_ulogic'image(r0.req.nc) &
            #     " idx:" & integer'image(req_index) &
            #     " tag:" & to_hstring(req_tag) &
            #     " way: " & integer'image(req_hit_way);
            print(f"op:{req_op} addr:{r0.req.addr} nc: {r0.req.nc} "
                  f"idx:{req_index} tag:{req_tag} way: {req_hit_way}")
        # if r0_valid = '1' then
        with m.If(r0_valid):
            # r1.mmu_req <= r0.mmu_req;
            sync += r1.mmu_req.eq(r0.mmu_req)

        # -- Fast path for load/store hits.
        # -- Set signals for the writeback controls.
        # r1.hit_way   <= req_hit_way;
        # r1.hit_index <= req_index;
        # Fast path for load/store hits.
        # Set signals for the writeback controls.
        sync += r1.hit_way.eq(req_hit_way)
        sync += r1.hit_index.eq(req_index)
        # if req_op = OP_LOAD_HIT then
        with m.If(req_op == Op.OP_LOAD_HIT):
            # r1.hit_load_valid <= '1';
            sync += r1.hit_load_valid.eq(1)
        with m.Else():
            # r1.hit_load_valid <= '0';
            sync += r1.hit_load_valid.eq(0)

        # if req_op = OP_LOAD_HIT or req_op = OP_STORE_HIT then
        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STORE_HIT)):
            # r1.cache_hit <= '1';
            sync += r1.cache_hit.eq(1)
        with m.Else():
            # r1.cache_hit <= '0';
            sync += r1.cache_hit.eq(0)
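        # Editor's note (assumption about nmigen operator precedence): the
        # bitwise operators "&" and "|" bind more tightly than "==", so each
        # comparison in a compound condition has to be parenthesised, as was
        # done above.  For illustration, with throwaway signals a and b:
        #
        #     with m.If((a == 1) | (b == 2)):
        #         sync += some_flag.eq(1)
        #
        # Writing "a == 1 | b == 2" would OR 1 with b first and not mean
        # what the VHDL expresses.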
        # if req_op = OP_BAD then
        with m.If(req_op == Op.OP_BAD):
            # report "Signalling ld/st error valid_ra=" &
            #     std_ulogic'image(valid_ra) & " rc_ok=" &
            #     std_ulogic'image(rc_ok) & " perm_ok=" &
            #     std_ulogic'image(perm_ok);
            print(f"Signalling ld/st error valid_ra={valid_ra} "
                  f"rc_ok={rc_ok} perm_ok={perm_ok}")

            # r1.ls_error      <= not r0.mmu_req;
            # r1.mmu_error     <= r0.mmu_req;
            # r1.cache_paradox <= access_ok;
            sync += r1.ls_error.eq(~r0.mmu_req)
            sync += r1.mmu_error.eq(r0.mmu_req)
            sync += r1.cache_paradox.eq(access_ok)
        with m.Else():
            # r1.ls_error      <= '0';
            # r1.mmu_error     <= '0';
            # r1.cache_paradox <= '0';
            sync += r1.ls_error.eq(0)
            sync += r1.mmu_error.eq(0)
            sync += r1.cache_paradox.eq(0)
        # if req_op = OP_STCX_FAIL then
        with m.If(req_op == Op.OP_STCX_FAIL):
            # r1.stcx_fail <= '1';
            sync += r1.stcx_fail.eq(1)
        with m.Else():
            # r1.stcx_fail <= '0';
            sync += r1.stcx_fail.eq(0)
        # -- Record TLB hit information for updating TLB PLRU
        # r1.tlb_hit       <= tlb_hit;
        # r1.tlb_hit_way   <= tlb_hit_way;
        # r1.tlb_hit_index <= tlb_req_index;
        # Record TLB hit information for updating TLB PLRU
        sync += r1.tlb_hit.eq(tlb_hit)
        sync += r1.tlb_hit_way.eq(tlb_hit_way)
        sync += r1.tlb_hit_index.eq(tlb_req_index)

# -- Memory accesses are handled by this state machine:
# --   * Cache load miss/reload (in conjunction with "rams")
# --   * Load hits for non-cachable forms
# --   * Stores (the collision case is handled in "rams")
# -- All wishbone requests generation is done here.
# -- This machine operates at stage 1.
# dcache_slow : process(clk)
# Memory accesses are handled by this state machine:
#   * Cache load miss/reload (in conjunction with "rams")
#   * Load hits for non-cachable forms
#   * Stores (the collision case is handled in "rams")
# All wishbone requests generation is done here.
# This machine operates at stage 1.
class DcacheSlow(Elaboratable):

    def elaborate(self, platform):
        # variable stbs_done : boolean;
        # variable req       : mem_access_request_t;
        # variable acks      : unsigned(2 downto 0);
        stbs_done = Signal()
        req = MemAccessRequest()
        acks = Signal(3)
        # if rising_edge(clk) then
        # r1.use_forward1 <= use_forward1_next;
        # r1.forward_sel  <= (others => '0');
        sync += r1.use_forward1.eq(use_forward1_next)
        sync += r1.forward_sel.eq(0)

        # if use_forward1_next = '1' then
        with m.If(use_forward1_next):
            # r1.forward_sel <= r1.req.byte_sel;
            sync += r1.forward_sel.eq(r1.req.byte_sel)
        # elsif use_forward2_next = '1' then
        with m.Elif(use_forward2_next):
            # r1.forward_sel <= r1.forward_sel1;
            sync += r1.forward_sel.eq(r1.forward_sel1)

        # r1.forward_data2 <= r1.forward_data1;
        sync += r1.forward_data2.eq(r1.forward_data1)

        # if r1.write_bram = '1' then
        with m.If(r1.write_bram):
            # r1.forward_data1  <= r1.req.data;
            # r1.forward_sel1   <= r1.req.byte_sel;
            # r1.forward_way1   <= r1.req.hit_way;
            # r1.forward_row1   <= get_row(r1.req.real_addr);
            # r1.forward_valid1 <= '1';
            sync += r1.forward_data1.eq(r1.req.data)
            sync += r1.forward_sel1.eq(r1.req.byte_sel)
            sync += r1.forward_way1.eq(r1.req.hit_way)
            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
            sync += r1.forward_valid1.eq(1)
        # if r1.dcbz = '1' then
        with m.If(r1.dcbz):
            # r1.forward_data1 <= (others => '0');
            sync += r1.forward_data1.eq(0)
        with m.Else():
            # r1.forward_data1 <= wishbone_in.dat;
            sync += r1.forward_data1.eq(wb_in.dat)

        # r1.forward_sel1   <= (others => '1');
        # r1.forward_way1   <= replace_way;
        # r1.forward_row1   <= r1.store_row;
        # r1.forward_valid1 <= '0';
        sync += r1.forward_sel1.eq(~0)
        sync += r1.forward_way1.eq(replace_way)
        sync += r1.forward_row1.eq(r1.store_row)
        sync += r1.forward_valid1.eq(0)
        # -- On reset, clear all valid bits to force misses
        # On reset, clear all valid bits to force misses
        # TODO: check how reset is best expressed in nmigen; ResetSignal()
        # (from nmigen) is used here as a stand-in for the VHDL rst input
        with m.If(ResetSignal()):
            # for i in index_t loop
            for i in range(NUM_LINES):
                # cache_valids(i) <= (others => '0');
                sync += cache_valid_bits[i].eq(0)

            # r1.slow_valid <= '0';
            # r1.ls_valid   <= '0';
            # r1.mmu_done   <= '0';
            sync += r1.state.eq(State.IDLE)
            sync += r1.full.eq(0)
            sync += r1.slow_valid.eq(0)
            sync += r1.wb.cyc.eq(0)
            sync += r1.wb.stb.eq(0)
            sync += r1.ls_valid.eq(0)
            sync += r1.mmu_done.eq(0)

            # -- Not useful normally but helps avoiding
            # -- tons of sim warnings
            # Not useful normally but helps avoiding
            # tons of sim warnings
            # r1.wb.adr <= (others => '0');
            sync += r1.wb.adr.eq(0)
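        # Editor's note (assumption): nmigen's "sync" domain carries an
        # implicit synchronous reset, so instead of an explicit
        # "with m.If(ResetSignal()):" block the same effect can usually be
        # had by giving each register its reset value at declaration time,
        # for example:
        #
        #     full = Signal(reset=0)      # automatically cleared on reset
        #
        # The explicit block above is kept only because it mirrors the VHDL.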
        # -- One cycle pulses reset
        # r1.slow_valid <= '0';
        # r1.write_bram <= '0';
        # r1.inc_acks   <= '0';
        # r1.dec_acks   <= '0';
        # r1.ls_valid   <= '0';
        # -- complete tlbies and TLB loads in the third cycle
        # r1.mmu_done   <= r0_valid and (r0.tlbie or r0.tlbld);
        # One cycle pulses reset
        sync += r1.slow_valid.eq(0)
        sync += r1.write_bram.eq(0)
        sync += r1.inc_acks.eq(0)
        sync += r1.dec_acks.eq(0)
        sync += r1.ls_valid.eq(0)
        # complete tlbies and TLB loads in the third cycle
        sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
        # if req_op = OP_LOAD_HIT or req_op = OP_STCX_FAIL then
        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
            # if r0.mmu_req = '0' then
            with m.If(~r0.mmu_req):
                # r1.ls_valid <= '1';
                sync += r1.ls_valid.eq(1)
            with m.Else():
                # r1.mmu_done <= '1';
                sync += r1.mmu_done.eq(1)
        # if r1.write_tag = '1' then
        with m.If(r1.write_tag):
            # -- Store new tag in selected way
            # for i in 0 to NUM_WAYS-1 loop
            # Store new tag in selected way
            for i in range(NUM_WAYS):
                # if i = replace_way then
                with m.If(i == replace_way):
                    # cache_tags(r1.store_index)(
                    #     (i + 1) * TAG_WIDTH - 1
                    #     downto i * TAG_WIDTH
                    # ) <= (TAG_WIDTH - 1 downto TAG_BITS => '0')
                    #      & r1.reload_tag;
                    sync += cache_tags[r1.store_index][
                                i * TAG_WIDTH:(i + 1) * TAG_WIDTH
                            ].eq(r1.reload_tag)

            # r1.store_way <= replace_way;
            # r1.write_tag <= '0';
            sync += r1.store_way.eq(replace_way)
            sync += r1.write_tag.eq(0)
        # -- Take request from r1.req if there is one there,
        # -- else from req_op, ra, etc.
        # if r1.full = '1' then
        # Take request from r1.req if there is one there,
        # else from req_op, ra, etc.
        with m.If(r1.full):
            sync += req.eq(r1.req)
        with m.Else():
            # req.valid     := req_go;
            # req.mmu_req   := r0.mmu_req;
            # req.dcbz      := r0.req.dcbz;
            # req.real_addr := ra;
            sync += req.op.eq(req_op)
            sync += req.valid.eq(req_go)
            sync += req.mmu_req.eq(r0.mmu_req)
            sync += req.dcbz.eq(r0.req.dcbz)
            sync += req.real_addr.eq(ra)
            # -- Force data to 0 for dcbz
            # if r0.req.dcbz = '0' then
            with m.If(~r0.req.dcbz):
                # req.data := r0.req.data;
                sync += req.data.eq(r0.req.data)
            with m.Else():
                # req.data := (others => '0');
                sync += req.data.eq(0)
            # -- Select all bytes for dcbz
            # -- and for cacheable loads
            # if r0.req.dcbz = '1'
            # or (r0.req.load = '1' and r0.req.nc = '0') then
            # Select all bytes for dcbz
            # and for cacheable loads
            with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
                # req.byte_sel := (others => '1');
                sync += req.byte_sel.eq(~0)
            with m.Else():
                # req.byte_sel := r0.req.byte_sel;
                sync += req.byte_sel.eq(r0.req.byte_sel)

            # req.hit_way  := req_hit_way;
            # req.same_tag := req_same_tag;
            sync += req.hit_way.eq(req_hit_way)
            sync += req.same_tag.eq(req_same_tag)
            # -- Store the incoming request from r0,
            # -- if it is a slow request
            # -- Note that r1.full = 1 implies req_op = OP_NONE
            # if req_op = OP_LOAD_MISS or req_op = OP_LOAD_NC
            # or req_op = OP_STORE_MISS
            # or req_op = OP_STORE_HIT then
            # Store the incoming request from r0,
            # if it is a slow request
            # Note that r1.full = 1 implies req_op = OP_NONE
            with m.If((req_op == Op.OP_LOAD_MISS)
                      | (req_op == Op.OP_LOAD_NC)
                      | (req_op == Op.OP_STORE_MISS)
                      | (req_op == Op.OP_STORE_HIT)):
                sync += r1.full.eq(1)
        # -- Main state machine
        # Main state machine
        with m.Switch(r1.state):
            with m.Case(State.IDLE):
                # r1.wb.adr <= req.real_addr(r1.wb.adr'left downto 0);
                # r1.wb.sel <= req.byte_sel;
                # r1.wb.dat <= req.data;
                # r1.dcbz   <= req.dcbz;
                sync += r1.wb.adr.eq(req.real_addr[0:len(r1.wb.adr)])
                sync += r1.wb.sel.eq(req.byte_sel)
                sync += r1.wb.dat.eq(req.data)
                sync += r1.dcbz.eq(req.dcbz)

                # -- Keep track of our index and way
                # -- for subsequent stores.
                # r1.store_index <= get_index(req.real_addr);
                # r1.store_row   <= get_row(req.real_addr);
                # r1.end_row_ix  <=
                #     get_row_of_line(get_row(req.real_addr)) - 1;
                # r1.reload_tag  <= get_tag(req.real_addr);
                # r1.req.same_tag <= '1';
                # Keep track of our index and way
                # for subsequent stores.
                sync += r1.store_index.eq(get_index(req.real_addr))
                sync += r1.store_row.eq(get_row(req.real_addr))
                sync += r1.end_row_ix.eq(
                    get_row_of_line(get_row(req.real_addr)) - 1
                )
                sync += r1.reload_tag.eq(get_tag(req.real_addr))
                sync += r1.req.same_tag.eq(1)
                # if req.op = OP_STORE_HIT then
                with m.If(req.op == Op.OP_STORE_HIT):
                    # r1.store_way <= req.hit_way;
                    sync += r1.store_way.eq(req.hit_way)
                # -- Reset per-row valid bits,
                # -- ready for handling OP_LOAD_MISS
                # for i in 0 to ROW_PER_LINE - 1 loop
                # Reset per-row valid bits,
                # ready for handling OP_LOAD_MISS
                for i in range(ROW_PER_LINE):
                    # r1.rows_valid(i) <= '0';
                    sync += r1.rows_valid[i].eq(0)
                with m.Switch(req.op):
                    # when OP_LOAD_HIT =>
                    with m.Case(Op.OP_LOAD_HIT):
                        # -- stay in IDLE state
                        # stay in IDLE state
                        pass

                    # when OP_LOAD_MISS =>
                    with m.Case(Op.OP_LOAD_MISS):
                        # -- Normal load cache miss,
                        # -- start the reload machine
                        # report "cache miss real addr:" &
                        #     to_hstring(req.real_addr) & " idx:" &
                        #     integer'image(get_index(req.real_addr)) &
                        #     " tag:" & to_hstring(get_tag(req.real_addr));
                        # Normal load cache miss,
                        # start the reload machine
                        print(f"cache miss real addr:{req.real_addr}"
                              f" idx:{get_index(req.real_addr)}"
                              f" tag:{get_tag(req.real_addr)}")
                        # -- Start the wishbone cycle
                        # Start the wishbone cycle
                        sync += r1.wb.we.eq(0)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                        # -- Track that we had one request sent
                        # r1.state     <= RELOAD_WAIT_ACK;
                        # r1.write_tag <= '1';
                        # Track that we had one request sent
                        sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                        sync += r1.write_tag.eq(1)
                    # when OP_LOAD_NC =>
                    with m.Case(Op.OP_LOAD_NC):
                        # r1.state <= NC_LOAD_WAIT_ACK;
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)
                        sync += r1.wb.we.eq(0)
                        sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)
                    # when OP_STORE_HIT | OP_STORE_MISS =>
                    with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
                        # if req.dcbz = '0' then
                        with m.If(~req.dcbz):
                            # r1.state        <= STORE_WAIT_ACK;
                            # r1.acks_pending <= to_unsigned(1, 3);
                            # r1.slow_valid   <= '1';
                            sync += r1.state.eq(State.STORE_WAIT_ACK)
                            sync += r1.acks_pending.eq(1)
                            sync += r1.full.eq(0)
                            sync += r1.slow_valid.eq(1)

                            # if req.mmu_req = '0' then
                            with m.If(~req.mmu_req):
                                # r1.ls_valid <= '1';
                                sync += r1.ls_valid.eq(1)
                            with m.Else():
                                # r1.mmu_done <= '1';
                                sync += r1.mmu_done.eq(1)

                            # if req.op = OP_STORE_HIT then
                            with m.If(req.op == Op.OP_STORE_HIT):
                                # r1.write_bram <= '1';
                                sync += r1.write_bram.eq(1)
                        with m.Else():
                            # -- dcbz is handled much like a load
                            # -- miss except that we are writing
                            # -- to memory instead of reading
                            # r1.state <= RELOAD_WAIT_ACK;
                            # dcbz is handled much like a load
                            # miss except that we are writing
                            # to memory instead of reading
                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)

                            # if req.op = OP_STORE_MISS then
                            with m.If(req.op == Op.OP_STORE_MISS):
                                # r1.write_tag <= '1';
                                sync += r1.write_tag.eq(1)

                        sync += r1.wb.we.eq(1)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)
                    # -- OP_NONE and OP_BAD do nothing
                    # -- OP_BAD & OP_STCX_FAIL were handled above already
                    # when OP_STCX_FAIL =>
                    # OP_NONE and OP_BAD do nothing
                    # OP_BAD & OP_STCX_FAIL were handled above already
                    with m.Case(Op.OP_NONE):
                        pass
                    with m.Case(Op.OP_BAD):
                        pass
                    with m.Case(Op.OP_STCX_FAIL):
                        pass
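                    # Editor's note (assumption): nmigen's m.Case() accepts
                    # several patterns in one call, which is the closest
                    # equivalent of the VHDL "when OP_STORE_HIT | OP_STORE_MISS =>"
                    # above, e.g.:
                    #
                    #     with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
                    #         ...
                    #
                    # OR-ing the two enum values instead would match only the
                    # single value produced by the bitwise OR.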
            # when RELOAD_WAIT_ACK =>
            with m.Case(State.RELOAD_WAIT_ACK):
                # -- Requests are all sent if stb is 0
                # stbs_done := r1.wb.stb = '0';
                # Requests are all sent if stb is 0
                sync += stbs_done.eq(~r1.wb.stb)

                # -- If we are still sending requests,
                # -- was one accepted?
                # if wishbone_in.stall = '0' and not stbs_done then
                # If we are still sending requests,
                # was one accepted?
                with m.If(~wb_in.stall & ~stbs_done):
                    # -- That was the last word ? We are done sending.
                    # -- Clear stb and set stbs_done so we can handle
                    # -- an eventual last ack on the same cycle.
                    # if is_last_row_addr(r1.wb.adr, r1.end_row_ix) then
                    # That was the last word ? We are done sending.
                    # Clear stb and set stbs_done so we can handle
                    # an eventual last ack on the same cycle.
                    with m.If(is_last_row_addr(r1.wb.adr, r1.end_row_ix)):
                        # stbs_done := true;
                        sync += r1.wb.stb.eq(0)
                        sync += stbs_done.eq(1)

                    # -- Calculate the next row address
                    # r1.wb.adr <= next_row_addr(r1.wb.adr);
                    # Calculate the next row address
                    sync += r1.wb.adr.eq(next_row_addr(r1.wb.adr))
                # -- Incoming acks processing
                # r1.forward_valid1 <= wishbone_in.ack;
                # Incoming acks processing
                sync += r1.forward_valid1.eq(wb_in.ack)

                # if wishbone_in.ack = '1' then
                with m.If(wb_in.ack):
                    # r1.rows_valid(r1.store_row mod ROW_PER_LINE) <= '1';
                    sync += r1.rows_valid[
                                r1.store_row % ROW_PER_LINE
                            ].eq(1)

                    # -- If this is the data we were looking for,
                    # -- we can complete the request next cycle.
                    # -- Compare the whole address in case the
                    # -- request in r1.req is not the one that
                    # -- started this refill.
                    # if r1.full = '1' and r1.req.same_tag = '1'
                    # and ((r1.dcbz = '1' and r1.req.dcbz = '1')
                    # or (r1.dcbz = '0' and r1.req.op = OP_LOAD_MISS))
                    # and r1.store_row = get_row(r1.req.real_addr) then
                    # If this is the data we were looking for,
                    # we can complete the request next cycle.
                    # Compare the whole address in case the
                    # request in r1.req is not the one that
                    # started this refill.
                    with m.If(r1.full & r1.req.same_tag &
                              ((r1.dcbz & r1.req.dcbz) |
                               (~r1.dcbz & (r1.req.op == Op.OP_LOAD_MISS))) &
                              (r1.store_row == get_row(r1.req.real_addr))):
                        # r1.slow_valid <= '1';
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)

                        # if r1.mmu_req = '0' then
                        with m.If(~r1.mmu_req):
                            # r1.ls_valid <= '1';
                            sync += r1.ls_valid.eq(1)
                        with m.Else():
                            # r1.mmu_done <= '1';
                            sync += r1.mmu_done.eq(1)

                        # r1.forward_sel  <= (others => '1');
                        # r1.use_forward1 <= '1';
                        sync += r1.forward_sel.eq(~0)
                        sync += r1.use_forward1.eq(1)
                    # -- Check for completion
                    # if stbs_done and is_last_row(r1.store_row,
                    #     r1.end_row_ix) then
                    # Check for completion
                    with m.If(stbs_done &
                              is_last_row(r1.store_row, r1.end_row_ix)):
                        # -- Complete wishbone cycle
                        # Complete wishbone cycle
                        sync += r1.wb.cyc.eq(0)

                        # -- Cache line is now valid
                        # cache_valids(r1.store_index)(r1.store_way) <= '1';
                        # Cache line is now valid
                        sync += cache_valid_bits[
                                    r1.store_index
                                ][r1.store_way].eq(1)

                        sync += r1.state.eq(State.IDLE)

                    # -- Increment store row counter
                    # r1.store_row <= next_row(r1.store_row);
                    # Increment store row counter
                    sync += r1.store_row.eq(next_row(r1.store_row))
            # when STORE_WAIT_ACK =>
            with m.Case(State.STORE_WAIT_ACK):
                # stbs_done := r1.wb.stb = '0';
                # acks      := r1.acks_pending;
                sync += stbs_done.eq(~r1.wb.stb)
                sync += acks.eq(r1.acks_pending)

                # if r1.inc_acks /= r1.dec_acks then
                with m.If(r1.inc_acks != r1.dec_acks):
                    # if r1.inc_acks = '1' then
                    with m.If(r1.inc_acks):
                        sync += acks.eq(acks + 1)
                    with m.Else():
                        sync += acks.eq(acks - 1)

                # r1.acks_pending <= acks;
                sync += r1.acks_pending.eq(acks)
                # -- Clear stb when slave accepted request
                # if wishbone_in.stall = '0' then
                # Clear stb when slave accepted request
                with m.If(~wb_in.stall):
                    # -- See if there is another store waiting
                    # -- to be done which is in the same real page.
                    # if req.valid = '1' then
                    # See if there is another store waiting
                    # to be done which is in the same real page.
                    with m.If(req.valid):
                        # r1.wb.adr(
                        #     SET_SIZE_BITS - 1 downto 0
                        # ) <= req.real_addr(
                        #     SET_SIZE_BITS - 1 downto 0
                        # );
                        # r1.wb.dat <= req.data;
                        # r1.wb.sel <= req.byte_sel;
                        sync += r1.wb.adr[0:SET_SIZE_BITS].eq(
                                    req.real_addr[0:SET_SIZE_BITS])
                        sync += r1.wb.dat.eq(req.data)
                        sync += r1.wb.sel.eq(req.byte_sel)

                    # if acks < 7 and req.same_tag = '1'
                    # and (req.op = OP_STORE_MISS
                    # or req.op = OP_STORE_HIT) then
                    with m.Elif((acks < 7) & req.same_tag &
                                ((req.op == Op.OP_STORE_MISS) |
                                 (req.op == Op.OP_STORE_HIT))):
                        # stbs_done := false;
                        sync += r1.wb.stb.eq(1)
                        sync += stbs_done.eq(0)

                        # if req.op = OP_STORE_HIT then
                        with m.If(req.op == Op.OP_STORE_HIT):
                            # r1.write_bram <= '1';
                            sync += r1.write_bram.eq(1)

                        # r1.slow_valid <= '1';
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)

                        # -- Store requests never come from the MMU
                        # r1.ls_valid <= '1';
                        # stbs_done   := false;
                        # r1.inc_acks <= '1';
                        # Store requests never come from the MMU
                        sync += r1.ls_valid.eq(1)
                        sync += stbs_done.eq(0)
                        sync += r1.inc_acks.eq(1)
                    with m.Else():
                        # stbs_done := true;
                        sync += r1.wb.stb.eq(0)
                        sync += stbs_done.eq(1)
                # -- Got ack ? See if complete.
                # if wishbone_in.ack = '1' then
                # Got ack ? See if complete.
                with m.If(wb_in.ack):
                    # if stbs_done and acks = 1 then
                    with m.If(stbs_done & (acks == 1)):
                        sync += r1.state.eq(State.IDLE)
                        sync += r1.wb.cyc.eq(0)
                        sync += r1.wb.stb.eq(0)

                    # r1.dec_acks <= '1';
                    sync += r1.dec_acks.eq(1)
            # when NC_LOAD_WAIT_ACK =>
            with m.Case(State.NC_LOAD_WAIT_ACK):
                # -- Clear stb when slave accepted request
                # if wishbone_in.stall = '0' then
                # Clear stb when slave accepted request
                with m.If(~wb_in.stall):
                    sync += r1.wb.stb.eq(0)

                # -- Got ack ? complete.
                # if wishbone_in.ack = '1' then
                # Got ack ? complete.
                with m.If(wb_in.ack):
                    # r1.slow_valid <= '1';
                    sync += r1.state.eq(State.IDLE)
                    sync += r1.full.eq(0)
                    sync += r1.slow_valid.eq(1)

                    # if r1.mmu_req = '0' then
                    with m.If(~r1.mmu_req):
                        # r1.ls_valid <= '1';
                        sync += r1.ls_valid.eq(1)
                    with m.Else():
                        # r1.mmu_done <= '1';
                        sync += r1.mmu_done.eq(1)

                    # r1.forward_sel  <= (others => '1');
                    # r1.use_forward1 <= '1';
                    sync += r1.forward_sel.eq(~0)
                    sync += r1.use_forward1.eq(1)
                    sync += r1.wb.cyc.eq(0)
                    sync += r1.wb.stb.eq(0)

# dc_log: if LOG_LENGTH > 0 generate
# TODO: learn how to translate the VHDL "generate" into nmigen
class DcacheLog(Elaboratable):

    def elaborate(self, platform):
        # signal log_data : std_ulogic_vector(19 downto 0);
        log_data = Signal(20)

        # dcache_log: process(clk)
        # if rising_edge(clk) then
        # log_data <= r1.wb.adr(5 downto 3) &
        #     wishbone_in.stall &
        #     r1.wb.stb & r1.wb.cyc &
        #     std_ulogic_vector(
        #         to_unsigned(op_t'pos(req_op), 3)) &
        #     std_ulogic_vector(
        #         to_unsigned(tlb_hit_way, 3)) &
        #     std_ulogic_vector(
        #         to_unsigned(state_t'pos(r1.state), 3));
        # NOTE: the signals below may be narrower than the 3-bit fields the
        # VHDL packs, so the exact bit layout of log_data still needs review
        sync += log_data.eq(Cat(
            r1.state, valid_ra, tlb_hit_way,
            stall_out, req_op, d_out.valid, d_out.error,
            r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
            r1.wb.adr[3:6]
        ))

        # log_out <= log_data;
        # TODO: double-check this translation of the log_data concatenation
        comb += log_out.eq(log_data)
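

# Editor's note: one possible answer to the "generate" TODO above, offered as
# an assumption rather than the original author's solution.  nmigen has no
# direct equivalent of VHDL's conditional generate; the usual idiom is a
# plain Python "if" inside elaborate(), so the logging logic is simply not
# built when LOG_LENGTH is zero.  LogSketch and its payload are illustrative.
class LogSketch(Elaboratable):
    def __init__(self, log_length=0):
        self.log_length = log_length
        self.log_out = Signal(20)

    def elaborate(self, platform):
        m = Module()
        if self.log_length > 0:
            # only elaborated when logging is enabled, like "if ... generate"
            log_data = Signal(20)
            counter = Signal(20)
            m.d.sync += counter.eq(counter + 1)   # placeholder log payload
            m.d.sync += log_data.eq(counter)
            m.d.comb += self.log_out.eq(log_data)
        return m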