# based on Anton Blanchard microwatt dcache.vhdl

from enum import Enum, unique

from nmigen import Module, Signal, Elaboratable, Cat, Const, Array
from nmigen.cli import main
from nmigen.iocontrol import RecordObject
from nmigen.util import log2_int

from experiment.mem_types import (LoadStore1ToDcacheType,
                                  DcacheToLoadStore1Type,
                                  MmuToDcacheType, DcacheToMmuType)

from experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                 WBAddrType, WBDataType, WBSelType,
                                 WBMasterOut, WBSlaveOut, WBMasterOutVector,
                                 WBSlaveOutVector, WBIOMasterOut)
# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
    def __init__(self):
        super().__init__()
        self.reference = Signal()
        self.changed   = Signal()
        self.nocache   = Signal()
        self.priv      = Signal()
        self.rd_perm   = Signal()
        self.wr_perm   = Signal()


def extract_perm_attr(pte):
    ...
# Type of operation on a "valid" input
@unique
class Op(Enum):
    OP_NONE       = 0
    OP_BAD        = 1 # NC cache hit, TLB miss, prot/RC failure
    OP_STCX_FAIL  = 2 # conditional store w/o reservation
    OP_LOAD_HIT   = 3 # Cache hit on load
    OP_LOAD_MISS  = 4 # Load missing cache
    OP_LOAD_NC    = 5 # Non-cachable load
    OP_STORE_HIT  = 6 # Store hitting cache
    OP_STORE_MISS = 7 # Store missing cache


# Cache state machine
@unique
class State(Enum):
    IDLE             = 0 # Normal load hit processing
    RELOAD_WAIT_ACK  = 1 # Cache reload wait ack
    STORE_WAIT_ACK   = 2 # Store wait ack
    NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
# In order to make timing, we use the BRAMs with
# an output buffer, which means that the BRAM
# output is delayed by an extra cycle.
#
# Thus, the dcache has a 2-stage internal pipeline
# for cache hits with no stalls.
#
# All other operations are handled via stalling
# in the first stage.
#
# The second stage can thus complete a hit at the same
# time as the first stage emits a stall for a complex op.

# Stage 0 register, basically contains just the latched request
class RegStage0(RecordObject):
    def __init__(self):
        super().__init__()
        self.req     = LoadStore1ToDcacheType()
        self.tlbie   = Signal()
        self.doall   = Signal()
        self.tlbld   = Signal()
        self.mmu_req = Signal() # indicates source of request


# -- Set associative dcache write-through
#
# -- TODO (in no specific order):
#
# -- * See list in icache.vhdl
# -- * Complete load misses on the cycle when WB data comes instead of
# --   at the end of line (this requires dealing with requests coming in
# --   while not idle...)
class Dcache(Elaboratable):
    def __init__(self):
        # TODO: make these parameters of Dcache at some point
        self.LINE_SIZE = 64    # Line size in bytes
        self.NUM_LINES = 32    # Number of lines in a set
        self.NUM_WAYS = 4      # Number of ways
        self.TLB_SET_SIZE = 64 # L1 DTLB entries per set
        self.TLB_NUM_WAYS = 2  # L1 DTLB number of ways
        self.TLB_LG_PGSZ = 12  # L1 DTLB log_2(page_size)
        self.LOG_LENGTH = 0    # Non-zero to enable log data collection

        self.d_in = LoadStore1ToDcacheType()
        self.d_out = DcacheToLoadStore1Type()

        self.m_in = MmuToDcacheType()
        self.m_out = DcacheToMmuType()

        self.stall_out = Signal()

        self.wb_out = WBMasterOut()
        self.wb_in = WBSlaveOut()

        self.log_out = Signal(20)
    def elaborate(self, platform):
        LINE_SIZE = self.LINE_SIZE
        NUM_LINES = self.NUM_LINES
        NUM_WAYS = self.NUM_WAYS
        TLB_SET_SIZE = self.TLB_SET_SIZE
        TLB_NUM_WAYS = self.TLB_NUM_WAYS
        TLB_LG_PGSZ = self.TLB_LG_PGSZ
        LOG_LENGTH = self.LOG_LENGTH

        # BRAM organisation: We never access more than
        # -- wishbone_data_bits at a time so to save
        # -- resources we make the array only that wide, and
        # -- use consecutive indices to make a cache "line"
        #
        # -- ROW_SIZE is the width in bytes of the BRAM
        # -- (based on WB, so 64-bits)
        ROW_SIZE = WB_DATA_BITS // 8

        # ROW_PER_LINE is the number of rows (wishbone
        # transactions) in a line
        ROW_PER_LINE = LINE_SIZE // ROW_SIZE

        # BRAM_ROWS is the number of rows in BRAM needed
        # to represent the full dcache
        BRAM_ROWS = NUM_LINES * ROW_PER_LINE

        # Bit fields counts in the address
        #
        # REAL_ADDR_BITS is the number of real address
        # bits that we store
        REAL_ADDR_BITS = 56

        # ROW_BITS is the number of bits to select a row
        ROW_BITS = log2_int(BRAM_ROWS)

        # ROW_LINE_BITS is the number of bits to select
        # a row within a line
        ROW_LINE_BITS = log2_int(ROW_PER_LINE)

        # LINE_OFF_BITS is the number of bits for
        # the offset in a cache line
        LINE_OFF_BITS = log2_int(LINE_SIZE)

        # ROW_OFF_BITS is the number of bits for
        # the offset in a row
        ROW_OFF_BITS = log2_int(ROW_SIZE)

        # INDEX_BITS is the number of bits to
        # select a cache line
        INDEX_BITS = log2_int(NUM_LINES)

        # SET_SIZE_BITS is the log base 2 of the set size
        SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS

        # TAG_BITS is the number of bits of
        # the tag part of the address
        TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS

        # TAG_WIDTH is the width in bits of each way of the tag RAM
        TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)

        # WAY_BITS is the number of bits to select a way
        WAY_BITS = log2_int(NUM_WAYS)

        # Example of layout for 32 lines of 64 bytes:
        #
        # ..  tag    |index|  line  |
        # ..         |     |---|    | ROW_LINE_BITS  (3)
        # ..         |     |--- - --| LINE_OFF_BITS  (6)
        # ..         |         |- --| ROW_OFF_BITS   (3)
        # ..         |----- ---|    | ROW_BITS       (8)
        # ..         |-----|        | INDEX_BITS     (5)
        # .. --------|              | TAG_BITS       (45)
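
        # As a cross-check of the geometry against the layout diagram
        # above: with the default configuration (LINE_SIZE=64,
        # NUM_LINES=32, WB_DATA_BITS=64, REAL_ADDR_BITS=56) the
        # constants work out to ROW_SIZE=8, ROW_PER_LINE=8 and
        # BRAM_ROWS=256, giving exactly the bit-field widths in the
        # diagram.  TAG_WIDTH rounds TAG_BITS (45) up to a whole number
        # of bytes (48) so each way's tag starts on a byte boundary in
        # the tag RAM.  A sketch of the arithmetic, checked here at
        # elaboration time for the default configuration only:
        if (LINE_SIZE, NUM_LINES, REAL_ADDR_BITS) == (64, 32, 56):
            assert (ROW_SIZE, ROW_PER_LINE, BRAM_ROWS) == (8, 8, 256)
            assert (ROW_LINE_BITS, LINE_OFF_BITS, ROW_OFF_BITS) == (3, 6, 3)
            assert (ROW_BITS, INDEX_BITS, TAG_BITS, TAG_WIDTH) \
                == (8, 5, 45, 48)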
        # subtype row_t is integer range 0 to BRAM_ROWS-1;
        # subtype index_t is integer range 0 to NUM_LINES-1;
        """wherever way_t is used to make a Signal it must be substituted
           with log2_int(NUM_WAYS) i.e. WAY_BITS.  this is because whilst
           the *range* of the number is 0..NUM_WAYS-1 it requires
           log2_int(NUM_WAYS) i.e. WAY_BITS of space to store it
        """
        # subtype way_t is integer range 0 to NUM_WAYS-1;
        # subtype row_in_line_t is unsigned(ROW_LINE_BITS-1 downto 0);
        ROW = BRAM_ROWS             # yyyeah not really necessary, delete
        INDEX = NUM_LINES           # yyyeah not really necessary, delete
        WAY = NUM_WAYS              # yyyeah not really necessary, delete
        ROW_IN_LINE = ROW_LINE_BITS # yyyeah not really necessary, delete
        # -- The cache data BRAM organized as described above for each way
        # subtype cache_row_t is
        #     std_ulogic_vector(wishbone_data_bits-1 downto 0);
        # The cache data BRAM organized as described above for each way
        CACHE_ROW = WB_DATA_BITS

        # -- The cache tags LUTRAM has a row per set.
        # -- Vivado is a pain and will not handle a
        # -- clean (commented) definition of the cache
        # -- tags as a 3d memory. For now, work around
        # -- it by putting all the tags
        # subtype cache_tag_t is std_logic_vector(TAG_BITS-1 downto 0);
        # The cache tags LUTRAM has a row per set.
        # Vivado is a pain and will not handle a
        # clean (commented) definition of the cache
        # tags as a 3d memory. For now, work around
        # it by putting all the tags
        CACHE_TAG = TAG_BITS

        # -- type cache_tags_set_t is array(way_t) of cache_tag_t;
        # -- type cache_tags_array_t is array(index_t) of cache_tags_set_t;
        # constant TAG_RAM_WIDTH : natural := TAG_WIDTH * NUM_WAYS;
        # subtype cache_tags_set_t is
        #     std_logic_vector(TAG_RAM_WIDTH-1 downto 0);
        # type cache_tags_array_t is array(index_t) of cache_tags_set_t;
        TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

        CACHE_TAG_SET = TAG_RAM_WIDTH

        def CacheTagSet():
            return Signal(CACHE_TAG_SET)

        def CacheTagArray():
            return Array(CacheTagSet() for x in range(INDEX))

        # -- The cache valid bits
        # subtype cache_way_valids_t is
        #     std_ulogic_vector(NUM_WAYS-1 downto 0);
        # type cache_valids_t is array(index_t) of cache_way_valids_t;
        # type row_per_line_valid_t is
        #     array(0 to ROW_PER_LINE - 1) of std_ulogic;
        # The cache valid bits
        CACHE_WAY_VALID_BITS = NUM_WAYS

        def CacheWayValidBits():
            return Signal(CACHE_WAY_VALID_BITS)

        def CacheValidBitsArray():
            return Array(CacheWayValidBits() for x in range(INDEX))

        def RowPerLineValidArray():
            return Array(Signal() for x in range(ROW_PER_LINE))
        # -- Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        # signal cache_tags    : cache_tags_array_t;
        # signal cache_tag_set : cache_tags_set_t;
        # signal cache_valids  : cache_valids_t;
        #
        # attribute ram_style : string;
        # attribute ram_style of cache_tags : signal is "distributed";
        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags = CacheTagArray()
        cache_tag_set = Signal(CACHE_TAG_SET)
        cache_valid_bits = CacheValidBitsArray()

        # TODO attribute ram_style : string;
        # TODO attribute ram_style of cache_tags : signal is "distributed";
        # constant TLB_SET_BITS : natural := log2(TLB_SET_SIZE);
        # constant TLB_WAY_BITS : natural := log2(TLB_NUM_WAYS);
        # constant TLB_EA_TAG_BITS : natural :=
        #     64 - (TLB_LG_PGSZ + TLB_SET_BITS);
        # constant TLB_TAG_WAY_BITS : natural :=
        #     TLB_NUM_WAYS * TLB_EA_TAG_BITS;
        # constant TLB_PTE_BITS : natural := 64;
        # constant TLB_PTE_WAY_BITS : natural :=
        #     TLB_NUM_WAYS * TLB_PTE_BITS;
        TLB_SET_BITS = log2_int(TLB_SET_SIZE)
        TLB_WAY_BITS = log2_int(TLB_NUM_WAYS)
        TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
        TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
        TLB_PTE_BITS = 64
        TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS
        # subtype tlb_way_t is integer range 0 to TLB_NUM_WAYS - 1;
        # subtype tlb_index_t is integer range 0 to TLB_SET_SIZE - 1;
        # subtype tlb_way_valids_t is
        #     std_ulogic_vector(TLB_NUM_WAYS-1 downto 0);
        # type tlb_valids_t is
        #     array(tlb_index_t) of tlb_way_valids_t;
        # subtype tlb_tag_t is
        #     std_ulogic_vector(TLB_EA_TAG_BITS - 1 downto 0);
        # subtype tlb_way_tags_t is
        #     std_ulogic_vector(TLB_TAG_WAY_BITS-1 downto 0);
        # type tlb_tags_t is
        #     array(tlb_index_t) of tlb_way_tags_t;
        # subtype tlb_pte_t is
        #     std_ulogic_vector(TLB_PTE_BITS - 1 downto 0);
        # subtype tlb_way_ptes_t is
        #     std_ulogic_vector(TLB_PTE_WAY_BITS-1 downto 0);
        # type tlb_ptes_t is array(tlb_index_t) of tlb_way_ptes_t;
        # type hit_way_set_t is array(tlb_way_t) of way_t;
        TLB_WAY = TLB_NUM_WAYS

        TLB_INDEX = TLB_SET_SIZE

        TLB_WAY_VALID_BITS = TLB_NUM_WAYS

        def TLBValidBitsArray():
            return Array(
                Signal(TLB_WAY_VALID_BITS) for x in range(TLB_SET_SIZE)
            )

        TLB_TAG = TLB_EA_TAG_BITS

        TLB_WAY_TAGS = TLB_TAG_WAY_BITS

        def TLBTagsArray():
            return Array(
                Signal(TLB_WAY_TAGS) for x in range(TLB_SET_SIZE)
            )

        TLB_PTE = TLB_PTE_BITS

        TLB_WAY_PTES = TLB_PTE_WAY_BITS

        def TLBPtesArray():
            return Array(
                Signal(TLB_WAY_PTES) for x in range(TLB_SET_SIZE)
            )

        def HitWaySet():
            return Array(Signal(NUM_WAYS) for x in range(TLB_NUM_WAYS))
        # signal dtlb_valids : tlb_valids_t;
        # signal dtlb_tags   : tlb_tags_t;
        # signal dtlb_ptes   : tlb_ptes_t;
        """note: these are passed to nmigen.hdl.Memory as "attributes".
           don't know how, just that they are.
        """
        # attribute ram_style of dtlb_tags : signal is "distributed";
        # attribute ram_style of dtlb_ptes : signal is "distributed";
        dtlb_valids = TLBValidBitsArray()
        dtlb_tags = TLBTagsArray()
        dtlb_ptes = TLBPtesArray()
        # TODO attribute ram_style of dtlb_tags : signal is "distributed";
        # TODO attribute ram_style of dtlb_ptes : signal is "distributed";
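
        # On the "attributes" note above: nmigen's Signal (and
        # nmigen.hdl.mem.Memory, in versions that take an "attrs"
        # argument) accepts a dictionary of netlist attributes, which
        # is one way to carry the Vivado ram_style hint through
        # synthesis.  A sketch only -- the dtlb_* storage above is
        # still plain Signal Arrays and does not use this yet:
        example_tag_ram = Signal(TLB_TAG_WAY_BITS,
                                 attrs={"ram_style": "distributed"})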
        # signal r0 : reg_stage_0_t;
        # signal r0_full : std_ulogic;
        r0 = RegStage0()
        r0_full = Signal()

        # type mem_access_request_t is record
        #     valid     : std_ulogic;
        #     real_addr : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
        #     data      : std_ulogic_vector(63 downto 0);
        #     byte_sel  : std_ulogic_vector(7 downto 0);
        #     same_tag  : std_ulogic;
        #     mmu_req   : std_ulogic;
        # end record;
        class MemAccessRequest(RecordObject):
            def __init__(self):
                super().__init__()
                self.valid     = Signal()
                self.real_addr = Signal(REAL_ADDR_BITS)
                self.data      = Signal(64)
                self.byte_sel  = Signal(8)
                self.hit_way   = Signal(WAY_BITS)
                self.same_tag  = Signal()
                self.mmu_req   = Signal()
        # -- First stage register, contains state for stage 1 of load hits
        # -- and for the state machine used by all other operations
        # type reg_stage_1_t is record
        #     -- Info about the request
        #     full    : std_ulogic; -- have uncompleted request
        #     mmu_req : std_ulogic; -- request is from MMU
        #     req     : mem_access_request_t;
        #
        #     hit_load_valid : std_ulogic;
        #     hit_index      : index_t;
        #     cache_hit      : std_ulogic;
        #
        #     tlb_hit       : std_ulogic;
        #     tlb_hit_way   : tlb_way_t;
        #     tlb_hit_index : tlb_index_t;
        #
        #     -- 2-stage data buffer for data forwarded from writes to reads
        #     forward_data1  : std_ulogic_vector(63 downto 0);
        #     forward_data2  : std_ulogic_vector(63 downto 0);
        #     forward_sel1   : std_ulogic_vector(7 downto 0);
        #     forward_valid1 : std_ulogic;
        #     forward_way1   : way_t;
        #     forward_row1   : row_t;
        #     use_forward1   : std_ulogic;
        #     forward_sel    : std_ulogic_vector(7 downto 0);
        #
        #     -- Cache miss state (reload state machine)
        #     write_bram   : std_ulogic;
        #     write_tag    : std_ulogic;
        #     slow_valid   : std_ulogic;
        #     wb           : wishbone_master_out;
        #     reload_tag   : cache_tag_t;
        #     store_index  : index_t;
        #     end_row_ix   : row_in_line_t;
        #     rows_valid   : row_per_line_valid_t;
        #     acks_pending : unsigned(2 downto 0);
        #     inc_acks     : std_ulogic;
        #     dec_acks     : std_ulogic;
        #
        #     -- Signals to complete (possibly with error)
        #     ls_valid      : std_ulogic;
        #     ls_error      : std_ulogic;
        #     mmu_done      : std_ulogic;
        #     mmu_error     : std_ulogic;
        #     cache_paradox : std_ulogic;
        #
        #     -- Signal to complete a failed stcx.
        #     stcx_fail : std_ulogic;
        # end record;
        # First stage register, contains state for stage 1 of load hits
        # and for the state machine used by all other operations
        class RegStage1(RecordObject):
            def __init__(self):
                super().__init__()
                # Info about the request
                self.full    = Signal() # have uncompleted request
                self.mmu_req = Signal() # request is from MMU
                self.req     = MemAccessRequest()

                # Cache hit state
                self.hit_way        = Signal(WAY_BITS)
                self.hit_load_valid = Signal()
                self.hit_index      = Signal(INDEX)
                self.cache_hit      = Signal()

                # TLB hit state
                self.tlb_hit       = Signal()
                self.tlb_hit_way   = Signal(TLB_WAY)
                self.tlb_hit_index = Signal(TLB_SET_SIZE)

                # 2-stage data buffer for data forwarded from writes to reads
                self.forward_data1  = Signal(64)
                self.forward_data2  = Signal(64)
                self.forward_sel1   = Signal(8)
                self.forward_valid1 = Signal()
                self.forward_way1   = Signal(WAY_BITS)
                self.forward_row1   = Signal(ROW)
                self.use_forward1   = Signal()
                self.forward_sel    = Signal(8)

                # Cache miss state (reload state machine)
                self.state        = Signal(State)
                self.dcbz         = Signal()
                self.write_bram   = Signal()
                self.write_tag    = Signal()
                self.slow_valid   = Signal()
                self.wb           = WBMasterOut()
                self.reload_tag   = Signal(CACHE_TAG)
                self.store_way    = Signal(WAY_BITS)
                self.store_row    = Signal(ROW)
                self.store_index  = Signal(INDEX)
                self.end_row_ix   = Signal(ROW_IN_LINE)
                self.rows_valid   = RowPerLineValidArray()
                self.acks_pending = Signal(3)
                self.inc_acks     = Signal()
                self.dec_acks     = Signal()

                # Signals to complete (possibly with error)
                self.ls_valid      = Signal()
                self.ls_error      = Signal()
                self.mmu_done      = Signal()
                self.mmu_error     = Signal()
                self.cache_paradox = Signal()

                # Signal to complete a failed stcx.
                self.stcx_fail = Signal()
        # signal r1 : reg_stage_1_t;
        r1 = RegStage1()

        # -- Reservation information
        # type reservation_t is record
        #     valid : std_ulogic;
        #     addr  : std_ulogic_vector(63 downto LINE_OFF_BITS);
        # end record;
        # Reservation information
        class Reservation(RecordObject):
            def __init__(self):
                super().__init__()
                self.valid = Signal()
                # 63 downto LINE_OFF_BITS (TODO: LINE_OFF_BITS is 6)
                self.addr = Signal(64 - LINE_OFF_BITS)

        # signal reservation : reservation_t;
        reservation = Reservation()
        # -- Async signals on incoming request
        # signal req_index    : index_t;
        # signal req_row      : row_t;
        # signal req_hit_way  : way_t;
        # signal req_tag      : cache_tag_t;
        # signal req_op       : op_t;
        # signal req_data     : std_ulogic_vector(63 downto 0);
        # signal req_same_tag : std_ulogic;
        # signal req_go       : std_ulogic;
        # Async signals on incoming request
        req_index = Signal(INDEX)
        req_row = Signal(ROW)
        req_hit_way = Signal(WAY_BITS)
        req_tag = Signal(CACHE_TAG)
        req_op = Signal(Op)
        req_data = Signal(64)
        req_same_tag = Signal()
        req_go = Signal()

        # signal early_req_row : row_t;
        #
        # signal cancel_store : std_ulogic;
        # signal set_rsrv     : std_ulogic;
        # signal clear_rsrv   : std_ulogic;
        #
        # signal r0_valid : std_ulogic;
        # signal r0_stall : std_ulogic;
        #
        # signal use_forward1_next : std_ulogic;
        # signal use_forward2_next : std_ulogic;
        early_req_row = Signal(ROW)

        cancel_store = Signal()
        set_rsrv = Signal()
        clear_rsrv = Signal()

        r0_valid = Signal()
        r0_stall = Signal()

        use_forward1_next = Signal()
        use_forward2_next = Signal()
        # -- Cache RAM interface
        # type cache_ram_out_t is array(way_t) of cache_row_t;
        # signal cache_out : cache_ram_out_t;
        # Cache RAM interface
        def CacheRamOut():
            return Array(Signal(CACHE_ROW) for x in range(NUM_WAYS))

        cache_out = CacheRamOut()

        # -- PLRU output interface
        # type plru_out_t is array(index_t) of
        #     std_ulogic_vector(WAY_BITS-1 downto 0);
        # signal plru_victim : plru_out_t;
        # signal replace_way : way_t;
        # PLRU output interface
        def PLRUOut():
            return Array(Signal(WAY_BITS) for x in range(INDEX))

        plru_victim = PLRUOut()
        replace_way = Signal(WAY_BITS)
        # -- Wishbone read/write/cache write formatting signals
        # signal bus_sel : std_ulogic_vector(7 downto 0);
        # Wishbone read/write/cache write formatting signals
        bus_sel = Signal(8)

        # signal tlb_tag_way   : tlb_way_tags_t;
        # signal tlb_pte_way   : tlb_way_ptes_t;
        # signal tlb_valid_way : tlb_way_valids_t;
        # signal tlb_req_index : tlb_index_t;
        # signal tlb_hit       : std_ulogic;
        # signal tlb_hit_way   : tlb_way_t;
        # signal pte           : tlb_pte_t;
        # signal ra : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
        # signal valid_ra  : std_ulogic;
        # signal perm_attr : perm_attr_t;
        # signal rc_ok     : std_ulogic;
        # signal perm_ok   : std_ulogic;
        # signal access_ok : std_ulogic;
        tlb_tag_way = Signal(TLB_WAY_TAGS)
        tlb_pte_way = Signal(TLB_WAY_PTES)
        tlb_valid_way = Signal(TLB_WAY_VALID_BITS)
        tlb_req_index = Signal(TLB_SET_SIZE)
        tlb_hit = Signal()
        tlb_hit_way = Signal(TLB_WAY)
        pte = Signal(TLB_PTE)
        ra = Signal(REAL_ADDR_BITS)
        valid_ra = Signal()
        perm_attr = PermAttr()
        rc_ok = Signal()
        perm_ok = Signal()
        access_ok = Signal()

        # -- TLB PLRU output interface
        # type tlb_plru_out_t is array(tlb_index_t) of
        #     std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
        # signal tlb_plru_victim : tlb_plru_out_t;
        # TLB PLRU output interface
        def TLBPLRUOut():
            return Array(Signal(TLB_WAY_BITS) for x in range(TLB_SET_SIZE))

        tlb_plru_victim = TLBPLRUOut()
        # -- Helper functions to decode incoming requests
        #
        # -- Return the cache line index (tag index) for an address
        # function get_index(addr: std_ulogic_vector) return index_t is
        #     unsigned(addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS))
        # Helper functions to decode incoming requests
        #
        # Return the cache line index (tag index) for an address
        def get_index(addr):
            return addr[LINE_OFF_BITS:SET_SIZE_BITS]

        # -- Return the cache row index (data memory) for an address
        # function get_row(addr: std_ulogic_vector) return row_t is
        #     unsigned(addr(SET_SIZE_BITS - 1 downto ROW_OFF_BITS))
        # Return the cache row index (data memory) for an address
        def get_row(addr):
            return addr[ROW_OFF_BITS:SET_SIZE_BITS]

        # -- Return the index of a row within a line
        # function get_row_of_line(row: row_t) return row_in_line_t is
        #     variable row_v : unsigned(ROW_BITS-1 downto 0);
        #     row_v := to_unsigned(row, ROW_BITS);
        #     return row_v(ROW_LINEBITS-1 downto 0);
        # Return the index of a row within a line
        def get_row_of_line(row):
            row_v = row[0:ROW_BITS]
            return row_v[0:ROW_LINE_BITS]

        # -- Returns whether this is the last row of a line
        # function is_last_row_addr(addr: wishbone_addr_type;
        #     last: row_in_line_t) return boolean is
        #     unsigned(addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS)) = last;
        # Returns whether this is the last row of a line
        def is_last_row_addr(addr, last):
            return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

        # -- Returns whether this is the last row of a line
        # function is_last_row(row: row_t; last: row_in_line_t)
        #     return get_row_of_line(row) = last;
        # Returns whether this is the last row of a line
        def is_last_row(row, last):
            return get_row_of_line(row) == last

        # -- Return the address of the next row in the current cache line
        # function next_row_addr(addr: wishbone_addr_type)
        #     return std_ulogic_vector is
        #     variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
        #     variable result  : wishbone_addr_type;
        #     -- Is there no simpler way in VHDL to
        #     -- generate that 3 bits adder ?
        #     row_idx := addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS);
        #     row_idx := std_ulogic_vector(unsigned(row_idx) + 1);
        #     result(LINE_OFF_BITS-1 downto ROW_OFF_BITS) := row_idx;
        # Return the address of the next row in the current cache line
        def next_row_addr(addr):
            # Is there no simpler way in VHDL to
            # generate that 3 bits adder ?
            row_idx = addr[ROW_OFF_BITS:LINE_OFF_BITS] + 1
            # splice the incremented (wrapping) row index back into addr
            return Cat(addr[:ROW_OFF_BITS],
                       row_idx[:ROW_LINE_BITS],
                       addr[LINE_OFF_BITS:])

        # -- Return the next row in the current cache line. We use a
        # -- dedicated function in order to limit the size of the
        # -- generated adder to be only the bits within a cache line
        # -- (3 bits with default settings)
        # function next_row(row: row_t) return row_t is
        #     variable row_v   : std_ulogic_vector(ROW_BITS-1 downto 0);
        #     variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
        #     variable result  : std_ulogic_vector(ROW_BITS-1 downto 0);
        #     row_v := std_ulogic_vector(to_unsigned(row, ROW_BITS));
        #     row_idx := row_v(ROW_LINEBITS-1 downto 0);
        #     row_v(ROW_LINEBITS-1 downto 0) :=
        #         std_ulogic_vector(unsigned(row_idx) + 1);
        #     return to_integer(unsigned(row_v));
        # Return the next row in the current cache line. We use a
        # dedicated function in order to limit the size of the
        # generated adder to be only the bits within a cache line
        # (3 bits with default settings)
        def next_row(row):
            # only the row-in-line bits pass through the adder; the
            # upper bits of the row number are carried over unchanged
            row_idx = row[:ROW_LINE_BITS] + 1
            return Cat(row_idx[:ROW_LINE_BITS], row[ROW_LINE_BITS:])

        # -- Get the tag value from the address
        # function get_tag(addr: std_ulogic_vector) return cache_tag_t is
        #     return addr(REAL_ADDR_BITS - 1 downto SET_SIZE_BITS);
        # Get the tag value from the address
        def get_tag(addr):
            return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
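
        # As a cross-check of the slice boundaries used by the helpers
        # above, the same decode can be done on a plain integer address.
        # Example with the default geometry (ROW_OFF_BITS=3,
        # LINE_OFF_BITS=6, SET_SIZE_BITS=11): line index 3, row 5
        # within the line, byte 2 within the row:
        if (ROW_OFF_BITS, LINE_OFF_BITS, SET_SIZE_BITS) == (3, 6, 11):
            example_addr = (0x1234 << 11) | (3 << 6) | (5 << 3) | 2
            assert (example_addr >> 11) == 0x1234           # get_tag
            assert ((example_addr >> 6) & 0x1f) == 3        # get_index
            assert ((example_addr >> 3) & 0xff) == (3 << 3) | 5  # get_row
            assert ((example_addr >> 3) & 0x7) == 5  # row within the line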
        # -- Read a tag from a tag memory row
        # function read_tag(way: way_t; tagset: cache_tags_set_t)
        #     return cache_tag_t is
        #     return tagset(way * TAG_WIDTH + TAG_BITS
        #                   - 1 downto way * TAG_WIDTH);
        # Read a tag from a tag memory row
        def read_tag(way, tagset):
            return tagset[way * TAG_WIDTH:way * TAG_WIDTH + TAG_BITS]

        # -- Read a TLB tag from a TLB tag memory row
        # function read_tlb_tag(way: tlb_way_t; tags: tlb_way_tags_t)
        #     return tlb_tag_t is
        #     variable j : integer;
        #     j := way * TLB_EA_TAG_BITS;
        #     return tags(j + TLB_EA_TAG_BITS - 1 downto j);
        # Read a TLB tag from a TLB tag memory row
        def read_tlb_tag(way, tags):
            j = way * TLB_EA_TAG_BITS
            return tags[j:j + TLB_EA_TAG_BITS]

        # -- Write a TLB tag to a TLB tag memory row
        # procedure write_tlb_tag(way: tlb_way_t; tags: inout tlb_way_tags_t;
        #     variable j : integer;
        #     j := way * TLB_EA_TAG_BITS;
        #     tags(j + TLB_EA_TAG_BITS - 1 downto j) := tag;
        # Write a TLB tag to a TLB tag memory row
        def write_tlb_tag(way, tags, tag):
            j = way * TLB_EA_TAG_BITS
            return tags[j:j + TLB_EA_TAG_BITS].eq(tag)

        # -- Read a PTE from a TLB PTE memory row
        # function read_tlb_pte(way: tlb_way_t; ptes: tlb_way_ptes_t)
        #     return tlb_pte_t is
        #     variable j : integer;
        #     j := way * TLB_PTE_BITS;
        #     return ptes(j + TLB_PTE_BITS - 1 downto j);
        # Read a PTE from a TLB PTE memory row
        def read_tlb_pte(way, ptes):
            j = way * TLB_PTE_BITS
            return ptes[j:j + TLB_PTE_BITS]

        # procedure write_tlb_pte(way: tlb_way_t;
        #     ptes: inout tlb_way_ptes_t; newpte: tlb_pte_t) is
        #     variable j : integer;
        #     j := way * TLB_PTE_BITS;
        #     ptes(j + TLB_PTE_BITS - 1 downto j) := newpte;
        def write_tlb_pte(way, ptes, newpte):
            j = way * TLB_PTE_BITS
            return ptes[j:j + TLB_PTE_BITS].eq(newpte)
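
        # The read/write helpers above all follow one pattern: the
        # per-way fields are packed into a single wide vector, with way
        # i occupying bits [i*W, i*W + W).  A plain-Python model of that
        # round trip, using the PTE field width as an example:
        def _example_pack_unpack():
            W = TLB_PTE_BITS
            def wr(ptes, way, newpte):
                mask = ((1 << W) - 1) << (way * W)
                return (ptes & ~mask) | (newpte << (way * W))
            def rd(ptes, way):
                return (ptes >> (way * W)) & ((1 << W) - 1)
            ptes = wr(wr(0, 0, 0xdead), 1, 0xbeef)
            assert rd(ptes, 0) == 0xdead and rd(ptes, 1) == 0xbeef
        _example_pack_unpack()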
833 """these, because they are constants, can actually be done *as*
835 assert LINE_SIZE % ROWSIZE == 0, "line size not ...."
837 # assert LINE_SIZE mod ROW_SIZE = 0
838 # report "LINE_SIZE not multiple of ROW_SIZE" severity FAILURE;
839 # assert ispow2(LINE_SIZE)
840 # report "LINE_SIZE not power of 2" severity FAILURE;
841 # assert ispow2(NUM_LINES)
842 # report "NUM_LINES not power of 2" severity FAILURE;
843 # assert ispow2(ROW_PER_LINE)
844 # report "ROW_PER_LINE not power of 2" severity FAILURE;
845 # assert (ROW_BITS = INDEX_BITS + ROW_LINEBITS)
846 # report "geometry bits don't add up" severity FAILURE;
847 # assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS)
848 # report "geometry bits don't add up" severity FAILURE;
849 # assert (REAL_ADDR_BITS = TAG_BITS + INDEX_BITS + LINE_OFF_BITS)
850 # report "geometry bits don't add up" severity FAILURE;
851 # assert (REAL_ADDR_BITS = TAG_BITS + ROW_BITS + ROW_OFF_BITS)
852 # report "geometry bits don't add up" severity FAILURE;
853 # assert (64 = wishbone_data_bits)
854 # report "Can't yet handle a wishbone width that isn't 64-bits"
856 # assert SET_SIZE_BITS <= TLB_LG_PGSZ
857 # report "Set indexed by virtual address" severity FAILURE;
858 assert (LINE_SIZE
% ROW_SIZE
) == 0 "LINE_SIZE not " \
859 "multiple of ROW_SIZE"
861 assert (LINE_SIZE
% 2) == 0 "LINE_SIZE not power of 2"
863 assert (NUM_LINES
% 2) == 0 "NUM_LINES not power of 2"
865 assert (ROW_PER_LINE
% 2) == 0 "ROW_PER_LINE not" \
868 assert ROW_BITS
== (INDEX_BITS
+ ROW_LINE_BITS
) \
869 "geometry bits don't add up"
871 assert (LINE_OFF_BITS
= ROW_OFF_BITS
+ ROW_LINEBITS
) \
872 "geometry bits don't add up"
874 assert REAL_ADDR_BITS
== (TAG_BITS
+ INDEX_BITS \
875 + LINE_OFF_BITS
) "geometry bits don't add up"
877 assert REAL_ADDR_BITS
== (TAG_BITS
+ ROW_BITS
+ ROW_OFF_BITS
) \
878 "geometry bits don't add up"
880 assert 64 == wishbone_data_bits
"Can't yet handle a" \
881 "wishbone width that isn't 64-bits"
883 assert SET_SIZE_BITS
<= TLB_LG_PGSZ
"Set indexed by" \
# -- Latch the request in r0.req as long as we're not stalling
# stage_0 : process(clk)
# Latch the request in r0.req as long as we're not stalling
class Stage0(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # variable r : reg_stage_0_t;
        r = RegStage0()

        # if rising_edge(clk) then
        #     assert (d_in.valid and m_in.valid) = '0'
        #         report "request collision loadstore vs MMU";
        # (placeholder mirroring the VHDL report; d_in/m_in are Signals)
        assert ~(d_in.valid & m_in.valid), \
            "request collision loadstore vs MMU"

        # if m_in.valid = '1' then
        with m.If(m_in.valid):
            # r.req.valid := '1';
            # r.req.load := not (m_in.tlbie or m_in.tlbld);
            # r.req.reserve := '0';
            # r.req.virt_mode := '0';
            # r.req.priv_mode := '1';
            # r.req.addr := m_in.addr;
            # r.req.data := m_in.pte;
            # r.req.byte_sel := (others => '1');
            # r.tlbie := m_in.tlbie;
            # r.doall := m_in.doall;
            # r.tlbld := m_in.tlbld;
            sync += r.req.valid.eq(1)
            sync += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))
            sync += r.req.priv_mode.eq(1)
            sync += r.req.addr.eq(m_in.addr)
            sync += r.req.data.eq(m_in.pte)
            sync += r.req.byte_sel.eq(-1) # (others => '1')
            sync += r.tlbie.eq(m_in.tlbie)
            sync += r.doall.eq(m_in.doall)
            sync += r.tlbld.eq(m_in.tlbld)
            sync += r.mmu_req.eq(1)
        with m.Else():
            # request came from loadstore1
            sync += r.req.eq(d_in)

        # elsif r1.full = '0' or r0_full = '0' then
        with m.If(~r1.full | ~r0_full):
            # r0 <= r;
            # r0_full <= r.req.valid;
            sync += r0.eq(r)
            sync += r0_full.eq(r.req.valid)

        # -- we don't yet handle collisions between loadstore1 requests
        # -- and MMU requests
        # m_out.stall <= '0';
        # we don't yet handle collisions between loadstore1 requests
        # and MMU requests
        comb += m_out.stall.eq(0)

        # -- Hold off the request in r0 when r1 has an uncompleted request
        # r0_stall <= r0_full and r1.full;
        # r0_valid <= r0_full and not r1.full;
        # stall_out <= r0_stall;
        # Hold off the request in r0 when r1 has an uncompleted request
        comb += r0_stall.eq(r0_full & r1.full)
        comb += r0_valid.eq(r0_full & ~r1.full)
        comb += stall_out.eq(r0_stall)

        return m
# -- Operates in the second cycle on the request latched in r0.req.
# -- TLB updates write the entry at the end of the second cycle.
# tlb_read : process(clk)
# Operates in the second cycle on the request latched in r0.req.
# TLB updates write the entry at the end of the second cycle.
class TLBRead(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # variable index : tlb_index_t;
        # variable addrbits :
        #     std_ulogic_vector(TLB_SET_BITS - 1 downto 0);
        index = Signal(TLB_SET_BITS)
        addrbits = Signal(TLB_SET_BITS)

        # if rising_edge(clk) then
        #     if m_in.valid = '1' then
        with m.If(m_in.valid):
            # addrbits := m_in.addr(TLB_LG_PGSZ + TLB_SET_BITS
            #                       - 1 downto TLB_LG_PGSZ);
            sync += addrbits.eq(m_in.addr[
                        TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS])
        with m.Else():
            # addrbits := d_in.addr(TLB_LG_PGSZ + TLB_SET_BITS
            #                       - 1 downto TLB_LG_PGSZ);
            sync += addrbits.eq(d_in.addr[
                        TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS])

        # index := to_integer(unsigned(addrbits));
        sync += index.eq(addrbits)

        # -- If we have any op and the previous op isn't finished,
        # -- then keep the same output for next cycle.
        # if r0_stall = '0' then
        # If we have any op and the previous op isn't finished,
        # then keep the same output for next cycle.
        with m.If(~r0_stall):
            sync += tlb_valid_way.eq(dtlb_valids[index])
            sync += tlb_tag_way.eq(dtlb_tags[index])
            sync += tlb_pte_way.eq(dtlb_ptes[index])

        return m
# -- Generate TLB PLRUs
# maybe_tlb_plrus: if TLB_NUM_WAYS > 1 generate
# Generate TLB PLRUs
class MaybeTLBPLRUs(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # a "generate" is an elaboration-time choice, hence plain Python
        if TLB_NUM_WAYS > 1:
            # TODO understand how to convert generate statements
            # tlb_plrus: for i in 0 to TLB_SET_SIZE - 1 generate
            # -- TLB PLRU interface
            # signal tlb_plru_acc :
            #     std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
            # signal tlb_plru_acc_en : std_ulogic;
            # signal tlb_plru_out :
            #     std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
            #
            # tlb_plru : entity work.plru
            #     BITS => TLB_WAY_BITS
            #     acc => tlb_plru_acc,
            #     acc_en => tlb_plru_acc_en,
            #     lru => tlb_plru_out
            #
            # if r1.tlb_hit_index = i then
            #     tlb_plru_acc_en <= r1.tlb_hit;
            # else
            #     tlb_plru_acc_en <= '0';
            # tlb_plru_acc <= std_ulogic_vector(to_unsigned(
            #     r1.tlb_hit_way, TLB_WAY_BITS));
            # tlb_plru_victim(i) <= tlb_plru_out;
            pass

        return m
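
# On the generate-statement TODO above: the usual nmigen equivalent of
# a VHDL "for ... generate" is a plain Python loop at elaboration time
# that creates one submodule per iteration.  A sketch only -- it
# assumes a hypothetical PLRU Elaboratable with acc, acc_en and
# lru_out ports (the real plru entity's port names may differ):
#
#     if TLB_NUM_WAYS > 1:                  # VHDL: if ... generate
#         for i in range(TLB_SET_SIZE):     # VHDL: for ... generate
#             plru = PLRU(TLB_WAY_BITS)
#             setattr(m.submodules, "tlb_plru_%d" % i, plru)
#             with m.If(r1.tlb_hit_index == i):
#                 comb += plru.acc_en.eq(r1.tlb_hit)
#             with m.Else():
#                 comb += plru.acc_en.eq(0)
#             comb += plru.acc.eq(r1.tlb_hit_way)
#             comb += tlb_plru_victim[i].eq(plru.lru_out)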
# tlb_search : process(all)
class TLBSearch(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # variable hitway : tlb_way_t;
        # variable hit    : std_ulogic;
        # variable eatag  : tlb_tag_t;
        hitway = Signal(TLB_WAY)
        hit = Signal()
        eatag = Signal(TLB_EA_TAG_BITS)

        # tlb_req_index <=
        #     to_integer(unsigned(r0.req.addr(
        #         TLB_LG_PGSZ + TLB_SET_BITS - 1 downto TLB_LG_PGSZ
        #     )));
        # eatag := r0.req.addr(63 downto TLB_LG_PGSZ + TLB_SET_BITS);
        # for i in tlb_way_t loop
        #     if tlb_valid_way(i) = '1' and
        #         read_tlb_tag(i, tlb_tag_way) = eatag then
        #         hitway := i;
        #         hit := '1';
        # tlb_hit <= hit and r0_valid;
        # tlb_hit_way <= hitway;
        comb += tlb_req_index.eq(r0.req.addr[
                    TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS])
        comb += eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
        for i in range(TLB_WAY):
            with m.If(tlb_valid_way[i]
                      & (read_tlb_tag(i, tlb_tag_way) == eatag)):
                comb += hitway.eq(i)
                comb += hit.eq(1)
        comb += tlb_hit.eq(hit & r0_valid)
        comb += tlb_hit_way.eq(hitway)

        # if tlb_hit = '1' then
        with m.If(tlb_hit):
            # pte <= read_tlb_pte(hitway, tlb_pte_way);
            comb += pte.eq(read_tlb_pte(hitway, tlb_pte_way))
        # else
        #     pte <= (others => '0');

        # valid_ra <= tlb_hit or not r0.req.virt_mode;
        comb += valid_ra.eq(tlb_hit | ~r0.req.virt_mode)
        # if r0.req.virt_mode = '1' then
        with m.If(r0.req.virt_mode):
            # ra <= pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ) &
            #       r0.req.addr(TLB_LG_PGSZ - 1 downto ROW_OFF_BITS) &
            #       (ROW_OFF_BITS-1 downto 0 => '0');
            # perm_attr <= extract_perm_attr(pte);
            comb += ra.eq(Cat(
                        Const(0, ROW_OFF_BITS),
                        r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
                        pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
            comb += perm_attr.eq(extract_perm_attr(pte))
        with m.Else():
            # ra <= r0.req.addr(
            #     REAL_ADDR_BITS - 1 downto ROW_OFF_BITS
            # ) & (ROW_OFF_BITS-1 downto 0 => '0');
            comb += ra.eq(Cat(
                        Const(0, ROW_OFF_BITS),
                        r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]))

            # perm_attr <= real_mode_perm_attr;
            comb += perm_attr.reference.eq(1)
            comb += perm_attr.changed.eq(1)
            comb += perm_attr.priv.eq(1)
            comb += perm_attr.nocache.eq(0)
            comb += perm_attr.rd_perm.eq(1)
            comb += perm_attr.wr_perm.eq(1)

        return m
# tlb_update : process(clk)
class TLBUpdate(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # variable tlbie    : std_ulogic;
        # variable tlbwe    : std_ulogic;
        # variable repl_way : tlb_way_t;
        # variable eatag    : tlb_tag_t;
        # variable tagset   : tlb_way_tags_t;
        # variable pteset   : tlb_way_ptes_t;
        tlbie = Signal()
        tlbwe = Signal()
        repl_way = Signal(TLB_WAY)
        eatag = Signal(TLB_EA_TAG_BITS)
        tagset = Signal(TLB_WAY_TAGS)
        pteset = Signal(TLB_WAY_PTES)

        # if rising_edge(clk) then
        #     tlbie := r0_valid and r0.tlbie;
        #     tlbwe := r0_valid and r0.tlbld;
        sync += tlbie.eq(r0_valid & r0.tlbie)
        sync += tlbwe.eq(r0_valid & r0.tlbld)

        # if rst = '1' or (tlbie = '1' and r0.doall = '1') then
        # the rst arm is covered by nmigen's sync-domain reset, which
        # returns every sync-driven Signal to its reset value, so only
        # the tlbie/doall condition needs an explicit test here
        with m.If(tlbie & r0.doall):
            # -- clear all valid bits at once
            # for i in tlb_index_t loop
            #     dtlb_valids(i) <= (others => '0');
            # clear all valid bits at once
            for i in range(TLB_SET_SIZE):
                sync += dtlb_valids[i].eq(0)
        # elsif tlbie = '1' then
        with m.Elif(tlbie):
            # if tlb_hit = '1' then
            with m.If(tlb_hit):
                # dtlb_valids(tlb_req_index)(tlb_hit_way) <= '0';
                sync += dtlb_valids[tlb_req_index][tlb_hit_way].eq(0)
        # elsif tlbwe = '1' then
        with m.Elif(tlbwe):
            # if tlb_hit = '1' then
            with m.If(tlb_hit):
                # repl_way := tlb_hit_way;
                sync += repl_way.eq(tlb_hit_way)
            with m.Else():
                # repl_way := to_integer(unsigned(
                #     tlb_plru_victim(tlb_req_index)));
                sync += repl_way.eq(tlb_plru_victim[tlb_req_index])

            # eatag := r0.req.addr(
            #     63 downto TLB_LG_PGSZ + TLB_SET_BITS
            # );
            # tagset := tlb_tag_way;
            # write_tlb_tag(repl_way, tagset, eatag);
            # dtlb_tags(tlb_req_index) <= tagset;
            # pteset := tlb_pte_way;
            # write_tlb_pte(repl_way, pteset, r0.req.data);
            # dtlb_ptes(tlb_req_index) <= pteset;
            # dtlb_valids(tlb_req_index)(repl_way) <= '1';
            sync += eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
            sync += tagset.eq(tlb_tag_way)
            sync += write_tlb_tag(repl_way, tagset, eatag)
            sync += dtlb_tags[tlb_req_index].eq(tagset)
            sync += pteset.eq(tlb_pte_way)
            sync += write_tlb_pte(repl_way, pteset, r0.req.data)
            sync += dtlb_ptes[tlb_req_index].eq(pteset)
            sync += dtlb_valids[tlb_req_index][repl_way].eq(1)

        return m
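

# A minimal self-contained sketch of the nmigen reset behaviour relied
# on in TLBUpdate above (and in ReservationReg below): every Signal
# driven from m.d.sync is returned to its reset= value (default 0)
# while the domain's reset is asserted, so VHDL "if rst = '1'" arms
# need no direct translation; ResetSignal() exists for the rare cases
# where reset must be read explicitly.
class ResetDemo(Elaboratable):
    def __init__(self):
        self.valid = Signal()           # goes back to 0 on domain reset
        self.count = Signal(4, reset=5) # goes back to 5 on domain reset

    def elaborate(self, platform):
        m = Module()
        # no explicit reset branch is needed for either signal
        m.d.sync += self.count.eq(self.count + 1)
        m.d.sync += self.valid.eq(1)
        return m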
# maybe_plrus: if NUM_WAYS > 1 generate
class MaybePLRUs(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # TODO learn translation of generate into nmigen @lkcl
        # plrus: for i in 0 to NUM_LINES-1 generate
        # signal plru_acc    : std_ulogic_vector(WAY_BITS-1 downto 0);
        # signal plru_acc_en : std_ulogic;
        # signal plru_out    : std_ulogic_vector(WAY_BITS-1 downto 0);
        #
        # TODO learn translation of entity, generic map, port map in
        # nmigen @lkcl
        # plru : entity work.plru
        #     acc_en => plru_acc_en,
        #
        # if r1.hit_index = i then
        with m.If(r1.hit_index == i):
            # plru_acc_en <= r1.cache_hit;
            comb += plru_acc_en.eq(r1.cache_hit)
        with m.Else():
            # plru_acc_en <= '0';
            comb += plru_acc_en.eq(0)

        # plru_acc <= std_ulogic_vector(to_unsigned(
        #     r1.hit_way, WAY_BITS
        # ));
        # plru_victim(i) <= plru_out;
        comb += plru_acc.eq(r1.hit_way)
        comb += plru_victim[i].eq(plru_out)

        return m
# -- Cache tag RAM read port
# cache_tag_read : process(clk)
# Cache tag RAM read port
class CacheTagRead(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # variable index : index_t;
        index = Signal(INDEX)

        # if rising_edge(clk) then
        #     if r0_stall = '1' then
        with m.If(r0_stall):
            # index := req_index;
            sync += index.eq(req_index)
        # elsif m_in.valid = '1' then
        with m.Elif(m_in.valid):
            # index := get_index(m_in.addr);
            sync += index.eq(get_index(m_in.addr))
        with m.Else():
            # index := get_index(d_in.addr);
            sync += index.eq(get_index(d_in.addr))

        # cache_tag_set <= cache_tags(index);
        sync += cache_tag_set.eq(cache_tags[index])

        return m
# -- Cache request parsing and hit detection
# dcache_request : process(all)
# Cache request parsing and hit detection
class DcacheRequest(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        # variable is_hit  : std_ulogic;
        # variable hit_way : way_t;
        # variable op      : op_t;
        # variable opsel   : std_ulogic_vector(2 downto 0);
        # variable go      : std_ulogic;
        # variable nc      : std_ulogic;
        # variable s_hit   : std_ulogic;
        # variable s_tag   : cache_tag_t;
        # variable s_pte   : tlb_pte_t;
        # variable s_ra    : std_ulogic_vector(
        #     REAL_ADDR_BITS - 1 downto 0
        # );
        # variable hit_set : std_ulogic_vector(
        #     TLB_NUM_WAYS - 1 downto 0
        # );
        # variable hit_way_set : hit_way_set_t;
        # variable rel_matches : std_ulogic_vector(
        #     TLB_NUM_WAYS - 1 downto 0
        # );
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        is_hit = Signal()
        hit_way = Signal(WAY_BITS)
        op = Signal(Op)
        opsel = Signal(3)
        go = Signal()
        nc = Signal()
        s_hit = Signal()
        s_tag = Signal(CACHE_TAG)
        s_pte = Signal(TLB_PTE)
        s_ra = Signal(REAL_ADDR_BITS)
        hit_set = Signal(TLB_NUM_WAYS)
        hit_way_set = HitWaySet()
        rel_matches = Signal(TLB_NUM_WAYS)
        rel_match = Signal()

        # -- Extract line, row and tag from request
        # req_index <= get_index(r0.req.addr);
        # req_row <= get_row(r0.req.addr);
        # req_tag <= get_tag(ra);
        #
        # go := r0_valid and not (r0.tlbie or r0.tlbld)
        #       and not r1.ls_error;
        # Extract line, row and tag from request
        comb += req_index.eq(get_index(r0.req.addr))
        comb += req_row.eq(get_row(r0.req.addr))
        comb += req_tag.eq(get_tag(ra))

        comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)

        # -- Test if pending request is a hit on any way
        # -- In order to make timing in virtual mode,
        # -- when we are using the TLB, we compare each
        # -- way with each of the real addresses from each way of
        # -- the TLB, and then decide later which match to use.
        # Test if pending request is a hit on any way
        # In order to make timing in virtual mode,
        # when we are using the TLB, we compare each
        # way with each of the real addresses from each way of
        # the TLB, and then decide later which match to use.
        comb += hit_way.eq(0)
        comb += is_hit.eq(0)
        comb += rel_match.eq(0)
        # if r0.req.virt_mode = '1' then
        with m.If(r0.req.virt_mode):
            # rel_matches := (others => '0');
            comb += rel_matches.eq(0)
            # for j in tlb_way_t loop
            for j in range(TLB_WAY):
                # hit_way_set(j) := 0;
                # s_hit := '0';
                # s_pte := read_tlb_pte(j, tlb_pte_way);
                # s_ra := s_pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ)
                #         & r0.req.addr(TLB_LG_PGSZ - 1 downto 0);
                # s_tag := get_tag(s_ra);
                comb += hit_way_set[j].eq(0)
                comb += s_hit.eq(0)
                comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
                comb += s_ra.eq(Cat(r0.req.addr[0:TLB_LG_PGSZ],
                                    s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
                comb += s_tag.eq(get_tag(s_ra))

                # for i in way_t loop
                for i in range(NUM_WAYS):
                    # if go = '1' and cache_valids(req_index)(i) = '1'
                    #     and read_tag(i, cache_tag_set) = s_tag
                    #     and tlb_valid_way(j) = '1' then
                    with m.If(go
                              & cache_valid_bits[req_index][i]
                              & (read_tag(i, cache_tag_set) == s_tag)
                              & tlb_valid_way[j]):
                        # hit_way_set(j) := i;
                        # s_hit := '1';
                        comb += hit_way_set[j].eq(i)
                        comb += s_hit.eq(1)

                # hit_set(j) := s_hit;
                comb += hit_set[j].eq(s_hit)
                # if s_tag = r1.reload_tag then
                with m.If(s_tag == r1.reload_tag):
                    # rel_matches(j) := '1';
                    comb += rel_matches[j].eq(1)

            # if tlb_hit = '1' then
            with m.If(tlb_hit):
                # is_hit := hit_set(tlb_hit_way);
                # hit_way := hit_way_set(tlb_hit_way);
                # rel_match := rel_matches(tlb_hit_way);
                comb += is_hit.eq(hit_set[tlb_hit_way])
                comb += hit_way.eq(hit_way_set[tlb_hit_way])
                comb += rel_match.eq(rel_matches[tlb_hit_way])
        with m.Else():
            # s_tag := get_tag(r0.req.addr);
            comb += s_tag.eq(get_tag(r0.req.addr))
            # for i in way_t loop
            for i in range(NUM_WAYS):
                # if go = '1' and cache_valids(req_index)(i) = '1' and
                #     read_tag(i, cache_tag_set) = s_tag then
                with m.If(go
                          & cache_valid_bits[req_index][i]
                          & (read_tag(i, cache_tag_set) == s_tag)):
                    # hit_way := i;
                    # is_hit := '1';
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)

            # if s_tag = r1.reload_tag then
            with m.If(s_tag == r1.reload_tag):
                # rel_match := '1';
                comb += rel_match.eq(1)

        # req_same_tag <= rel_match;
        comb += req_same_tag.eq(rel_match)
        # -- See if the request matches the line currently being reloaded
        # if r1.state = RELOAD_WAIT_ACK and req_index = r1.store_index
        #     and rel_match = '1' then
        # See if the request matches the line currently being reloaded
        with m.If((r1.state == State.RELOAD_WAIT_ACK)
                  & (req_index == r1.store_index)
                  & rel_match):
            # -- For a store, consider this a hit even if the row isn't
            # -- valid since it will be by the time we perform the store.
            # -- For a load, check the appropriate row valid bit.
            # For a store, consider this a hit even if the row isn't
            # valid since it will be by the time we perform the store.
            # For a load, check the appropriate row valid bit.
            # is_hit :=
            #     not r0.req.load or r1.rows_valid(req_row mod ROW_PER_LINE);
            # hit_way := replace_way;
            comb += is_hit.eq(~r0.req.load
                              | r1.rows_valid[req_row % ROW_PER_LINE])
            comb += hit_way.eq(replace_way)
        # -- Whether to use forwarded data for a load or not
        # Whether to use forwarded data for a load or not
        # use_forward1_next <= '0';
        comb += use_forward1_next.eq(0)
        # if get_row(r1.req.real_addr) = req_row
        #     and r1.req.hit_way = hit_way then
        with m.If((get_row(r1.req.real_addr) == req_row)
                  & (r1.req.hit_way == hit_way)):
            # -- Only need to consider r1.write_bram here, since if we
            # -- are writing refill data here, then we don't have a
            # -- cache hit this cycle on the line being refilled.
            # -- (There is the possibility that the load following the
            # -- load miss that started the refill could be to the old
            # -- contents of the victim line, since it is a couple of
            # -- cycles after the refill starts before we see the updated
            # -- cache tag. In that case we don't use the bypass.)
            # Only need to consider r1.write_bram here, since if we
            # are writing refill data here, then we don't have a
            # cache hit this cycle on the line being refilled.
            # (There is the possibility that the load following the
            # load miss that started the refill could be to the old
            # contents of the victim line, since it is a couple of
            # cycles after the refill starts before we see the updated
            # cache tag. In that case we don't use the bypass.)
            # use_forward1_next <= r1.write_bram;
            comb += use_forward1_next.eq(r1.write_bram)

        # use_forward2_next <= '0';
        comb += use_forward2_next.eq(0)
        # if r1.forward_row1 = req_row and r1.forward_way1 = hit_way then
        with m.If((r1.forward_row1 == req_row)
                  & (r1.forward_way1 == hit_way)):
            # use_forward2_next <= r1.forward_valid1;
            comb += use_forward2_next.eq(r1.forward_valid1)
        # -- The way that matched on a hit
        # The way that matched on a hit
        # req_hit_way <= hit_way;
        comb += req_hit_way.eq(hit_way)

        # -- The way to replace on a miss
        # The way to replace on a miss
        # if r1.write_tag = '1' then
        with m.If(r1.write_tag):
            # replace_way <= to_integer(unsigned(
            #     plru_victim(r1.store_index)
            # ));
            comb += replace_way.eq(plru_victim[r1.store_index])
        with m.Else():
            # replace_way <= r1.store_way;
            comb += replace_way.eq(r1.store_way)

        # -- work out whether we have permission for this access
        # -- NB we don't yet implement AMR, thus no KUAP
        # work out whether we have permission for this access
        # NB we don't yet implement AMR, thus no KUAP
        # rc_ok <= perm_attr.reference and
        #     (r0.req.load or perm_attr.changed);
        # perm_ok <= (r0.req.priv_mode or not perm_attr.priv) and
        #     (perm_attr.wr_perm or (r0.req.load
        #     and perm_attr.rd_perm));
        # access_ok <= valid_ra and perm_ok and rc_ok;
        comb += rc_ok.eq(perm_attr.reference
                         & (r0.req.load | perm_attr.changed))
        comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv)
                           & (perm_attr.wr_perm
                              | (r0.req.load & perm_attr.rd_perm)))
        comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
        # -- Combine the request and cache hit status to decide what
        # -- operation needs to be done
        # nc := r0.req.nc or perm_attr.nocache;
        # op := OP_NONE;
        # Combine the request and cache hit status to decide what
        # operation needs to be done
        comb += nc.eq(r0.req.nc | perm_attr.nocache)
        comb += op.eq(Op.OP_NONE)

        # if access_ok = '0' then
        with m.If(~access_ok):
            # op := OP_BAD;
            comb += op.eq(Op.OP_BAD)
        # elsif cancel_store = '1' then
        with m.Elif(cancel_store):
            # op := OP_STCX_FAIL;
            comb += op.eq(Op.OP_STCX_FAIL)
        with m.Else():
            # opsel := r0.req.load & nc & is_hit;
            comb += opsel.eq(Cat(is_hit, nc, r0.req.load))

            with m.Switch(opsel):
                # when "101" => op := OP_LOAD_HIT;
                # when "100" => op := OP_LOAD_MISS;
                # when "110" => op := OP_LOAD_NC;
                # when "001" => op := OP_STORE_HIT;
                # when "000" => op := OP_STORE_MISS;
                # when "010" => op := OP_STORE_MISS;
                # when "011" => op := OP_BAD;
                # when "111" => op := OP_BAD;
                # when others => op := OP_NONE;
                with m.Case(0b101):
                    comb += op.eq(Op.OP_LOAD_HIT)
                with m.Case(0b100):
                    comb += op.eq(Op.OP_LOAD_MISS)
                with m.Case(0b110):
                    comb += op.eq(Op.OP_LOAD_NC)
                with m.Case(0b001):
                    comb += op.eq(Op.OP_STORE_HIT)
                with m.Case(0b000):
                    comb += op.eq(Op.OP_STORE_MISS)
                with m.Case(0b010):
                    comb += op.eq(Op.OP_STORE_MISS)
                with m.Case(0b011):
                    comb += op.eq(Op.OP_BAD)
                with m.Case(0b111):
                    comb += op.eq(Op.OP_BAD)
                with m.Case():
                    comb += op.eq(Op.OP_NONE)

        # req_op <= op;
        # req_go <= go;
        comb += req_op.eq(op)
        comb += req_go.eq(go)
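
        # Note on the opsel encoding: VHDL "r0.req.load & nc & is_hit"
        # places load in the leftmost (most significant) position,
        # while nmigen's Cat() builds LSB-first, so Cat(is_hit, nc,
        # r0.req.load) gives the same encoding: is_hit is bit 0 and
        # load is bit 2, which is why the VHDL case label "101" maps to
        # 0b101 above.  A plain-Python check of the decode table:
        def _example_opsel(load, nc_bit, hit):
            sel = (load << 2) | (nc_bit << 1) | hit
            return {0b101: Op.OP_LOAD_HIT,   0b100: Op.OP_LOAD_MISS,
                    0b110: Op.OP_LOAD_NC,    0b001: Op.OP_STORE_HIT,
                    0b000: Op.OP_STORE_MISS, 0b010: Op.OP_STORE_MISS,
                    0b011: Op.OP_BAD,        0b111: Op.OP_BAD,
                    }.get(sel, Op.OP_NONE)
        assert _example_opsel(1, 0, 1) == Op.OP_LOAD_HIT
        assert _example_opsel(0, 1, 1) == Op.OP_BAD  # NC cache hit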
        # -- Version of the row number that is valid one cycle earlier
        # -- in the cases where we need to read the cache data BRAM.
        # -- If we're stalling then we need to keep reading the last
        # -- row requested.
        # Version of the row number that is valid one cycle earlier
        # in the cases where we need to read the cache data BRAM.
        # If we're stalling then we need to keep reading the last
        # row requested.
        # if r0_stall = '0' then
        with m.If(~r0_stall):
            # if m_in.valid = '1' then
            with m.If(m_in.valid):
                # early_req_row <= get_row(m_in.addr);
                comb += early_req_row.eq(get_row(m_in.addr))
            with m.Else():
                # early_req_row <= get_row(d_in.addr);
                comb += early_req_row.eq(get_row(d_in.addr))
        with m.Else():
            # early_req_row <= req_row;
            comb += early_req_row.eq(req_row)
        # -- Wire up wishbone request latch out of stage 1
        # wishbone_out <= r1.wb;
        # Wire up wishbone request latch out of stage 1
        comb += wb_out.eq(r1.wb)

        return m
# -- Handle load-with-reservation and store-conditional instructions
# reservation_comb: process(all)
# Handle load-with-reservation and store-conditional instructions
class ReservationComb(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # cancel_store <= '0';
        # set_rsrv <= '0';
        # clear_rsrv <= '0';
        # if r0_valid = '1' and r0.req.reserve = '1' then
        with m.If(r0_valid & r0.req.reserve):
            # -- XXX generate alignment interrupt if address
            # -- is not aligned XXX or if r0.req.nc = '1'
            # if r0.req.load = '1' then
            # XXX generate alignment interrupt if address
            # is not aligned XXX or if r0.req.nc = '1'
            with m.If(r0.req.load):
                # -- load with reservation
                # set_rsrv <= '1';
                # load with reservation
                comb += set_rsrv.eq(1)
            with m.Else():
                # -- store conditional
                # clear_rsrv <= '1';
                # store conditional
                comb += clear_rsrv.eq(1)
                # if reservation.valid = '0' or r0.req.addr(63
                #     downto LINE_OFF_BITS) /= reservation.addr then
                with m.If(~reservation.valid
                          | (r0.req.addr[LINE_OFF_BITS:64]
                             != reservation.addr)):
                    # cancel_store <= '1';
                    comb += cancel_store.eq(1)

        return m
# reservation_reg: process(clk)
class ReservationReg(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # if rising_edge(clk) then
        #     if rst = '1' then
        #         reservation.valid <= '0';
        # (the rst arm is implicit: reservation.valid resets to 0 with
        # the sync domain, so only the other branch is written out)
        # elsif r0_valid = '1' and access_ok = '1' then
        with m.If(r0_valid & access_ok):
            # if clear_rsrv = '1' then
            with m.If(clear_rsrv):
                # reservation.valid <= '0';
                sync += reservation.valid.eq(0)
            # elsif set_rsrv = '1' then
            with m.Elif(set_rsrv):
                # reservation.valid <= '1';
                # reservation.addr <=
                #     r0.req.addr(63 downto LINE_OFF_BITS);
                sync += reservation.valid.eq(1)
                sync += reservation.addr.eq(
                            r0.req.addr[LINE_OFF_BITS:64])

        return m
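

# Since reservation.addr stores only bits 63 downto LINE_OFF_BITS, the
# reservation granule is one whole cache line: the stcx. address only
# has to match the larx reservation to line granularity.  In
# plain-integer terms (a sketch with the default 64-byte line):
def _example_reservation_granule():
    line_off_bits = 6                     # 64-byte line
    def same_granule(a, b):
        # compare everything above the line offset, as the hardware does
        return (a >> line_off_bits) == (b >> line_off_bits)
    assert same_granule(0x1000, 0x103f)       # same 64-byte line
    assert not same_granule(0x1000, 0x1040)   # next line over
_example_reservation_granule()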
# -- Return data for loads & completion control logic
# writeback_control: process(all)
# Return data for loads & completion control logic
class WriteBackControl(Elaboratable):
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # variable data_out : std_ulogic_vector(63 downto 0);
        # variable data_fwd : std_ulogic_vector(63 downto 0);
        # variable j : integer;
        data_out = Signal(64)
        data_fwd = Signal(64)

        # -- Use the bypass if we are reading the row that was
        # -- written 1 or 2 cycles ago, including for the
        # -- slow_valid = 1 case (i.e. completing a load
        # -- miss or a non-cacheable load).
        # if r1.use_forward1 = '1' then
        # Use the bypass if we are reading the row that was
        # written 1 or 2 cycles ago, including for the
        # slow_valid = 1 case (i.e. completing a load
        # miss or a non-cacheable load).
        with m.If(r1.use_forward1):
            # data_fwd := r1.forward_data1;
            comb += data_fwd.eq(r1.forward_data1)
        with m.Else():
            # data_fwd := r1.forward_data2;
            comb += data_fwd.eq(r1.forward_data2)

        # data_out := cache_out(r1.hit_way);
        comb += data_out.eq(cache_out[r1.hit_way])

        # for i in 0 to 7 loop
        for i in range(8):
            j = i * 8
            # if r1.forward_sel(i) = '1' then
            with m.If(r1.forward_sel[i]):
                # data_out(j + 7 downto j) := data_fwd(j + 7 downto j);
                comb += data_out[j:j+8].eq(data_fwd[j:j+8])

        # d_out.valid <= r1.ls_valid;
        # d_out.data <= data_out;
        # d_out.store_done <= not r1.stcx_fail;
        # d_out.error <= r1.ls_error;
        # d_out.cache_paradox <= r1.cache_paradox;
        comb += d_out.valid.eq(r1.ls_valid)
        comb += d_out.data.eq(data_out)
        comb += d_out.store_done.eq(~r1.stcx_fail)
        comb += d_out.error.eq(r1.ls_error)
        comb += d_out.cache_paradox.eq(r1.cache_paradox)

        # m_out.done <= r1.mmu_done;
        # m_out.err <= r1.mmu_error;
        # m_out.data <= data_out;
        comb += m_out.done.eq(r1.mmu_done)
        comb += m_out.err.eq(r1.mmu_error)
        comb += m_out.data.eq(data_out)
        # -- We have a valid load or store hit or we just completed
        # -- a slow op such as a load miss, a NC load or a store
        # --
        # -- Note: the load hit is delayed by one cycle. However it
        # -- can still not collide with r.slow_valid (well unless I
        # -- miscalculated) because slow_valid can only be set on a
        # -- subsequent request and not on its first cycle (the state
        # -- machine must have advanced), which makes slow_valid
        # -- at least 2 cycles from the previous hit_load_valid.
        #
        # -- Sanity: Only one of these must be set in any given cycle
        # assert (r1.slow_valid and r1.stcx_fail) /= '1'
        #     report "unexpected slow_valid collision with stcx_fail"
        # assert ((r1.slow_valid or r1.stcx_fail) and r1.hit_load_valid)
        #     /= '1' report "unexpected hit_load_delayed collision with
        #     slow_valid" severity FAILURE;
        # We have a valid load or store hit or we just completed
        # a slow op such as a load miss, a NC load or a store
        #
        # Note: the load hit is delayed by one cycle. However it
        # can still not collide with r.slow_valid (well unless I
        # miscalculated) because slow_valid can only be set on a
        # subsequent request and not on its first cycle (the state
        # machine must have advanced), which makes slow_valid
        # at least 2 cycles from the previous hit_load_valid.
        #
        # Sanity: Only one of these must be set in any given cycle
        assert (r1.slow_valid & r1.stcx_fail) != 1, \
            "unexpected slow_valid collision with stcx_fail " \
            "-!- severity FAILURE"

        assert ((r1.slow_valid | r1.stcx_fail) & r1.hit_load_valid) != 1, \
            "unexpected hit_load_delayed collision with slow_valid " \
            "-!- severity FAILURE"

        # if r1.mmu_req = '0' then
        with m.If(~r1.mmu_req):
            # -- Request came from loadstore1...
            # -- Load hit case is the standard path
            # if r1.hit_load_valid = '1' then
            # Request came from loadstore1...
            # Load hit case is the standard path
            with m.If(r1.hit_load_valid):
                # report
                #     "completing load hit data=" & to_hstring(data_out);
                print(f"completing load hit data={data_out}")

            # -- error cases complete without stalling
            # if r1.ls_error = '1' then
            # error cases complete without stalling
            with m.If(r1.ls_error):
                # report "completing ld/st with error";
                print("completing ld/st with error")

            # -- Slow ops (load miss, NC, stores)
            # if r1.slow_valid = '1' then
            # Slow ops (load miss, NC, stores)
            with m.If(r1.slow_valid):
                # report "completing store or load miss data="
                #     & to_hstring(data_out);
                print(f"completing store or load miss data={data_out}")

        with m.Else():
            # -- Request came from MMU
            # if r1.hit_load_valid = '1' then
            # Request came from MMU
            with m.If(r1.hit_load_valid):
                # report "completing load hit to MMU, data="
                #     & to_hstring(m_out.data);
                print(f"completing load hit to MMU, data={m_out.data}")

            # -- error cases complete without stalling
            # if r1.mmu_error = '1' then
            #     report "completing MMU ld with error";
            # error cases complete without stalling
            with m.If(r1.mmu_error):
                print("completing MMU ld with error")

            # -- Slow ops (i.e. load miss)
            # if r1.slow_valid = '1' then
            # Slow ops (i.e. load miss)
            with m.If(r1.slow_valid):
                # report "completing MMU load miss, data="
                #     & to_hstring(m_out.data);
                print(f"completing MMU load miss, data={m_out.data}")

        return m
1981 # -- Generate a cache RAM for each way. This handles the normal
1982 # -- reads, writes from reloads and the special store-hit update
1985 # -- Note: the BRAMs have an extra read buffer, meaning the output
1986 # -- is pipelined an extra cycle. This differs from the
1987 # -- icache. The writeback logic needs to take that into
1988 # -- account by using 1-cycle delayed signals for load hits.
1990 # rams: for i in 0 to NUM_WAYS-1 generate
1991 # signal do_read : std_ulogic;
1992 # signal rd_addr : std_ulogic_vector(ROW_BITS-1 downto 0);
1993 # signal do_write : std_ulogic;
1994 # signal wr_addr : std_ulogic_vector(ROW_BITS-1 downto 0);
1996 # std_ulogic_vector(wishbone_data_bits-1 downto 0);
1997 # signal wr_sel : std_ulogic_vector(ROW_SIZE-1 downto 0);
1998 # signal wr_sel_m : std_ulogic_vector(ROW_SIZE-1 downto 0);
1999 # signal dout : cache_row_t;
2001 # way: entity work.cache_ram
2003 # ROW_BITS => ROW_BITS,
2004 # WIDTH => wishbone_data_bits,
2010 # rd_addr => rd_addr,
2012 # wr_sel => wr_sel_m,
2013 # wr_addr => wr_addr,
2014 # wr_data => wr_data
2018 class TODO(Elaboratable
):
2022 def elaborate(self
, platform
):
2029 # -- Cache hit reads
2032 # std_ulogic_vector(to_unsigned(early_req_row, ROW_BITS));
2033 # cache_out(i) <= dout;
2035 comb
+= do_read
.eq(1)
2036 comb
+= rd_addr
.eq(Signal(ROW
))
2037 comb
+= cache_out
[i
].eq(dout
)
        # -- Defaults to wishbone read responses (cache refill)
        # -- For timing, the mux on wr_data/sel/addr is not
        # -- dependent on anything other than the current state.
        # Defaults to wishbone read responses (cache refill)
        # For timing, the mux on wr_data/sel/addr is not
        # dependent on anything other than the current state.
        # wr_sel_m <= (others => '0');
        comb += wr_sel_m.eq(0)
        comb += do_write.eq(0)
        # if r1.write_bram = '1' then
        with m.If(r1.write_bram):
            # -- Write store data to BRAM. This happens one
            # -- cycle after the store is in r0.
            # Write store data to BRAM. This happens one
            # cycle after the store is in r0.
            # wr_data <= r1.req.data;
            # wr_sel <= r1.req.byte_sel;
            # wr_addr <= std_ulogic_vector(to_unsigned(
            #     get_row(r1.req.real_addr), ROW_BITS));
            comb += wr_data.eq(r1.req.data)
            comb += wr_sel.eq(r1.req.byte_sel)
            comb += wr_addr.eq(get_row(r1.req.real_addr))
            # if i = r1.req.hit_way then
            with m.If(i == r1.req.hit_way):
                comb += do_write.eq(1)
        with m.Else():
            # -- Otherwise, we might be doing a reload or a DCBZ
            # if r1.dcbz = '1' then
            # Otherwise, we might be doing a reload or a DCBZ
            with m.If(r1.dcbz):
                # wr_data <= (others => '0');
                comb += wr_data.eq(0)
            with m.Else():
                # wr_data <= wishbone_in.dat;
                comb += wr_data.eq(wb_in.dat)
            # wr_addr <= std_ulogic_vector(to_unsigned(
            #     r1.store_row, ROW_BITS));
            # wr_sel <= (others => '1');
            comb += wr_addr.eq(r1.store_row)
            comb += wr_sel.eq(~0)  # all byte lanes
            # if r1.state = RELOAD_WAIT_ACK and
            # wishbone_in.ack = '1' and replace_way = i then
            with m.If((r1.state == State.RELOAD_WAIT_ACK)
                      & wb_in.ack & (replace_way == i)):
                comb += do_write.eq(1)
        # -- Mask write selects with do_write since BRAM
        # -- doesn't have a global write-enable
        # if do_write = '1' then
        # Mask write selects with do_write since BRAM
        # doesn't have a global write-enable
        with m.If(do_write):
            # wr_sel_m <= wr_sel;
            comb += wr_sel_m.eq(wr_sel)
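        # Sketch (editor's note): an equivalent mask without the If,
        # replicating do_write across all ROW_SIZE byte lanes; shown
        # commented out so wr_sel_m is not driven a second time:
        #     comb += wr_sel_m.eq(wr_sel & Repl(do_write, ROW_SIZE))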
# -- Cache hit synchronous machine for the easy case.
# -- This handles load hits.
# -- It also handles error cases (TLB miss, cache paradox)
# dcache_fast_hit : process(clk)
# Cache hit synchronous machine for the easy case.
# This handles load hits.
# It also handles error cases (TLB miss, cache paradox)
class DcacheFastHit(Elaboratable):
    def elaborate(self, platform):
        # if rising_edge(clk) then
        # if req_op /= OP_NONE then
        with m.If(req_op != Op.OP_NONE):
            # report "op:" & op_t'image(req_op) &
            # " addr:" & to_hstring(r0.req.addr) &
            # " nc:" & std_ulogic'image(r0.req.nc) &
            # " idx:" & integer'image(req_index) &
            # " tag:" & to_hstring(req_tag) &
            # " way: " & integer'image(req_hit_way);
            print(f"op:{req_op} addr:{r0.req.addr} nc:{r0.req.nc}"
                  f" idx:{req_index} tag:{req_tag} way:{req_hit_way}")
        # if r0_valid = '1' then
        with m.If(r0_valid):
            # r1.mmu_req <= r0.mmu_req;
            sync += r1.mmu_req.eq(r0.mmu_req)
        # -- Fast path for load/store hits.
        # -- Set signals for the writeback controls.
        # r1.hit_way <= req_hit_way;
        # r1.hit_index <= req_index;
        # Fast path for load/store hits.
        # Set signals for the writeback controls.
        sync += r1.hit_way.eq(req_hit_way)
        sync += r1.hit_index.eq(req_index)
        # if req_op = OP_LOAD_HIT then
        with m.If(req_op == Op.OP_LOAD_HIT):
            # r1.hit_load_valid <= '1';
            sync += r1.hit_load_valid.eq(1)
        with m.Else():
            # r1.hit_load_valid <= '0';
            sync += r1.hit_load_valid.eq(0)
        # if req_op = OP_LOAD_HIT or req_op = OP_STORE_HIT then
        with m.If((req_op == Op.OP_LOAD_HIT)
                  | (req_op == Op.OP_STORE_HIT)):
            # r1.cache_hit <= '1';
            sync += r1.cache_hit.eq(1)
        with m.Else():
            # r1.cache_hit <= '0';
            sync += r1.cache_hit.eq(0)
        # if req_op = OP_BAD then
        with m.If(req_op == Op.OP_BAD):
            # report "Signalling ld/st error valid_ra=" &
            # std_ulogic'image(valid_ra) & " rc_ok=" &
            # std_ulogic'image(rc_ok) & " perm_ok=" &
            # std_ulogic'image(perm_ok);
            print(f"Signalling ld/st error valid_ra={valid_ra}"
                  f" rc_ok={rc_ok} perm_ok={perm_ok}")
            # r1.ls_error <= not r0.mmu_req;
            # r1.mmu_error <= r0.mmu_req;
            # r1.cache_paradox <= access_ok;
            sync += r1.ls_error.eq(~r0.mmu_req)
            sync += r1.mmu_error.eq(r0.mmu_req)
            sync += r1.cache_paradox.eq(access_ok)
        with m.Else():
            # r1.ls_error <= '0';
            # r1.mmu_error <= '0';
            # r1.cache_paradox <= '0';
            sync += r1.ls_error.eq(0)
            sync += r1.mmu_error.eq(0)
            sync += r1.cache_paradox.eq(0)
        # if req_op = OP_STCX_FAIL then
        with m.If(req_op == Op.OP_STCX_FAIL):
            # r1.stcx_fail <= '1';
            sync += r1.stcx_fail.eq(1)
        with m.Else():
            # r1.stcx_fail <= '0';
            sync += r1.stcx_fail.eq(0)
        # -- Record TLB hit information for updating TLB PLRU
        # r1.tlb_hit <= tlb_hit;
        # r1.tlb_hit_way <= tlb_hit_way;
        # r1.tlb_hit_index <= tlb_req_index;
        # Record TLB hit information for updating TLB PLRU
        sync += r1.tlb_hit.eq(tlb_hit)
        sync += r1.tlb_hit_way.eq(tlb_hit_way)
        sync += r1.tlb_hit_index.eq(tlb_req_index)
# -- Memory accesses are handled by this state machine:
# -- * Cache load miss/reload (in conjunction with "rams")
# -- * Load hits for non-cachable forms
# -- * Stores (the collision case is handled in "rams")
# -- All wishbone requests generation is done here.
# -- This machine operates at stage 1.
# dcache_slow : process(clk)
# Memory accesses are handled by this state machine:
# * Cache load miss/reload (in conjunction with "rams")
# * Load hits for non-cachable forms
# * Stores (the collision case is handled in "rams")
# All wishbone request generation is done here.
# This machine operates at stage 1.
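# A minimal sketch (editor's assumption, not the final port) of the
# overall shape: the VHDL case statement over r1.state becomes an
# nmigen Switch/Case tree in the sync domain. The bodies here are
# placeholders only; the real logic follows in DcacheSlow below.
def _sketch_slow_fsm(m, sync, r1, wb_in):
    with m.Switch(r1.state):
        with m.Case(State.IDLE):
            pass  # issue new wishbone cycles from here
        with m.Case(State.RELOAD_WAIT_ACK):
            with m.If(wb_in.ack):  # advance the refill on each ack
                sync += r1.store_row.eq(next_row(r1.store_row))
        with m.Case(State.STORE_WAIT_ACK):
            pass  # count outstanding acks until the last one
        with m.Case(State.NC_LOAD_WAIT_ACK):
            with m.If(wb_in.ack):  # a single ack completes an NC load
                sync += r1.state.eq(State.IDLE)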
class DcacheSlow(Elaboratable):
    def elaborate(self, platform):
        # variable stbs_done : boolean;
        # variable req : mem_access_request_t;
        # variable acks : unsigned(2 downto 0);
        stbs_done = Signal()
        req = MemAccessRequest()
        acks = Signal(3)  # tracks outstanding wishbone acks (see below)
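        # NOTE (editor's note): VHDL process variables (stbs_done, req,
        # acks) update immediately within the process, but here they
        # are Signals written via sync, so every read below observes
        # the value from the previous cycle. A closer translation would
        # drive them combinatorially (comb +=) or chain intermediate
        # Signals; left as a known divergence to resolve.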
        # if rising_edge(clk) then
        # r1.use_forward1 <= use_forward1_next;
        # r1.forward_sel <= (others => '0');
        sync += r1.use_forward1.eq(use_forward1_next)
        sync += r1.forward_sel.eq(0)
        # if use_forward1_next = '1' then
        with m.If(use_forward1_next):
            # r1.forward_sel <= r1.req.byte_sel;
            sync += r1.forward_sel.eq(r1.req.byte_sel)
        # elsif use_forward2_next = '1' then
        with m.Elif(use_forward2_next):
            # r1.forward_sel <= r1.forward_sel1;
            sync += r1.forward_sel.eq(r1.forward_sel1)
        # r1.forward_data2 <= r1.forward_data1;
        sync += r1.forward_data2.eq(r1.forward_data1)
        # if r1.write_bram = '1' then
        with m.If(r1.write_bram):
            # r1.forward_data1 <= r1.req.data;
            # r1.forward_sel1 <= r1.req.byte_sel;
            # r1.forward_way1 <= r1.req.hit_way;
            # r1.forward_row1 <= get_row(r1.req.real_addr);
            # r1.forward_valid1 <= '1';
            sync += r1.forward_data1.eq(r1.req.data)
            sync += r1.forward_sel1.eq(r1.req.byte_sel)
            sync += r1.forward_way1.eq(r1.req.hit_way)
            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
            sync += r1.forward_valid1.eq(1)
        with m.Else():
            # if r1.dcbz = '1' then
            with m.If(r1.dcbz):
                # r1.forward_data1 <= (others => '0');
                sync += r1.forward_data1.eq(0)
            with m.Else():
                # r1.forward_data1 <= wishbone_in.dat;
                sync += r1.forward_data1.eq(wb_in.dat)
            # r1.forward_sel1 <= (others => '1');
            # r1.forward_way1 <= replace_way;
            # r1.forward_row1 <= r1.store_row;
            # r1.forward_valid1 <= '0';
            sync += r1.forward_sel1.eq(~0)  # all ones
            sync += r1.forward_way1.eq(replace_way)
            sync += r1.forward_row1.eq(r1.store_row)
            sync += r1.forward_valid1.eq(0)
        # -- On reset, clear all valid bits to force misses
        # On reset, clear all valid bits to force misses
        # NOTE (assumption): nmigen's sync domain carries an implicit
        # synchronous reset; ResetSignal() (from nmigen) exposes it so
        # this VHDL-style reset branch can be written explicitly.
        with m.If(ResetSignal()):
            # for i in index_t loop
            for i in range(NUM_LINES):
                # cache_valids(i) <= (others => '0');
                sync += cache_valid_bits[i].eq(0)
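            # Sketch (editor's note): the idiomatic nmigen alternative
            # is to skip the explicit reset branch and give each
            # state-holding Signal its reset value at construction,
            # e.g. Signal(NUM_WAYS, reset=0); the sync domain then
            # applies it automatically whenever reset is asserted.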
            # r1.slow_valid <= '0';
            # r1.ls_valid <= '0';
            # r1.mmu_done <= '0';
            sync += r1.state.eq(State.IDLE)
            sync += r1.full.eq(0)
            sync += r1.slow_valid.eq(0)
            sync += r1.wb.cyc.eq(0)
            sync += r1.wb.stb.eq(0)
            sync += r1.ls_valid.eq(0)
            sync += r1.mmu_done.eq(0)
            # -- Not useful normally but helps avoiding
            # -- tons of sim warnings
            # Not useful normally but helps avoiding
            # tons of sim warnings
            # r1.wb.adr <= (others => '0');
            sync += r1.wb.adr.eq(0)
        with m.Else():
            # -- One cycle pulses reset
            # r1.slow_valid <= '0';
            # r1.write_bram <= '0';
            # r1.inc_acks <= '0';
            # r1.dec_acks <= '0';
            # r1.ls_valid <= '0';
            # -- complete tlbies and TLB loads in the third cycle
            # r1.mmu_done <= r0_valid and (r0.tlbie or r0.tlbld);
            # One cycle pulses reset
            sync += r1.slow_valid.eq(0)
            sync += r1.write_bram.eq(0)
            sync += r1.inc_acks.eq(0)
            sync += r1.dec_acks.eq(0)
            sync += r1.ls_valid.eq(0)
            # complete tlbies and TLB loads in the third cycle
            sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
            # if req_op = OP_LOAD_HIT or req_op = OP_STCX_FAIL then
            with m.If((req_op == Op.OP_LOAD_HIT)
                      | (req_op == Op.OP_STCX_FAIL)):
                # if r0.mmu_req = '0' then
                with m.If(~r0.mmu_req):
                    # r1.ls_valid <= '1';
                    sync += r1.ls_valid.eq(1)
                with m.Else():
                    # r1.mmu_done <= '1';
                    sync += r1.mmu_done.eq(1)
            # if r1.write_tag = '1' then
            with m.If(r1.write_tag):
                # -- Store new tag in selected way
                # for i in 0 to NUM_WAYS-1 loop
                # Store new tag in selected way
                for i in range(NUM_WAYS):
                    # if i = replace_way then
                    with m.If(i == replace_way):
                        # cache_tags(r1.store_index)(
                        #     (i + 1) * TAG_WIDTH - 1
                        #     downto i * TAG_WIDTH) <=
                        #     (TAG_WIDTH - 1 downto TAG_BITS => '0')
                        #     & r1.reload_tag;
                        # (.eq zero-extends, so the VHDL zero-padding
                        #  is implicit here)
                        sync += cache_tags[r1.store_index][
                            i * TAG_WIDTH:(i + 1) * TAG_WIDTH
                        ].eq(r1.reload_tag)
                # r1.store_way <= replace_way;
                # r1.write_tag <= '0';
                sync += r1.store_way.eq(replace_way)
                sync += r1.write_tag.eq(0)
            # -- Take request from r1.req if there is one there,
            # -- else from req_op, ra, etc.
            # if r1.full = '1' then
            # Take request from r1.req if there is one there,
            # else from req_op, ra, etc.
            with m.If(r1.full):
                sync += req.eq(r1.req)
            with m.Else():
                # req.valid := req_go;
                # req.mmu_req := r0.mmu_req;
                # req.dcbz := r0.req.dcbz;
                # req.real_addr := ra;
                sync += req.op.eq(req_op)
                sync += req.valid.eq(req_go)
                sync += req.mmu_req.eq(r0.mmu_req)
                sync += req.dcbz.eq(r0.req.dcbz)
                sync += req.real_addr.eq(ra)
                # -- Force data to 0 for dcbz
                # if r0.req.dcbz = '0' then
                # Force data to 0 for dcbz
                with m.If(~r0.req.dcbz):
                    # req.data := r0.req.data;
                    sync += req.data.eq(r0.req.data)
                with m.Else():
                    # req.data := (others => '0');
                    sync += req.data.eq(0)
                # -- Select all bytes for dcbz
                # -- and for cacheable loads
                # if r0.req.dcbz = '1'
                # or (r0.req.load = '1' and r0.req.nc = '0') then
                # Select all bytes for dcbz
                # and for cacheable loads
                with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
                    # req.byte_sel := (others => '1');
                    sync += req.byte_sel.eq(~0)  # all ones
                with m.Else():
                    # req.byte_sel := r0.req.byte_sel;
                    sync += req.byte_sel.eq(r0.req.byte_sel)
                # req.hit_way := req_hit_way;
                # req.same_tag := req_same_tag;
                sync += req.hit_way.eq(req_hit_way)
                sync += req.same_tag.eq(req_same_tag)
                # -- Store the incoming request from r0,
                # -- if it is a slow request
                # -- Note that r1.full = 1 implies req_op = OP_NONE
                # if req_op = OP_LOAD_MISS or req_op = OP_LOAD_NC
                # or req_op = OP_STORE_MISS
                # or req_op = OP_STORE_HIT then
                # Store the incoming request from r0,
                # if it is a slow request
                # Note that r1.full = 1 implies req_op = OP_NONE
                with m.If((req_op == Op.OP_LOAD_MISS)
                          | (req_op == Op.OP_LOAD_NC)
                          | (req_op == Op.OP_STORE_MISS)
                          | (req_op == Op.OP_STORE_HIT)):
                    sync += r1.full.eq(1)
            # -- Main state machine
            # Main state machine
            with m.Switch(r1.state):
                with m.Case(State.IDLE):
                    # r1.wb.adr <= req.real_addr(r1.wb.adr'left downto 0);
                    # r1.wb.sel <= req.byte_sel;
                    # r1.wb.dat <= req.data;
                    # r1.dcbz <= req.dcbz;
                    # -- Keep track of our index and way
                    # -- for subsequent stores.
                    # r1.store_index <= get_index(req.real_addr);
                    # r1.store_row <= get_row(req.real_addr);
                    # r1.end_row_ix <=
                    #     get_row_of_line(get_row(req.real_addr)) - 1;
                    # r1.reload_tag <= get_tag(req.real_addr);
                    # r1.req.same_tag <= '1';
                    sync += r1.wb.adr.eq(req.real_addr[0:len(r1.wb.adr)])
                    sync += r1.wb.sel.eq(req.byte_sel)
                    sync += r1.wb.dat.eq(req.data)
                    sync += r1.dcbz.eq(req.dcbz)
                    # Keep track of our index and way
                    # for subsequent stores.
                    sync += r1.store_index.eq(get_index(req.real_addr))
                    sync += r1.store_row.eq(get_row(req.real_addr))
                    sync += r1.end_row_ix.eq(
                        get_row_of_line(get_row(req.real_addr)) - 1)
                    sync += r1.reload_tag.eq(get_tag(req.real_addr))
                    sync += r1.req.same_tag.eq(1)
                    # if req.op = OP_STORE_HIT then
                    with m.If(req.op == Op.OP_STORE_HIT):
                        # r1.store_way <= req.hit_way;
                        sync += r1.store_way.eq(req.hit_way)
                    # -- Reset per-row valid bits,
                    # -- ready for handling OP_LOAD_MISS
                    # for i in 0 to ROW_PER_LINE - 1 loop
                    # Reset per-row valid bits,
                    # ready for handling OP_LOAD_MISS
                    for i in range(ROW_PER_LINE):
                        # r1.rows_valid(i) <= '0';
                        sync += r1.rows_valid[i].eq(0)
                    with m.Switch(req.op):
                        # when OP_LOAD_HIT =>
                        with m.Case(Op.OP_LOAD_HIT):
                            # -- stay in IDLE state
                            # stay in IDLE state
                            pass
                        # when OP_LOAD_MISS =>
                        with m.Case(Op.OP_LOAD_MISS):
                            # -- Normal load cache miss,
                            # -- start the reload machine
                            # report "cache miss real addr:" &
                            # to_hstring(req.real_addr) & " idx:" &
                            # integer'image(get_index(req.real_addr)) &
                            # " tag:" & to_hstring(get_tag(req.real_addr));
                            # Normal load cache miss,
                            # start the reload machine
                            print(f"cache miss real addr:{req.real_addr}"
                                  f" idx:{get_index(req.real_addr)}"
                                  f" tag:{get_tag(req.real_addr)}")
                            # -- Start the wishbone cycle
                            # Start the wishbone cycle
                            sync += r1.wb.we.eq(0)
                            sync += r1.wb.cyc.eq(1)
                            sync += r1.wb.stb.eq(1)
                            # -- Track that we had one request sent
                            # r1.state <= RELOAD_WAIT_ACK;
                            # r1.write_tag <= '1';
                            # Track that we had one request sent
                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                            sync += r1.write_tag.eq(1)
                        # when OP_LOAD_NC =>
                        with m.Case(Op.OP_LOAD_NC):
                            # r1.state <= NC_LOAD_WAIT_ACK;
                            sync += r1.wb.cyc.eq(1)
                            sync += r1.wb.stb.eq(1)
                            sync += r1.wb.we.eq(0)
                            sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)
                        # when OP_STORE_HIT | OP_STORE_MISS =>
                        with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
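                            # NOTE (editor's note): m.Case takes several
                            # match patterns as separate arguments;
                            # OR-ing the enum values (6 | 7 == 7) would
                            # silently match OP_STORE_MISS only.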
                            # if req.dcbz = '0' then
                            with m.If(~req.dcbz):
                                # r1.state <= STORE_WAIT_ACK;
                                # r1.acks_pending <= to_unsigned(1, 3);
                                # r1.slow_valid <= '1';
                                sync += r1.state.eq(State.STORE_WAIT_ACK)
                                # to_unsigned(1, 3) is simply the value
                                # 1 in the 3-bit acks_pending register
                                sync += r1.acks_pending.eq(1)
                                sync += r1.full.eq(0)
                                sync += r1.slow_valid.eq(1)
                                # if req.mmu_req = '0' then
                                with m.If(~req.mmu_req):
                                    # r1.ls_valid <= '1';
                                    sync += r1.ls_valid.eq(1)
                                with m.Else():
                                    # r1.mmu_done <= '1';
                                    sync += r1.mmu_done.eq(1)
                                # if req.op = OP_STORE_HIT then
                                with m.If(req.op == Op.OP_STORE_HIT):
                                    # r1.write_bram <= '1';
                                    sync += r1.write_bram.eq(1)
                            with m.Else():
                                # -- dcbz is handled much like a load
                                # -- miss except that we are writing
                                # -- to memory instead of reading
                                # r1.state <= RELOAD_WAIT_ACK;
                                # dcbz is handled much like a load
                                # miss except that we are writing
                                # to memory instead of reading
                                sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                                # if req.op = OP_STORE_MISS then
                                with m.If(req.op == Op.OP_STORE_MISS):
                                    # r1.write_tag <= '1';
                                    sync += r1.write_tag.eq(1)
                            sync += r1.wb.we.eq(1)
                            sync += r1.wb.cyc.eq(1)
                            sync += r1.wb.stb.eq(1)
                        # -- OP_NONE and OP_BAD do nothing
                        # -- OP_BAD & OP_STCX_FAIL were handled above already
                        # when OP_STCX_FAIL =>
                        # OP_NONE and OP_BAD do nothing
                        # OP_BAD & OP_STCX_FAIL were handled above already
                        with m.Case(Op.OP_NONE):
                            pass
                        with m.Case(Op.OP_BAD):
                            pass
                        with m.Case(Op.OP_STCX_FAIL):
                            pass
                # when RELOAD_WAIT_ACK =>
                with m.Case(State.RELOAD_WAIT_ACK):
                    # -- Requests are all sent if stb is 0
                    # Requests are all sent if stb is 0
                    # stbs_done := r1.wb.stb = '0';
                    sync += stbs_done.eq(~r1.wb.stb)
                    # -- If we are still sending requests,
                    # -- was one accepted?
                    # if wishbone_in.stall = '0' and not stbs_done then
                    # If we are still sending requests,
                    # was one accepted?
                    with m.If(~wb_in.stall & ~stbs_done):
                        # -- That was the last word ? We are done sending.
                        # -- Clear stb and set stbs_done so we can handle
                        # -- an eventual last ack on the same cycle.
                        # if is_last_row_addr(r1.wb.adr, r1.end_row_ix) then
                        # That was the last word ? We are done sending.
                        # Clear stb and set stbs_done so we can handle
                        # an eventual last ack on the same cycle.
                        with m.If(is_last_row_addr(r1.wb.adr,
                                                   r1.end_row_ix)):
                            # stbs_done := true;
                            sync += r1.wb.stb.eq(0)
                            sync += stbs_done.eq(1)
                        # -- Calculate the next row address
                        # r1.wb.adr <= next_row_addr(r1.wb.adr);
                        # Calculate the next row address
                        sync += r1.wb.adr.eq(next_row_addr(r1.wb.adr))
                    # -- Incoming acks processing
                    # r1.forward_valid1 <= wishbone_in.ack;
                    # Incoming acks processing
                    sync += r1.forward_valid1.eq(wb_in.ack)
                    # if wishbone_in.ack = '1' then
                    with m.If(wb_in.ack):
                        # r1.rows_valid(
                        #     r1.store_row mod ROW_PER_LINE) <= '1';
                        sync += r1.rows_valid[
                            r1.store_row % ROW_PER_LINE].eq(1)
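                        # Sketch (editor's note): ROW_PER_LINE is a
                        # power of two, so the modulo above could also
                        # be a cheap bit-slice of the row counter, e.g.
                        #     r1.store_row[:log2_int(ROW_PER_LINE)]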
                        # -- If this is the data we were looking for,
                        # -- we can complete the request next cycle.
                        # -- Compare the whole address in case the
                        # -- request in r1.req is not the one that
                        # -- started this refill.
                        # if r1.full = '1' and r1.req.same_tag = '1'
                        # and ((r1.dcbz = '1' and r1.req.dcbz = '1')
                        # or (r1.dcbz = '0' and r1.req.op = OP_LOAD_MISS))
                        # and r1.store_row = get_row(r1.req.real_addr) then
                        # If this is the data we were looking for,
                        # we can complete the request next cycle.
                        # Compare the whole address in case the
                        # request in r1.req is not the one that
                        # started this refill.
                        with m.If(r1.full & r1.req.same_tag
                                  & ((r1.dcbz & r1.req.dcbz)
                                     | (~r1.dcbz
                                        & (r1.req.op == Op.OP_LOAD_MISS)))
                                  & (r1.store_row
                                     == get_row(r1.req.real_addr))):
                            # r1.slow_valid <= '1';
                            sync += r1.full.eq(0)
                            sync += r1.slow_valid.eq(1)
                            # if r1.mmu_req = '0' then
                            with m.If(~r1.mmu_req):
                                # r1.ls_valid <= '1';
                                sync += r1.ls_valid.eq(1)
                            with m.Else():
                                # r1.mmu_done <= '1';
                                sync += r1.mmu_done.eq(1)
                            # r1.forward_sel <= (others => '1');
                            # r1.use_forward1 <= '1';
                            sync += r1.forward_sel.eq(~0)  # all ones
                            sync += r1.use_forward1.eq(1)
                        # -- Check for completion
                        # if stbs_done and is_last_row(r1.store_row,
                        #     r1.end_row_ix) then
                        # Check for completion
                        with m.If(stbs_done
                                  & is_last_row(r1.store_row,
                                                r1.end_row_ix)):
                            # -- Complete wishbone cycle
                            # Complete wishbone cycle
                            sync += r1.wb.cyc.eq(0)
                            # -- Cache line is now valid
                            # cache_valids(r1.store_index)(
                            #     r1.store_way) <= '1';
                            # Cache line is now valid
                            sync += cache_valid_bits[r1.store_index][
                                r1.store_way].eq(1)
                            sync += r1.state.eq(State.IDLE)
                        # -- Increment store row counter
                        # r1.store_row <= next_row(r1.store_row);
                        # Increment store row counter
                        sync += r1.store_row.eq(next_row(r1.store_row))
                # when STORE_WAIT_ACK =>
                with m.Case(State.STORE_WAIT_ACK):
                    # stbs_done := r1.wb.stb = '0';
                    # acks := r1.acks_pending;
                    sync += stbs_done.eq(~r1.wb.stb)
                    sync += acks.eq(r1.acks_pending)
                    # if r1.inc_acks /= r1.dec_acks then
                    with m.If(r1.inc_acks != r1.dec_acks):
                        # if r1.inc_acks = '1' then
                        with m.If(r1.inc_acks):
                            sync += acks.eq(acks + 1)
                        with m.Else():
                            sync += acks.eq(acks - 1)
                    # r1.acks_pending <= acks;
                    sync += r1.acks_pending.eq(acks)
                    # -- Clear stb when slave accepted request
                    # if wishbone_in.stall = '0' then
                    # Clear stb when slave accepted request
                    with m.If(~wb_in.stall):
                        # -- See if there is another store waiting
                        # -- to be done which is in the same real page.
                        # if req.valid = '1' then
                        # See if there is another store waiting
                        # to be done which is in the same real page.
                        with m.If(req.valid):
                            # r1.wb.adr(
                            #     SET_SIZE_BITS - 1 downto 0
                            # ) <= req.real_addr(
                            #     SET_SIZE_BITS - 1 downto 0
                            # );
                            # r1.wb.dat <= req.data;
                            # r1.wb.sel <= req.byte_sel;
                            sync += r1.wb.adr[0:SET_SIZE_BITS].eq(
                                req.real_addr[0:SET_SIZE_BITS])
                            sync += r1.wb.dat.eq(req.data)
                            sync += r1.wb.sel.eq(req.byte_sel)
                        # if acks < 7 and req.same_tag = '1'
                        # and (req.op = OP_STORE_MISS
                        # or req.op = OP_STORE_HIT) then
                        with m.If((acks < 7) & req.same_tag
                                  & ((req.op == Op.OP_STORE_MISS)
                                     | (req.op == Op.OP_STORE_HIT))):
                            # stbs_done := false;
                            sync += r1.wb.stb.eq(1)
                            sync += stbs_done.eq(0)
                            # if req.op = OP_STORE_HIT then
                            with m.If(req.op == Op.OP_STORE_HIT):
                                # r1.write_bram <= '1';
                                sync += r1.write_bram.eq(1)
                            # r1.slow_valid <= '1';
                            sync += r1.full.eq(0)
                            sync += r1.slow_valid.eq(1)
                            # -- Store requests never come from the MMU
                            # r1.ls_valid <= '1';
                            # stbs_done := false;
                            # r1.inc_acks <= '1';
                            # Store requests never come from the MMU
                            sync += r1.ls_valid.eq(1)
                            sync += stbs_done.eq(0)
                            sync += r1.inc_acks.eq(1)
                        with m.Else():
                            # stbs_done := true;
                            sync += r1.wb.stb.eq(0)
                            sync += stbs_done.eq(1)
                    # -- Got ack ? See if complete.
                    # if wishbone_in.ack = '1' then
                    # Got ack ? See if complete.
                    with m.If(wb_in.ack):
                        # if stbs_done and acks = 1 then
                        with m.If(stbs_done & (acks == 1)):
                            sync += r1.state.eq(State.IDLE)
                            sync += r1.wb.cyc.eq(0)
                            sync += r1.wb.stb.eq(0)
                        # r1.dec_acks <= '1';
                        sync += r1.dec_acks.eq(1)
                # when NC_LOAD_WAIT_ACK =>
                with m.Case(State.NC_LOAD_WAIT_ACK):
                    # -- Clear stb when slave accepted request
                    # if wishbone_in.stall = '0' then
                    # Clear stb when slave accepted request
                    with m.If(~wb_in.stall):
                        sync += r1.wb.stb.eq(0)
                    # -- Got ack ? complete.
                    # if wishbone_in.ack = '1' then
                    # Got ack ? complete.
                    with m.If(wb_in.ack):
                        # r1.slow_valid <= '1';
                        sync += r1.state.eq(State.IDLE)
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)
                        # if r1.mmu_req = '0' then
                        with m.If(~r1.mmu_req):
                            # r1.ls_valid <= '1';
                            sync += r1.ls_valid.eq(1)
                        with m.Else():
                            # r1.mmu_done <= '1';
                            sync += r1.mmu_done.eq(1)
                        # r1.forward_sel <= (others => '1');
                        # r1.use_forward1 <= '1';
                        sync += r1.forward_sel.eq(~0)  # all ones
                        sync += r1.use_forward1.eq(1)
                        sync += r1.wb.cyc.eq(0)
                        sync += r1.wb.stb.eq(0)
# dc_log: if LOG_LENGTH > 0 generate
# A conditional VHDL "generate" becomes a plain Python "if" at
# elaboration time (see the sketch inside elaborate below).
class DcacheLog(Elaboratable):
    def elaborate(self, platform):
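        # Sketch (editor's assumption): when this block is folded back
        # into Dcache, guard the whole log section with a Python
        # conditional so the hardware only exists when logging is on:
        #     if LOG_LENGTH > 0:
        #         log_data = Signal(20)
        #         sync += log_data.eq(...)
        #         comb += log_out.eq(log_data)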
        # signal log_data : std_ulogic_vector(19 downto 0);
        log_data = Signal(20)
        # dcache_log: process(clk)
        # if rising_edge(clk) then
        # log_data <= r1.wb.adr(5 downto 3) &
        #     wishbone_in.stall &
        #     r1.wb.stb & r1.wb.cyc &
        #     std_ulogic_vector(
        #         to_unsigned(op_t'pos(req_op), 3)) &
        #     std_ulogic_vector(
        #         to_unsigned(tlb_hit_way, 3)) &
        #     std_ulogic_vector(
        #         to_unsigned(state_t'pos(r1.state), 3));
        # nmigen Cat() packs LSB-first, the reverse of the VHDL "&"
        # concatenation, hence the reversed operand order below; each
        # of r1.state, req_op and tlb_hit_way is assumed to occupy a
        # 3-bit slot as in the VHDL.
        sync += log_data.eq(Cat(
            r1.state, valid_ra, tlb_hit_way,
            stall_out, req_op, d_out.valid, d_out.error,
            r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
            r1.wb.adr[3:6]))
        # log_out <= log_data;
        # (a VHDL concurrent assignment maps to a comb assignment)
        comb += log_out.eq(log_data)