# dcache.py -- commit: "move Stage0 RecordObject to top of file"
# [soc.git] / src / soc / experiment / dcache.py
1 """Dcache
2
3 based on Anton Blanchard microwatt dcache.vhdl
4
5 """
6
from enum import Enum, unique

from nmigen import Module, Signal, Elaboratable, Cat, Repl, Array
from nmigen.cli import main
from nmigen.iocontrol import RecordObject
from nmigen.util import log2_int

from experiment.mem_types import (LoadStore1ToDcacheType,
                                  DcacheToLoadStore1Type,
                                  MmuToDcacheType,
                                  DcacheToMmuType)

from experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                 WBAddrType, WBDataType, WBSelType,
                                 WBMasterOut, WBSlaveOut, WBMasterOutVector,
                                 WBSlaveOutVector, WBIOMasterOut,
                                 WBIOSlaveOut)
25
# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
    """Permission/attribute bits extracted from a radix PTE.

    Field order defines the Record layout, so do not reorder.  See
    extract_perm_attr() for which PTE bit feeds each field.
    """
    def __init__(self):
        super().__init__()
        self.reference = Signal()  # R (reference) bit -- pte[8]
        self.changed = Signal()    # C (changed) bit -- pte[7]
        self.nocache = Signal()    # no-cache attribute -- pte[5]
        self.priv = Signal()       # privileged-access-only -- pte[3]
        self.rd_perm = Signal()    # read permission -- pte[2]
        self.wr_perm = Signal()    # write permission -- pte[1]
37
def extract_perm_attr(pte):
    """Unpack permission/attribute bits from a PTE value.

    Bit positions follow the microwatt dcache.vhdl radix PTE layout.

    NOTE(review): this rebinds the PermAttr Signal attributes to slices
    of *pte* (combinatorial aliasing) rather than driving them with
    eq() in a module domain -- confirm this is the intended idiom.

    :param pte: PTE value (sliceable nmigen value, >= 9 bits wide)
    :returns: PermAttr with each field aliased to its PTE bit
    """
    pa = PermAttr()
    pa.reference = pte[8]  # R (reference) bit
    pa.changed = pte[7]    # C (changed) bit
    pa.nocache = pte[5]    # no-cache attribute
    pa.priv = pte[3]       # privileged-access-only
    pa.rd_perm = pte[2]    # read permission
    pa.wr_perm = pte[1]    # write permission
    return pa              # fixed: stray trailing ';' removed
47
48
# Type of operation on a "valid" input
@unique
class Op(Enum):
    """Operation decoded from a valid incoming request."""
    OP_NONE = 0        # no operation
    OP_BAD = 1         # NC cache hit, TLB miss, prot/RC failure
    OP_STCX_FAIL = 2   # conditional store w/o reservation
    OP_LOAD_HIT = 3    # Cache hit on load
    OP_LOAD_MISS = 4   # Load missing cache
    OP_LOAD_NC = 5     # Non-cachable load
    OP_STORE_HIT = 6   # Store hitting cache
    OP_STORE_MISS = 7  # Store missing cache
60
# Cache state machine
@unique
class State(Enum):
    """Reload/store state machine state (held in RegStage1.state)."""
    IDLE = 0              # Normal load hit processing
    RELOAD_WAIT_ACK = 1   # Cache reload wait ack
    STORE_WAIT_ACK = 2    # Store wait ack
    NC_LOAD_WAIT_ACK = 3  # Non-cachable load wait ack
68
69 # Dcache operations:
70 #
71 # In order to make timing, we use the BRAMs with
72 # an output buffer, which means that the BRAM
73 # output is delayed by an extra cycle.
74 #
75 # Thus, the dcache has a 2-stage internal pipeline
76 # for cache hits with no stalls.
77 #
78 # All other operations are handled via stalling
79 # in the first stage.
80 #
81 # The second stage can thus complete a hit at the same
82 # time as the first stage emits a stall for a complex op.
83 #
# Stage 0 register, basically contains just the latched request
class RegStage0(RecordObject):
    """Latched incoming request (from loadstore1 or the MMU) plus the
    MMU sideband flags that accompany it.  Field order defines the
    Record layout, so do not reorder.
    """
    def __init__(self):
        super().__init__()
        self.req = LoadStore1ToDcacheType()
        self.tlbie = Signal()   # latched from m_in.tlbie (TLB invalidate)
        self.doall = Signal()   # latched from m_in.doall; presumably
                                # "invalidate all" qualifier -- confirm
        self.tlbld = Signal()   # latched from m_in.tlbld (TLB load)
        self.mmu_req = Signal() # indicates source of request
93
94 # --
95 # -- Set associative dcache write-through
96 # --
97 # -- TODO (in no specific order):
98 # --
99 # -- * See list in icache.vhdl
100 # -- * Complete load misses on the cycle when WB data comes instead of
101 # -- at the end of line (this requires dealing with requests coming in
102 # -- while not idle...)
103 # --
104
105 class Dcache(Elaboratable):
    def __init__(self):
        """Declare dcache geometry constants and external ports."""
        # TODO: make these parameters of Dcache at some point
        self.LINE_SIZE = 64     # Line size in bytes
        self.NUM_LINES = 32     # Number of lines in a set
        self.NUM_WAYS = 4       # Number of ways
        self.TLB_SET_SIZE = 64  # L1 DTLB entries per set
        self.TLB_NUM_WAYS = 2   # L1 DTLB number of sets
        self.TLB_LG_PGSZ = 12   # L1 DTLB log_2(page_size)
        self.LOG_LENGTH = 0     # Non-zero to enable log data collection

        # request/response ports to/from loadstore1
        self.d_in = LoadStore1ToDcacheType()
        self.d_out = DcacheToLoadStore1Type()

        # request/response ports to/from the MMU
        self.m_in = MmuToDcacheType()
        self.m_out = DcacheToMmuType()

        # asserted to hold off incoming requests (driven from r0_stall
        # in elaborate())
        self.stall_out = Signal()

        # wishbone master interface towards memory
        self.wb_out = WBMasterOut()
        self.wb_in = WBSlaveOut()

        # debug/log output; presumably mirrors microwatt's log_out
        # port -- confirm consumer of the 20-bit value
        self.log_out = Signal(20)
128
129 def elaborate(self, platform):
130 LINE_SIZE = self.LINE_SIZE
131 NUM_LINES = self.NUM_LINES
132 NUM_WAYS = self.NUM_WAYS
133 TLB_SET_SIZE = self.TLB_SET_SIZE
134 TLB_NUM_WAYS = self.TLB_NUM_WAYS
135 TLB_LG_PGSZ = self.TLB_LG_PGSZ
136 LOG_LENGTH = self.LOG_LENGTH
137
138 # BRAM organisation: We never access more than
139 # -- wishbone_data_bits at a time so to save
140 # -- resources we make the array only that wide, and
141 # -- use consecutive indices for to make a cache "line"
142 # --
143 # -- ROW_SIZE is the width in bytes of the BRAM
144 # -- (based on WB, so 64-bits)
145 ROW_SIZE = WB_DATA_BITS / 8;
146
147 # ROW_PER_LINE is the number of row (wishbone
148 # transactions) in a line
149 ROW_PER_LINE = LINE_SIZE // ROW_SIZE
150
151 # BRAM_ROWS is the number of rows in BRAM needed
152 # to represent the full dcache
153 BRAM_ROWS = NUM_LINES * ROW_PER_LINE
154
155
156 # Bit fields counts in the address
157
158 # REAL_ADDR_BITS is the number of real address
159 # bits that we store
160 REAL_ADDR_BITS = 56
161
162 # ROW_BITS is the number of bits to select a row
163 ROW_BITS = log2_int(BRAM_ROWS)
164
165 # ROW_LINE_BITS is the number of bits to select
166 # a row within a line
167 ROW_LINE_BITS = log2_int(ROW_PER_LINE)
168
169 # LINE_OFF_BITS is the number of bits for
170 # the offset in a cache line
171 LINE_OFF_BITS = log2_int(LINE_SIZE)
172
173 # ROW_OFF_BITS is the number of bits for
174 # the offset in a row
175 ROW_OFF_BITS = log2_int(ROW_SIZE)
176
177 # INDEX_BITS is the number if bits to
178 # select a cache line
179 INDEX_BITS = log2_int(NUM_LINES)
180
181 # SET_SIZE_BITS is the log base 2 of the set size
182 SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
183
184 # TAG_BITS is the number of bits of
185 # the tag part of the address
186 TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
187
188 # TAG_WIDTH is the width in bits of each way of the tag RAM
189 TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
190
191 # WAY_BITS is the number of bits to select a way
192 WAY_BITS = log2_int(NUM_WAYS)
193
194 # Example of layout for 32 lines of 64 bytes:
195 #
196 # .. tag |index| line |
197 # .. | row | |
198 # .. | |---| | ROW_LINE_BITS (3)
199 # .. | |--- - --| LINE_OFF_BITS (6)
200 # .. | |- --| ROW_OFF_BITS (3)
201 # .. |----- ---| | ROW_BITS (8)
202 # .. |-----| | INDEX_BITS (5)
203 # .. --------| | TAG_BITS (45)
204
205
206 # subtype row_t is integer range 0 to BRAM_ROWS-1;
207 # subtype index_t is integer range 0 to NUM_LINES-1;
208 """wherever way_t is used to make a Signal it must be substituted with
209 log2_int(NUM_WAYS) i.e. WAY_BITS. this because whilst the *range*
210 of the number is 0..NUM_WAYS it requires log2_int(NUM_WAYS) i.e.
211 WAY_BITS of space to store it
212 """
213 # subtype way_t is integer range 0 to NUM_WAYS-1;
214 # subtype row_in_line_t is unsigned(ROW_LINE_BITS-1 downto 0);
215 ROW = BRAM_ROWS # yyyeah not really necessary, delete
216 INDEX = NUM_LINES # yyyeah not really necessary, delete
217 WAY = NUM_WAYS # yyyeah not really necessary, delete
218 ROW_IN_LINE = ROW_LINE_BITS # yyyeah not really necessary, delete
219
220 # -- The cache data BRAM organized as described above for each way
221 # subtype cache_row_t is
222 # std_ulogic_vector(wishbone_data_bits-1 downto 0);
223 # The cache data BRAM organized as described above for each way
224 CACHE_ROW = WB_DATA_BITS
225
226 # -- The cache tags LUTRAM has a row per set.
227 # -- Vivado is a pain and will not handle a
228 # -- clean (commented) definition of the cache
229 # -- tags as a 3d memory. For now, work around
230 # -- it by putting all the tags
231 # subtype cache_tag_t is std_logic_vector(TAG_BITS-1 downto 0);
232 # The cache tags LUTRAM has a row per set.
233 # Vivado is a pain and will not handle a
234 # clean (commented) definition of the cache
235 # tags as a 3d memory. For now, work around
236 # it by putting all the tags
237 CACHE_TAG = TAG_BITS
238
239 # -- type cache_tags_set_t is array(way_t) of cache_tag_t;
240 # -- type cache_tags_array_t is array(index_t) of cache_tags_set_t;
241 # constant TAG_RAM_WIDTH : natural := TAG_WIDTH * NUM_WAYS;
242 # subtype cache_tags_set_t is
243 # std_logic_vector(TAG_RAM_WIDTH-1 downto 0);
244 # type cache_tags_array_t is array(index_t) of cache_tags_set_t;
245 # type cache_tags_set_t is array(way_t) of cache_tag_t;
246 # type cache_tags_array_t is array(index_t) of cache_tags_set_t;
247 TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS
248
249 CACHE_TAG_SET = TAG_RAM_WIDTH
250
251 def CacheTagArray():
252 return Array(CacheTagSet() for x in range(INDEX))
253
254 # -- The cache valid bits
255 # subtype cache_way_valids_t is
256 # std_ulogic_vector(NUM_WAYS-1 downto 0);
257 # type cache_valids_t is array(index_t) of cache_way_valids_t;
258 # type row_per_line_valid_t is
259 # array(0 to ROW_PER_LINE - 1) of std_ulogic;
260 # The cache valid bits
261 CACHE_WAY_VALID_BITS = NUM_WAYS
262
263 def CacheValidBitsArray():
264 return Array(CacheWayValidBits() for x in range(INDEX))
265
266 def RowPerLineValidArray():
267 return Array(Signal() for x in range(ROW_PER_LINE))
268
269 # -- Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
270 # signal cache_tags : cache_tags_array_t;
271 # signal cache_tag_set : cache_tags_set_t;
272 # signal cache_valids : cache_valids_t;
273 #
274 # attribute ram_style : string;
275 # attribute ram_style of cache_tags : signal is "distributed";
276 # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
277 cache_tags = CacheTagArray()
278 cache_tag_set = Signal(CACHE_TAG_SET)
279 cache_valid_bits = CacheValidBitsArray()
280
281 # TODO attribute ram_style : string;
282 # TODO attribute ram_style of cache_tags : signal is "distributed";
283
284 # -- L1 TLB.
285 # constant TLB_SET_BITS : natural := log2(TLB_SET_SIZE);
286 # constant TLB_WAY_BITS : natural := log2(TLB_NUM_WAYS);
287 # constant TLB_EA_TAG_BITS : natural :=
288 # 64 - (TLB_LG_PGSZ + TLB_SET_BITS);
289 # constant TLB_TAG_WAY_BITS : natural :=
290 # TLB_NUM_WAYS * TLB_EA_TAG_BITS;
291 # constant TLB_PTE_BITS : natural := 64;
292 # constant TLB_PTE_WAY_BITS : natural :=
293 # TLB_NUM_WAYS * TLB_PTE_BITS;
294 # L1 TLB
295 TLB_SET_BITS = log2_int(TLB_SET_SIZE)
296 TLB_WAY_BITS = log2_int(TLB_NUM_WAYS)
297 TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
298 TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
299 TLB_PTE_BITS = 64
300 TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS;
301
302 # subtype tlb_way_t is integer range 0 to TLB_NUM_WAYS - 1;
303 # subtype tlb_index_t is integer range 0 to TLB_SET_SIZE - 1;
304 # subtype tlb_way_valids_t is
305 # std_ulogic_vector(TLB_NUM_WAYS-1 downto 0);
306 # type tlb_valids_t is
307 # array(tlb_index_t) of tlb_way_valids_t;
308 # subtype tlb_tag_t is
309 # std_ulogic_vector(TLB_EA_TAG_BITS - 1 downto 0);
310 # subtype tlb_way_tags_t is
311 # std_ulogic_vector(TLB_TAG_WAY_BITS-1 downto 0);
312 # type tlb_tags_t is
313 # array(tlb_index_t) of tlb_way_tags_t;
314 # subtype tlb_pte_t is
315 # std_ulogic_vector(TLB_PTE_BITS - 1 downto 0);
316 # subtype tlb_way_ptes_t is
317 # std_ulogic_vector(TLB_PTE_WAY_BITS-1 downto 0);
318 # type tlb_ptes_t is array(tlb_index_t) of tlb_way_ptes_t;
319 # type hit_way_set_t is array(tlb_way_t) of way_t;
320 TLB_WAY = TLB_NUM_WAYS
321
322 TLB_INDEX = TLB_SET_SIZE
323
324 TLB_WAY_VALID_BITS = TLB_NUM_WAYS
325
326 def TLBValidBitsArray():
327 return Array(
328 Signal(TLB_WAY_VALID_BITS) for x in range(TLB_SET_SIZE)
329 )
330
331 TLB_TAG = TLB_EA_TAG_BITS
332
333 TLB_WAY_TAGS = TLB_TAG_WAY_BITS
334
335 def TLBTagsArray():
336 return Array(
337 Signal(TLB_WAY_TAGS) for x in range (TLB_SET_SIZE)
338 )
339
340 TLB_PTE = TLB_PTE_BITS
341
342 TLB_WAY_PTES = TLB_PTE_WAY_BITS
343
344 def TLBPtesArray():
345 return Array(
346 Signal(TLB_WAY_PTES) for x in range(TLB_SET_SIZE)
347 )
348
349 def HitWaySet():
350 return Array(Signal(NUM_WAYS) for x in range(TLB_NUM_WAYS))
351
352 # signal dtlb_valids : tlb_valids_t;
353 # signal dtlb_tags : tlb_tags_t;
354 # signal dtlb_ptes : tlb_ptes_t;
355
356 """note: these are passed to nmigen.hdl.Memory as "attributes". don't
357 know how, just that they are.
358 """
359 # attribute ram_style of dtlb_tags : signal is "distributed";
360 # attribute ram_style of dtlb_ptes : signal is "distributed";
361 dtlb_valids = TLBValidBitsArray()
362 dtlb_tags = TLBTagsArray()
363 dtlb_ptes = TLBPtesArray()
364 # TODO attribute ram_style of dtlb_tags : signal is "distributed";
365 # TODO attribute ram_style of dtlb_ptes : signal is "distributed";
366
367 # signal r0 : reg_stage_0_t;
368 # signal r0_full : std_ulogic;
369 r0 = RegStage0()
370 r0_full = Signal()
371
372 # type mem_access_request_t is record
373 # op : op_t;
374 # valid : std_ulogic;
375 # dcbz : std_ulogic;
376 # real_addr : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
377 # data : std_ulogic_vector(63 downto 0);
378 # byte_sel : std_ulogic_vector(7 downto 0);
379 # hit_way : way_t;
380 # same_tag : std_ulogic;
381 # mmu_req : std_ulogic;
382 # end record;
383 class MemAccessRequest(RecordObject):
384 def __init__(self):
385 super().__init__()
386 self.op = Op()
387 self.valid = Signal()
388 self.dcbz = Signal()
389 self.real_addr = Signal(REAL_ADDR_BITS)
390 self.data = Signal(64)
391 self.byte_sel = Signal(8)
392 self.hit_way = Signal(WAY_BITS)
393 self.same_tag = Signal()
394 self.mmu_req = Signal()
395
396 # -- First stage register, contains state for stage 1 of load hits
397 # -- and for the state machine used by all other operations
398 # type reg_stage_1_t is record
399 # -- Info about the request
400 # full : std_ulogic; -- have uncompleted request
401 # mmu_req : std_ulogic; -- request is from MMU
402 # req : mem_access_request_t;
403 #
404 # -- Cache hit state
405 # hit_way : way_t;
406 # hit_load_valid : std_ulogic;
407 # hit_index : index_t;
408 # cache_hit : std_ulogic;
409 #
410 # -- TLB hit state
411 # tlb_hit : std_ulogic;
412 # tlb_hit_way : tlb_way_t;
413 # tlb_hit_index : tlb_index_t;
414 #
415 # -- 2-stage data buffer for data forwarded from writes to reads
416 # forward_data1 : std_ulogic_vector(63 downto 0);
417 # forward_data2 : std_ulogic_vector(63 downto 0);
418 # forward_sel1 : std_ulogic_vector(7 downto 0);
419 # forward_valid1 : std_ulogic;
420 # forward_way1 : way_t;
421 # forward_row1 : row_t;
422 # use_forward1 : std_ulogic;
423 # forward_sel : std_ulogic_vector(7 downto 0);
424 #
425 # -- Cache miss state (reload state machine)
426 # state : state_t;
427 # dcbz : std_ulogic;
428 # write_bram : std_ulogic;
429 # write_tag : std_ulogic;
430 # slow_valid : std_ulogic;
431 # wb : wishbone_master_out;
432 # reload_tag : cache_tag_t;
433 # store_way : way_t;
434 # store_row : row_t;
435 # store_index : index_t;
436 # end_row_ix : row_in_line_t;
437 # rows_valid : row_per_line_valid_t;
438 # acks_pending : unsigned(2 downto 0);
439 # inc_acks : std_ulogic;
440 # dec_acks : std_ulogic;
441 #
442 # -- Signals to complete (possibly with error)
443 # ls_valid : std_ulogic;
444 # ls_error : std_ulogic;
445 # mmu_done : std_ulogic;
446 # mmu_error : std_ulogic;
447 # cache_paradox : std_ulogic;
448 #
449 # -- Signal to complete a failed stcx.
450 # stcx_fail : std_ulogic;
451 # end record;
452 # First stage register, contains state for stage 1 of load hits
453 # and for the state machine used by all other operations
454 class RegStage1(RecordObject):
455 def __init__(self):
456 super().__init__()
457 # Info about the request
458 self.full = Signal() # have uncompleted request
459 self.mmu_req = Signal() # request is from MMU
460 self.req = MemAccessRequest()
461
462 # Cache hit state
463 self.hit_way = Signal(WAY_BITS)
464 self.hit_load_valid = Signal()
465 self.hit_index = Signal(INDEX)
466 self.cache_hit = Signal()
467
468 # TLB hit state
469 self.tlb_hit = Signal()
470 self.tlb_hit_way = Signal(TLB_WAY)
471 self.tlb_hit_index = Signal(TLB_SET_SIZE)
472 self.
473 # 2-stage data buffer for data forwarded from writes to reads
474 self.forward_data1 = Signal(64)
475 self.forward_data2 = Signal(64)
476 self.forward_sel1 = Signal(8)
477 self.forward_valid1 = Signal()
478 self.forward_way1 = Signal(WAY_BITS)
479 self.forward_row1 = Signal(ROW)
480 self.use_forward1 = Signal()
481 self.forward_sel = Signal(8)
482
483 # Cache miss state (reload state machine)
484 self.state = State()
485 self.dcbz = Signal()
486 self.write_bram = Signal()
487 self.write_tag = Signal()
488 self.slow_valid = Signal()
489 self.wb = WishboneMasterOut()
490 self.reload_tag = Signal(CACHE_TAG)
491 self.store_way = Signal(WAY_BITS)
492 self.store_row = Signal(ROW)
493 self.store_index = Signal(INDEX)
494 self.end_row_ix = Signal(ROW_IN_LINE)
495 self.rows_valid = RowPerLineValidArray()
496 self.acks_pending = Signal(3)
497 self.inc_acks = Signal()
498 self.dec_acks = Signal()
499
500 # Signals to complete (possibly with error)
501 self.ls_valid = Signal()
502 self.ls_error = Signal()
503 self.mmu_done = Signal()
504 self.mmu_error = Signal()
505 self.cache_paradox = Signal()
506
507 # Signal to complete a failed stcx.
508 self.stcx_fail = Signal()
509
510 # signal r1 : reg_stage_1_t;
511 r1 = RegStage1()
512
513 # -- Reservation information
514 # --
515 # type reservation_t is record
516 # valid : std_ulogic;
517 # addr : std_ulogic_vector(63 downto LINE_OFF_BITS);
518 # end record;
519 # Reservation information
520
521 class Reservation(RecordObject):
522 def __init__(self):
523 super().__init__()
524 valid = Signal()
525 # TODO LINE_OFF_BITS is 6
526 addr = Signal(63 downto LINE_OFF_BITS)
527
528 # signal reservation : reservation_t;
529 reservation = Reservation()
530
531 # -- Async signals on incoming request
532 # signal req_index : index_t;
533 # signal req_row : row_t;
534 # signal req_hit_way : way_t;
535 # signal req_tag : cache_tag_t;
536 # signal req_op : op_t;
537 # signal req_data : std_ulogic_vector(63 downto 0);
538 # signal req_same_tag : std_ulogic;
539 # signal req_go : std_ulogic;
540 # Async signals on incoming request
541 req_index = Signal(INDEX)
542 req_row = Signal(ROW)
543 req_hit_way = Signal(WAY_BITS)
544 req_tag = Signal(CACHE_TAG)
545 req_op = Op()
546 req_data = Signal(64)
547 req_same_tag = Signal()
548 req_go = Signal()
549
550 # signal early_req_row : row_t;
551 #
552 # signal cancel_store : std_ulogic;
553 # signal set_rsrv : std_ulogic;
554 # signal clear_rsrv : std_ulogic;
555 #
556 # signal r0_valid : std_ulogic;
557 # signal r0_stall : std_ulogic;
558 #
559 # signal use_forward1_next : std_ulogic;
560 # signal use_forward2_next : std_ulogic;
561 early_req_row = Signal(ROW)
562
563 cancel_store = Signal()
564 set_rsrv = Signal()
565 clear_rsrv = Signal()
566
567 r0_valid = Signal()
568 r0_stall = Signal()
569
570 use_forward1_next = Signal()
571 use_forward2_next = Signal()
572
573 # -- Cache RAM interface
574 # type cache_ram_out_t is array(way_t) of cache_row_t;
575 # signal cache_out : cache_ram_out_t;
576 # Cache RAM interface
577 def CacheRamOut():
578 return Array(Signal(CACHE_ROW) for x in range(NUM_WAYS))
579
580 cache_out = CacheRamOut()
581
582 # -- PLRU output interface
583 # type plru_out_t is array(index_t) of
584 # std_ulogic_vector(WAY_BITS-1 downto 0);
585 # signal plru_victim : plru_out_t;
586 # signal replace_way : way_t;
587 # PLRU output interface
588 def PLRUOut():
589 return Array(Signal(WAY_BITS) for x in range(Index()))
590
591 plru_victim = PLRUOut()
592 replace_way = Signal(WAY_BITS)
593
594 # -- Wishbone read/write/cache write formatting signals
595 # signal bus_sel : std_ulogic_vector(7 downto 0);
596 # Wishbone read/write/cache write formatting signals
597 bus_sel = Signal(8)
598
599 # -- TLB signals
600 # signal tlb_tag_way : tlb_way_tags_t;
601 # signal tlb_pte_way : tlb_way_ptes_t;
602 # signal tlb_valid_way : tlb_way_valids_t;
603 # signal tlb_req_index : tlb_index_t;
604 # signal tlb_hit : std_ulogic;
605 # signal tlb_hit_way : tlb_way_t;
606 # signal pte : tlb_pte_t;
607 # signal ra : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
608 # signal valid_ra : std_ulogic;
609 # signal perm_attr : perm_attr_t;
610 # signal rc_ok : std_ulogic;
611 # signal perm_ok : std_ulogic;
612 # signal access_ok : std_ulogic;
613 # TLB signals
614 tlb_tag_way = Signal(TLB_WAY_TAGS)
615 tlb_pte_way = Signal(TLB_WAY_PTES)
616 tlb_valid_way = Signal(TLB_WAY_VALID_BITS)
617 tlb_req_index = Signal(TLB_SET_SIZE)
618 tlb_hit = Signal()
619 tlb_hit_way = Signal(TLB_WAY)
620 pte = Signal(TLB_PTE)
621 ra = Signal(REAL_ADDR_BITS)
622 valid_ra = Signal()
623 perm_attr = PermAttr()
624 rc_ok = Signal()
625 perm_ok = Signal()
626 access_ok = Signal()
627
628 # -- TLB PLRU output interface
629 # type tlb_plru_out_t is array(tlb_index_t) of
630 # std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
631 # signal tlb_plru_victim : tlb_plru_out_t;
632 # TLB PLRU output interface
633 DEF TLBPLRUOut():
634 return Array(Signal(TLB_WAY_BITS) for x in range(TLB_SET_SIZE))
635
636 tlb_plru_victim = TLBPLRUOut()
637
638 # -- Helper functions to decode incoming requests
639 #
640 # -- Return the cache line index (tag index) for an address
641 # function get_index(addr: std_ulogic_vector) return index_t is
642 # begin
643 # return to_integer(
644 # unsigned(addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS))
645 # );
646 # end;
647 # Helper functions to decode incoming requests
648 #
649 # Return the cache line index (tag index) for an address
650 def get_index(addr):
651 return addr[LINE_OFF_BITS:SET_SIZE_BITS]
652
653 # -- Return the cache row index (data memory) for an address
654 # function get_row(addr: std_ulogic_vector) return row_t is
655 # begin
656 # return to_integer(
657 # unsigned(addr(SET_SIZE_BITS - 1 downto ROW_OFF_BITS))
658 # );
659 # end;
660 # Return the cache row index (data memory) for an address
661 def get_row(addr):
662 return addr[ROW_OFF_BITS:SET_SIZE_BITS]
663
664 # -- Return the index of a row within a line
665 # function get_row_of_line(row: row_t) return row_in_line_t is
666 # variable row_v : unsigned(ROW_BITS-1 downto 0);
667 # begin
668 # row_v := to_unsigned(row, ROW_BITS);
669 # return row_v(ROW_LINEBITS-1 downto 0);
670 # end;
671 # Return the index of a row within a line
672 def get_row_of_line(row):
673 row_v = Signal(ROW_BITS)
674 row_v = Signal(row)
675 return row_v[0:ROW_LINE_BITS]
676
677 # -- Returns whether this is the last row of a line
678 # function is_last_row_addr(addr: wishbone_addr_type;
679 # last: row_in_line_t) return boolean is
680 # begin
681 # return
682 # unsigned(addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS)) = last;
683 # end;
684 # Returns whether this is the last row of a line
685 def is_last_row_addr(addr, last):
686 return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last
687
688 # -- Returns whether this is the last row of a line
689 # function is_last_row(row: row_t; last: row_in_line_t)
690 # return boolean is
691 # begin
692 # return get_row_of_line(row) = last;
693 # end;
694 # Returns whether this is the last row of a line
695 def is_last_row(row, last):
696 return get_row_of_line(row) == last
697
698 # -- Return the address of the next row in the current cache line
699 # function next_row_addr(addr: wishbone_addr_type)
700 # return std_ulogic_vector is
701 # variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
702 # variable result : wishbone_addr_type;
703 # begin
704 # -- Is there no simpler way in VHDL to
705 # -- generate that 3 bits adder ?
706 # row_idx := addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS);
707 # row_idx := std_ulogic_vector(unsigned(row_idx) + 1);
708 # result := addr;
709 # result(LINE_OFF_BITS-1 downto ROW_OFF_BITS) := row_idx;
710 # return result;
711 # end;
712 # Return the address of the next row in the current cache line
713 def next_row_addr(addr):
714 row_idx = Signal(ROW_LINE_BITS)
715 result = WBAddrType()
716 # Is there no simpler way in VHDL to
717 # generate that 3 bits adder ?
718 row_idx = addr[ROW_OFF_BITS:LINE_OFF_BITS]
719 row_idx = Signal(row_idx + 1)
720 result = addr
721 result[ROW_OFF_BITS:LINE_OFF_BITS] = row_idx
722 return result
723
724 # -- Return the next row in the current cache line. We use a
725 # -- dedicated function in order to limit the size of the
726 # -- generated adder to be only the bits within a cache line
727 # -- (3 bits with default settings)
728 # function next_row(row: row_t) return row_t is
729 # variable row_v : std_ulogic_vector(ROW_BITS-1 downto 0);
730 # variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
731 # variable result : std_ulogic_vector(ROW_BITS-1 downto 0);
732 # begin
733 # row_v := std_ulogic_vector(to_unsigned(row, ROW_BITS));
734 # row_idx := row_v(ROW_LINEBITS-1 downto 0);
735 # row_v(ROW_LINEBITS-1 downto 0) :=
736 # std_ulogic_vector(unsigned(row_idx) + 1);
737 # return to_integer(unsigned(row_v));
738 # end;
739 # Return the next row in the current cache line. We use a
740 # dedicated function in order to limit the size of the
741 # generated adder to be only the bits within a cache line
742 # (3 bits with default settings)
743 def next_row(row)
744 row_v = Signal(ROW_BITS)
745 row_idx = Signal(ROW_LINE_BITS)
746 result = Signal(ROW_BITS)
747
748 row_v = Signal(row)
749 row_idx = row_v[ROW_LINE_BITS]
750 row_v[0:ROW_LINE_BITS] = Signal(row_idx + 1)
751 return row_v
752
753 # -- Get the tag value from the address
754 # function get_tag(addr: std_ulogic_vector) return cache_tag_t is
755 # begin
756 # return addr(REAL_ADDR_BITS - 1 downto SET_SIZE_BITS);
757 # end;
758 # Get the tag value from the address
759 def get_tag(addr):
760 return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
761
762 # -- Read a tag from a tag memory row
763 # function read_tag(way: way_t; tagset: cache_tags_set_t)
764 # return cache_tag_t is
765 # begin
766 # return tagset(way * TAG_WIDTH + TAG_BITS
767 # - 1 downto way * TAG_WIDTH);
768 # end;
769 # Read a tag from a tag memory row
770 def read_tag(way, tagset):
771 return tagset[way *TAG_WIDTH:way * TAG_WIDTH + TAG_BITS]
772
773 # -- Read a TLB tag from a TLB tag memory row
774 # function read_tlb_tag(way: tlb_way_t; tags: tlb_way_tags_t)
775 # return tlb_tag_t is
776 # variable j : integer;
777 # begin
778 # j := way * TLB_EA_TAG_BITS;
779 # return tags(j + TLB_EA_TAG_BITS - 1 downto j);
780 # end;
781 # Read a TLB tag from a TLB tag memory row
782 def read_tlb_tag(way, tags):
783 j = Signal()
784
785 j = way * TLB_EA_TAG_BITS
786 return tags[j:j + TLB_EA_TAG_BITS]
787
788 # -- Write a TLB tag to a TLB tag memory row
789 # procedure write_tlb_tag(way: tlb_way_t; tags: inout tlb_way_tags_t;
790 # tag: tlb_tag_t) is
791 # variable j : integer;
792 # begin
793 # j := way * TLB_EA_TAG_BITS;
794 # tags(j + TLB_EA_TAG_BITS - 1 downto j) := tag;
795 # end;
796 # Write a TLB tag to a TLB tag memory row
797 def write_tlb_tag(way, tags), tag):
798 j = Signal()
799
800 j = way * TLB_EA_TAG_BITS
801 tags[j:j + TLB_EA_TAG_BITS] = tag
802
803 # -- Read a PTE from a TLB PTE memory row
804 # function read_tlb_pte(way: tlb_way_t; ptes: tlb_way_ptes_t)
805 # return tlb_pte_t is
806 # variable j : integer;
807 # begin
808 # j := way * TLB_PTE_BITS;
809 # return ptes(j + TLB_PTE_BITS - 1 downto j);
810 # end;
811 # Read a PTE from a TLB PTE memory row
812 def read_tlb_pte(way, ptes):
813 j = Signal()
814
815 j = way * TLB_PTE_BITS
816 return ptes[j:j + TLB_PTE_BITS]
817
818 # procedure write_tlb_pte(way: tlb_way_t;
819 # ptes: inout tlb_way_ptes_t; newpte: tlb_pte_t) is
820 # variable j : integer;
821 # begin
822 # j := way * TLB_PTE_BITS;
823 # ptes(j + TLB_PTE_BITS - 1 downto j) := newpte;
824 # end;
825 def write_tlb_pte(way, ptes,newpte):
826 j = Signal()
827
828 j = way * TLB_PTE_BITS
829 return ptes[j:j + TLB_PTE_BITS] = newpte
830
831 # begin
832 #
833 """these, because they are constants, can actually be done *as*
834 python asserts:
835 assert LINE_SIZE % ROWSIZE == 0, "line size not ...."
836 """
837 # assert LINE_SIZE mod ROW_SIZE = 0
838 # report "LINE_SIZE not multiple of ROW_SIZE" severity FAILURE;
839 # assert ispow2(LINE_SIZE)
840 # report "LINE_SIZE not power of 2" severity FAILURE;
841 # assert ispow2(NUM_LINES)
842 # report "NUM_LINES not power of 2" severity FAILURE;
843 # assert ispow2(ROW_PER_LINE)
844 # report "ROW_PER_LINE not power of 2" severity FAILURE;
845 # assert (ROW_BITS = INDEX_BITS + ROW_LINEBITS)
846 # report "geometry bits don't add up" severity FAILURE;
847 # assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS)
848 # report "geometry bits don't add up" severity FAILURE;
849 # assert (REAL_ADDR_BITS = TAG_BITS + INDEX_BITS + LINE_OFF_BITS)
850 # report "geometry bits don't add up" severity FAILURE;
851 # assert (REAL_ADDR_BITS = TAG_BITS + ROW_BITS + ROW_OFF_BITS)
852 # report "geometry bits don't add up" severity FAILURE;
853 # assert (64 = wishbone_data_bits)
854 # report "Can't yet handle a wishbone width that isn't 64-bits"
855 # severity FAILURE;
856 # assert SET_SIZE_BITS <= TLB_LG_PGSZ
857 # report "Set indexed by virtual address" severity FAILURE;
858 assert (LINE_SIZE % ROW_SIZE) == 0 "LINE_SIZE not " \
859 "multiple of ROW_SIZE"
860
861 assert (LINE_SIZE % 2) == 0 "LINE_SIZE not power of 2"
862
863 assert (NUM_LINES % 2) == 0 "NUM_LINES not power of 2"
864
865 assert (ROW_PER_LINE % 2) == 0 "ROW_PER_LINE not" \
866 "power of 2"
867
868 assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS) \
869 "geometry bits don't add up"
870
871 assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS) \
872 "geometry bits don't add up"
873
874 assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS \
875 + LINE_OFF_BITS) "geometry bits don't add up"
876
877 assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS) \
878 "geometry bits don't add up"
879
880 assert 64 == wishbone_data_bits "Can't yet handle a" \
881 "wishbone width that isn't 64-bits"
882
883 assert SET_SIZE_BITS <= TLB_LG_PGSZ "Set indexed by" \
884 "virtual address"
885
886 # -- Latch the request in r0.req as long as we're not stalling
887 # stage_0 : process(clk)
888 # Latch the request in r0.req as long as we're not stalling
889 class Stage0(Elaboratable):
890 def __init__(self):
891 pass
892
893 def elaborate(self, platform):
894 m = Module()
895
896 comb = m.d.comb
897 sync = m.d.sync
898
899 # variable r : reg_stage_0_t;
900 r = RegStage0()
901 comb += r
902
903 # begin
904 # if rising_edge(clk) then
905 # assert (d_in.valid and m_in.valid) = '0'
906 # report "request collision loadstore vs MMU";
907 assert ~(d_in.valid & m_in.valid) "request collision
908 loadstore vs MMU"
909
910 # if m_in.valid = '1' then
911 with m.If(m_in.valid):
912 # r.req.valid := '1';
913 # r.req.load := not (m_in.tlbie or m_in.tlbld);
914 # r.req.dcbz := '0';
915 # r.req.nc := '0';
916 # r.req.reserve := '0';
917 # r.req.virt_mode := '0';
918 # r.req.priv_mode := '1';
919 # r.req.addr := m_in.addr;
920 # r.req.data := m_in.pte;
921 # r.req.byte_sel := (others => '1');
922 # r.tlbie := m_in.tlbie;
923 # r.doall := m_in.doall;
924 # r.tlbld := m_in.tlbld;
925 # r.mmu_req := '1';
926 sync += r.req.valid.eq(1)
927 sync += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))
928 sync += r.req.priv_mode.eq(1)
929 sync += r.req.addr.eq(m_in.addr)
930 sync += r.req.data.eq(m_in.pte)
931 sync += r.req.byte_sel.eq(1)
932 sync += r.tlbie.eq(m_in.tlbie)
933 sync += r.doall.eq(m_in.doall)
934 sync += r.tlbld.eq(m_in.tlbld)
935 sync += r.mmu_req.eq(1)
936 # else
937 with m.Else():
938 # r.req := d_in;
939 # r.tlbie := '0';
940 # r.doall := '0';
941 # r.tlbld := '0';
942 # r.mmu_req := '0';
943 sync += r.req.eq(d_in)
944 # end if;
945 # if rst = '1' then
946 # r0_full <= '0';
947 # elsif r1.full = '0' or r0_full = '0' then
948 with m.If(~r1.full | ~r0_full):
949 # r0 <= r;
950 # r0_full <= r.req.valid;
951 sync += r0.eq(r)
952 sync += r0_full.eq(r.req.valid)
953 # end if;
954 # end if;
955 # end process;
956 #
957 # -- we don't yet handle collisions between loadstore1 requests
958 # -- and MMU requests
959 # m_out.stall <= '0';
960 # we don't yet handle collisions between loadstore1 requests
961 # and MMU requests
962 comb += m_out.stall.eq(0)
963
964 # -- Hold off the request in r0 when r1 has an uncompleted request
965 # r0_stall <= r0_full and r1.full;
966 # r0_valid <= r0_full and not r1.full;
967 # stall_out <= r0_stall;
968 # Hold off the request in r0 when r1 has an uncompleted request
969 comb += r0_stall.eq(r0_full & r1.full)
970 comb += r0_valid.eq(r0_full & ~r1.full)
971 comb += stall_out.eq(r0_stall)
972
973 # -- TLB
974 # -- Operates in the second cycle on the request latched in r0.req.
975 # -- TLB updates write the entry at the end of the second cycle.
976 # tlb_read : process(clk)
977 # TLB
978 # Operates in the second cycle on the request latched in r0.req.
979 # TLB updates write the entry at the end of the second cycle.
class TLBRead(Elaboratable):
    """dcache TLB read port.

    Operates in the second cycle on the request latched in r0.req;
    latches the addressed TLB set's valid/tag/pte ways for the hit
    comparators in the next stage.  Holds its output during a stall.
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # VHDL: variable index : tlb_index_t;
        #       variable addrbits :
        #           std_ulogic_vector(TLB_SET_BITS - 1 downto 0);
        # index must be a Signal (the original bound it to the
        # constant TLB_SET_SIZE, which cannot be assigned to)
        index = Signal(TLB_SET_BITS)
        addrbits = Signal(TLB_SET_BITS)

        # MMU requests take priority over loadstore1 requests
        with m.If(m_in.valid):
            sync += addrbits.eq(m_in.addr[
                TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS
            ])
        with m.Else():
            sync += addrbits.eq(d_in.addr[
                TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS
            ])

        # NOTE(review): in the VHDL these are process *variables*
        # (take effect immediately); registering them here adds a
        # cycle of latency -- confirm intended behaviour
        sync += index.eq(addrbits)

        # If we have any op and the previous op isn't finished,
        # then keep the same output for next cycle.
        with m.If(~r0_stall):
            sync += tlb_valid_way.eq(dtlb_valids[index])
            sync += tlb_tag_way.eq(dtlb_tags[index])
            sync += tlb_pte_way.eq(dtlb_ptes[index])

        return m
1031
1032 # -- Generate TLB PLRUs
1033 # maybe_tlb_plrus: if TLB_NUM_WAYS > 1 generate
1034 # Generate TLB PLRUs
class MaybeTLBPLRUs(Elaboratable):
    """TLB PLRU generation (one PLRU per TLB set).

    Translation of the VHDL ``maybe_tlb_plrus`` generate block.  The
    per-set plru entity instantiation is still TODO; the VHDL is kept
    below as the reference.
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # NOTE(review): TLB_NUM_WAYS is an elaboration-time constant,
        # so this should probably be a plain Python "if" rather than
        # m.If -- confirm once the generate loop is translated
        with m.If(TLB_NUM_WAYS > 1):
            # "pass" keeps the with-block syntactically valid until
            # the body below is translated (the original had only
            # comments here, which is a SyntaxError)
            pass
            # TODO understand how to convert generate statements
            # tlb_plrus: for i in 0 to TLB_SET_SIZE - 1 generate
            #     -- TLB PLRU interface
            #     signal tlb_plru_acc :
            #          std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
            #     signal tlb_plru_acc_en : std_ulogic;
            #     signal tlb_plru_out :
            #          std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
            # begin
            #     tlb_plru : entity work.plru
            #         generic map ( BITS => TLB_WAY_BITS )
            #         port map (
            #             clk => clk, rst => rst,
            #             acc => tlb_plru_acc,
            #             acc_en => tlb_plru_acc_en,
            #             lru => tlb_plru_out
            #         );
            #     process(all)
            #     begin
            #         -- PLRU interface
            #         if r1.tlb_hit_index = i then
            #             tlb_plru_acc_en <= r1.tlb_hit;
            #         else
            #             tlb_plru_acc_en <= '0';
            #         end if;
            #         tlb_plru_acc <=
            #             std_ulogic_vector(to_unsigned(
            #                 r1.tlb_hit_way, TLB_WAY_BITS));
            #         tlb_plru_victim(i) <= tlb_plru_out;
            #     end process;
            # end generate;

        return m
1085 #
1086 # tlb_search : process(all)
class TLBSearch(Elaboratable):
    """TLB search: look up r0.req.addr and produce the real address
    (ra), PTE and permission attributes for the current request.

    In real mode the address is passed through and full permissions
    are granted.
    """
    def __init__(self):
        pass

    # note: was misspelled "elborate", so nmigen never saw the body
    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # VHDL process variables: hitway : tlb_way_t;
        #                         hit : std_ulogic;
        #                         eatag : tlb_tag_t;
        hitway = TLBWay()
        hit = Signal()
        eatag = TLBTag()

        # index of the TLB set addressed by this request
        comb += tlb_req_index.eq(r0.req.addr[
            TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_SET_BITS
        ])

        # effective-address tag to compare against each way's tag
        comb += eatag.eq(r0.req.addr[
            TLB_LG_PGSZ + TLB_SET_BITS:64
        ])

        # compare against every way (VHDL: for i in tlb_way_t loop);
        # comparison parenthesised because '&' binds tighter than '=='
        for i in range(TLB_NUM_WAYS):
            with m.If(tlb_valid_way[i]
                      & (read_tlb_tag(i, tlb_tag_way) == eatag)):
                comb += hitway.eq(i)
                comb += hit.eq(1)

        comb += tlb_hit.eq(hit & r0_valid)
        comb += tlb_hit_way.eq(hitway)

        with m.If(tlb_hit):
            comb += pte.eq(read_tlb_pte(hitway, tlb_pte_way))
        with m.Else():
            comb += pte.eq(0)

        # real-mode accesses do not require a TLB hit
        comb += valid_ra.eq(tlb_hit | ~r0.req.virt_mode)

        with m.If(r0.req.virt_mode):
            # ra = PTE RPN & page-offset bits & zeroed row-offset bits
            # (VHDL "(ROW_OFF_BITS-1 downto 0 => '0')" is a *zero*
            # constant of width ROW_OFF_BITS, hence Const(0, ...))
            comb += ra.eq(Cat(
                Const(0, ROW_OFF_BITS),
                r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
                pte[TLB_LG_PGSZ:REAL_ADDR_BITS]
            ))
            comb += perm_attr.eq(extract_perm_attr(pte))
        with m.Else():
            # real mode: address used as-is, row-offset bits zeroed
            comb += ra.eq(Cat(
                Const(0, ROW_OFF_BITS),
                r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]
            ))

            # perm_attr <= real_mode_perm_attr: full permissions,
            # privileged, cacheable
            comb += perm_attr.reference.eq(1)
            comb += perm_attr.changed.eq(1)
            comb += perm_attr.priv.eq(1)
            comb += perm_attr.nocache.eq(0)
            comb += perm_attr.rd_perm.eq(1)
            comb += perm_attr.wr_perm.eq(1)

        return m
1185
1186 # tlb_update : process(clk)
class TLBUpdate(Elaboratable):
    """TLB update: tlbie invalidation (single entry or whole TLB)
    and tlbwe (TLB load) into the set addressed by tlb_req_index.
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # VHDL process *variables* -- they take effect immediately
        # within the process, so they are driven combinatorially here
        tlbie = Signal()
        tlbwe = Signal()
        repl_way = TLBWay()
        eatag = TLBTag()
        tagset = TLBWayTags()
        pteset = TLBWayPtes()

        comb += tlbie.eq(r0_valid & r0.tlbie)
        # NOTE(review): the VHDL reads r0.tlbld but the translated
        # comment says r0.tlbldoi -- confirm which field is intended
        comb += tlbwe.eq(r0_valid & r0.tlbldoi)

        # VHDL: if rst = '1' or (tlbie = '1' and r0.doall = '1');
        # reset is handled implicitly by the sync domain, which
        # leaves the tlbie-with-doall case.  Without this m.If the
        # m.Elif below had no preceding m.If and failed.
        with m.If(tlbie & r0.doall):
            # clear all valid bits at once
            for i in range(TLB_SET_SIZE):
                sync += dtlb_valids[i].eq(0)
        with m.Elif(tlbie):
            # invalidate just the entry that hit
            with m.If(tlb_hit):
                sync += dtlb_valids[tlb_req_index][tlb_hit_way].eq(0)
        with m.Elif(tlbwe):
            # pick the way to replace: the hit way on a hit,
            # otherwise the PLRU victim for this set
            with m.If(tlb_hit):
                comb += repl_way.eq(tlb_hit_way)
            with m.Else():
                comb += repl_way.eq(tlb_plru_victim[tlb_req_index])

            # write the tag and PTE into the chosen way and mark
            # the entry valid
            comb += eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
            comb += tagset.eq(tlb_tag_way)
            comb += write_tlb_tag(repl_way, tagset, eatag)
            sync += dtlb_tags[tlb_req_index].eq(tagset)
            comb += pteset.eq(tlb_pte_way)
            comb += write_tlb_pte(repl_way, pteset, r0.req.data)
            sync += dtlb_ptes[tlb_req_index].eq(pteset)
            sync += dtlb_valids[tlb_req_index][repl_way].eq(1)

        return m
1273
1274 # -- Generate PLRUs
1275 # maybe_plrus: if NUM_WAYS > 1 generate
class MaybePLRUs(Elaboratable):
    """Cache-line PLRU generation (one PLRU per cache line).

    Translation of the VHDL ``maybe_plrus``/``plrus`` generate
    blocks; the plru entity instantiation itself is still TODO.
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # TODO learn translation of generate/entity/port map into
        # nmigen @lkcl.  VHDL reference:
        #     plru : entity work.plru
        #         generic map ( BITS => WAY_BITS )
        #         port map ( clk => clk, rst => rst,
        #                    acc => plru_acc, acc_en => plru_acc_en,
        #                    lru => plru_out );
        # NOTE(review): plru_acc / plru_acc_en / plru_out must become
        # per-line signals once the plru module is instantiated.
        # The loop below supplies the generate index i, which the
        # original referenced without defining.
        for i in range(NUM_LINES):
            # PLRU interface: enable access tracking only when the
            # hit was on this line
            with m.If(r1.hit_index == i):
                comb += plru_acc_en.eq(r1.cache_hit)
            with m.Else():
                comb += plru_acc_en.eq(0)
            comb += plru_acc.eq(r1.hit_way)
            # (was "plru_victime", a typo)
            comb += plru_victim[i].eq(plru_out)

        return m
1331
1332 # -- Cache tag RAM read port
1333 # cache_tag_read : process(clk)
1334 # Cache tag RAM read port
class CacheTagRead(Elaboratable):
    """Cache tag RAM read port: latch the whole tag set for the
    index of the incoming request, holding it during a stall."""
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # VHDL: variable index : index_t;
        # NOTE(review): as a process variable this takes effect
        # immediately in the VHDL -- confirm sync vs comb here
        index = Signal(INDEX)

        with m.If(r0_stall):
            # stalled: keep reading the same index
            sync += index.eq(req_index)
        with m.Elif(m_in.valid):
            # MMU requests take priority over loadstore1
            sync += index.eq(get_index(m_in.addr))
        with m.Else():
            sync += index.eq(get_index(d_in.addr))

        # subscript, not a call: cache_tags is indexed like the
        # dtlb_tags/dtlb_ptes arrays elsewhere in this file
        sync += cache_tag_set.eq(cache_tags[index])

        return m
1371
1372 # -- Cache request parsing and hit detection
1373 # dcache_request : process(all)
1374 # Cache request parsing and hit detection
class DcacheRequest(Elaboratable):
    """Cache request parsing and hit detection.

    Decodes the request latched in r0, compares it against the cache
    tags (via each TLB way's real address when translating, to make
    timing), checks permissions, and produces the operation (req_op)
    and associated strobes for the cache state machine.
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        # the original body used m/comb/sync without ever creating
        # them; set up the module first
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # VHDL process variables (see original header comment);
        # all driven combinatorially
        is_hit = Signal()
        hit_way = Signal(WAY_BITS)
        op = Signal(Op)              # was Op(), which is not callable
        opsel = Signal(3)
        go = Signal()
        nc = Signal()
        s_hit = Signal()
        s_tag = Signal(CACHE_TAG)
        s_pte = Signal(TLB_PTE)
        s_ra = Signal(REAL_ADDR_BITS)
        hit_set = Signal(TLB_NUM_WAYS)
        hit_way_set = HitWaySet()
        rel_matches = Signal(TLB_NUM_WAYS)
        rel_match = Signal()

        # Extract line, row and tag from request
        comb += req_index.eq(get_index(r0.req.addr))
        comb += req_row.eq(get_row(r0.req.addr))
        comb += req_tag.eq(get_tag(ra))

        # only process a request when valid, not a TLB op, and no
        # pending loadstore error
        comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)

        # Test if pending request is a hit on any way.
        # In order to make timing in virtual mode, when we are using
        # the TLB, we compare each way with each of the real
        # addresses from each way of the TLB, and then decide later
        # which match to use.
        comb += hit_way.eq(0)
        comb += is_hit.eq(0)
        comb += rel_match.eq(0)

        with m.If(r0.req.virt_mode):
            comb += rel_matches.eq(0)
            # VHDL: for j in tlb_way_t loop
            for j in range(TLB_NUM_WAYS):
                comb += hit_way_set[j].eq(0)
                comb += s_hit.eq(0)
                comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
                # real address through this TLB way: RPN + page offset
                comb += s_ra.eq(Cat(
                    r0.req.addr[0:TLB_LG_PGSZ],
                    s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]
                ))
                comb += s_tag.eq(get_tag(s_ra))

                # compare every cache way against this way's tag
                # (comparison parenthesised: '&' binds before '==')
                for i in range(NUM_WAYS):
                    with m.If(go & cache_valid_bits[req_index][i]
                              & (read_tag(i, cache_tag_set) == s_tag)
                              & tlb_valid_way[j]):
                        comb += hit_way_set[j].eq(i)
                        comb += s_hit.eq(1)
                comb += hit_set[j].eq(s_hit)
                with m.If(s_tag == r1.reload_tag):
                    comb += rel_matches[j].eq(1)

            # pick the results for the TLB way that actually hit
            with m.If(tlb_hit):
                comb += is_hit.eq(hit_set[tlb_hit_way])
                comb += hit_way.eq(hit_way_set[tlb_hit_way])
                comb += rel_match.eq(rel_matches[tlb_hit_way])
        with m.Else():
            # real mode: compare directly against the request address
            comb += s_tag.eq(get_tag(r0.req.addr))
            for i in range(NUM_WAYS):
                with m.If(go & cache_valid_bits[req_index][i]
                          & (read_tag(i, cache_tag_set) == s_tag)):
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)
            with m.If(s_tag == r1.reload_tag):
                comb += rel_match.eq(1)

        comb += req_same_tag.eq(rel_match)

        # See if the request matches the line currently being
        # reloaded (each comparison parenthesised -- '&' binds
        # tighter than '==' in Python)
        with m.If((r1.state == State.RELOAD_WAIT_ACK)
                  & (req_index == r1.store_index) & rel_match):
            # For a store, consider this a hit even if the row isn't
            # valid since it will be by the time we perform the
            # store.  For a load, check the appropriate row valid
            # bit.
            comb += is_hit.eq(~r0.req.load
                              | r1.rows_valid[req_row % ROW_PER_LINE])
            comb += hit_way.eq(replace_way)

        # Whether to use forwarded data for a load or not
        comb += use_forward1_next.eq(0)
        with m.If((get_row(r1.req.real_addr) == req_row)
                  & (r1.req.hit_way == hit_way)):
            # Only need to consider r1.write_bram here, since if we
            # are writing refill data here, then we don't have a
            # cache hit this cycle on the line being refilled.
            # (There is the possibility that the load following the
            # load miss that started the refill could be to the old
            # contents of the victim line, since it is a couple of
            # cycles after the refill starts before we see the
            # updated cache tag.  In that case we don't use the
            # bypass.)
            comb += use_forward1_next.eq(r1.write_bram)
        comb += use_forward2_next.eq(0)
        with m.If((r1.forward_row1 == req_row)
                  & (r1.forward_way1 == hit_way)):
            comb += use_forward2_next.eq(r1.forward_valid1)

        # The way that matched on a hit
        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r1.write_tag):
            comb += replace_way.eq(plru_victim[r1.store_index])
        with m.Else():
            comb += replace_way.eq(r1.store_way)

        # work out whether we have permission for this access
        # NB we don't yet implement AMR, thus no KUAP
        comb += rc_ok.eq(
            perm_attr.reference & (r0.req.load | perm_attr.changed)
        )
        # grouping follows the VHDL:
        #   (priv_mode or not priv) and
        #   (wr_perm or (load and rd_perm))
        comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv)
                           & (perm_attr.wr_perm
                              | (r0.req.load & perm_attr.rd_perm)))
        comb += access_ok.eq(valid_ra & perm_ok & rc_ok)

        # Combine the request and cache hit status to decide what
        # operation needs to be done
        comb += nc.eq(r0.req.nc | perm_attr.nocache)
        comb += op.eq(Op.OP_NONE)
        with m.If(go):
            with m.If(~access_ok):
                comb += op.eq(Op.OP_BAD)
            with m.Elif(cancel_store):
                comb += op.eq(Op.OP_STCX_FAIL)
            with m.Else():
                # opsel = load & nc & is_hit (load is the MSB)
                comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
                with m.Switch(opsel):
                    with m.Case(Const(0b101, 3)):
                        comb += op.eq(Op.OP_LOAD_HIT)
                    with m.Case(Const(0b100, 3)):
                        comb += op.eq(Op.OP_LOAD_MISS)
                    with m.Case(Const(0b110, 3)):
                        comb += op.eq(Op.OP_LOAD_NC)
                    with m.Case(Const(0b001, 3)):
                        comb += op.eq(Op.OP_STORE_HIT)
                    with m.Case(Const(0b000, 3)):
                        comb += op.eq(Op.OP_STORE_MISS)
                    with m.Case(Const(0b010, 3)):
                        comb += op.eq(Op.OP_STORE_MISS)
                    with m.Case(Const(0b011, 3)):
                        comb += op.eq(Op.OP_BAD)
                    with m.Case(Const(0b111, 3)):
                        comb += op.eq(Op.OP_BAD)
                    with m.Default():
                        comb += op.eq(Op.OP_NONE)
        comb += req_op.eq(op)
        comb += req_go.eq(go)

        # Version of the row number that is valid one cycle earlier
        # in the cases where we need to read the cache data BRAM.
        # If we're stalling then we need to keep reading the last
        # row requested.
        with m.If(~r0_stall):
            with m.If(m_in.valid):
                comb += early_req_row.eq(get_row(m_in.addr))
            with m.Else():
                comb += early_req_row.eq(get_row(d_in.addr))
        with m.Else():
            comb += early_req_row.eq(req_row)

        # Wire up wishbone request latch out of stage 1
        comb += wishbone_out.eq(r1.wb)

        return m
1729
1730 # -- Handle load-with-reservation and store-conditional instructions
1731 # reservation_comb: process(all)
1732 # Handle load-with-reservation and store-conditional instructions
class ReservationComb(Elaboratable):
    """Handle load-with-reservation (larx) and store-conditional
    (stcx) instructions: request set/clear of the reservation, and
    cancel a store-conditional whose reservation does not match."""
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # cancel_store / set_rsrv / clear_rsrv default to 0
        # (comb signals are 0 unless driven)
        with m.If(r0_valid & r0.req.reserve):
            # XXX generate alignment interrupt if address
            # is not aligned XXX or if r0.req.nc = '1'
            with m.If(r0.req.load):
                # load with reservation
                # (was set_rsrv(1) -- a call, not an assignment)
                comb += set_rsrv.eq(1)
            with m.Else():
                # store conditional
                comb += clear_rsrv.eq(1)
                # cancel the store if there is no valid reservation,
                # or it covers a different cache line (the original
                # dropped the "/= reservation.addr" comparison)
                with m.If(~reservation.valid
                          | (r0.req.addr[LINE_OFF_BITS:64]
                             != reservation.addr)):
                    comb += cancel_store.eq(1)

        return m
1776
1777 # reservation_reg: process(clk)
class ReservationReg(Elaboratable):
    """Reservation register: set on a successful larx, cleared on
    stcx.  The VHDL also clears it on rst, which the nmigen sync
    domain's implicit reset covers."""
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # reset branch of the VHDL is implicit; only the update case
        # remains, so this is an m.If (the original used m.Elif with
        # no preceding m.If, plus a stray "" that broke the syntax)
        with m.If(r0_valid & access_ok):
            with m.If(clear_rsrv):
                sync += reservation.valid.eq(0)
            with m.Elif(set_rsrv):
                sync += reservation.valid.eq(1)
                # remember which line the reservation covers
                sync += reservation.addr.eq(
                    r0.req.addr[LINE_OFF_BITS:64]
                )

        return m
1810 #
1811 # -- Return data for loads & completion control logic
1812 # writeback_control: process(all)
1813 # Return data for loads & completion control logic
class WriteBackControl(Elaboratable):
    """Return data for loads & completion control logic.

    Selects between cache output and the 1/2-cycle forwarding paths
    per byte lane, and drives completion/error outputs to both
    loadstore1 (d_out) and the MMU (m_out).
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # VHDL: variable data_out / data_fwd : 64-bit
        data_out = Signal(64)
        data_fwd = Signal(64)

        # Use the bypass if are reading the row that was
        # written 1 or 2 cycles ago, including for the
        # slow_valid = 1 case (i.e. completing a load
        # miss or a non-cacheable load).
        with m.If(r1.use_forward1):
            comb += data_fwd.eq(r1.forward_data1)
        with m.Else():
            comb += data_fwd.eq(r1.forward_data2)

        comb += data_out.eq(cache_out[r1.hit_way])

        # splice forwarded bytes in, one byte lane at a time
        for i in range(8):
            # j is an elaboration-time constant (VHDL variable j),
            # used only to compute the slice bounds -- it must not
            # be a Signal, and "comb += i * 8" was a no-op
            j = i * 8
            with m.If(r1.forward_sel[i]):
                comb += data_out[j:j+8].eq(data_fwd[j:j+8])

        # outputs to loadstore1
        comb += d_out.valid.eq(r1.ls_valid)
        comb += d_out.data.eq(data_out)
        comb += d_out.store_done.eq(~r1.stcx_fail)
        comb += d_out.error.eq(r1.ls_error)
        comb += d_out.cache_paradox.eq(r1.cache_paradox)

        # Outputs to MMU
        comb += m_out.done.eq(r1.mmu_done)
        comb += m_out.err.eq(r1.mmu_error)
        comb += m_out.data.eq(data_out)

        # We have a valid load or store hit or we just completed
        # a slow op such as a load miss, a NC load or a store
        #
        # Note: the load hit is delayed by one cycle. However it
        # can still not collide with r.slow_valid (well unless I
        # miscalculated) because slow_valid can only be set on a
        # subsequent request and not on its first cycle (the state
        # machine must have advanced), which makes slow_valid
        # at least 2 cycles from the previous hit_load_valid.

        # Sanity: Only one of these must be set in any given cycle.
        # NOTE(review): these operate on Signals, so plain Python
        # asserts only check elaboration-time values; a simulation
        # Assert is probably wanted instead -- confirm.  Second
        # check uses '&' per the VHDL "(... or ...) and ...".
        assert (r1.slow_valid & r1.stcx_fail) != 1, \
            "unexpected slow_valid collision with stcx_fail " \
            "-!- severity FAILURE"
        assert ((r1.slow_valid | r1.stcx_fail)
                & r1.hit_load_valid) != 1, \
            "unexpected hit_load_delayed collision with slow_valid " \
            "-!- severity FAILURE"

        # (was r1._mmu_req, a typo -- the field is r1.mmu_req)
        with m.If(~r1.mmu_req):
            # Request came from loadstore1...
            # Load hit case is the standard path
            with m.If(r1.hit_load_valid):
                print(f"completing load hit data={data_out}")

            # error cases complete without stalling
            with m.If(r1.ls_error):
                print("completing ld/st with error")

            # Slow ops (load miss, NC, stores)
            with m.If(r1.slow_valid):
                print(f"completing store or load miss data={data_out}")

        with m.Else():
            # Request came from MMU
            with m.If(r1.hit_load_valid):
                print(f"completing load hit to MMU, data={m_out.data}")

            # error cases complete without stalling
            # (log message typo "combpleting" fixed)
            with m.If(r1.mmu_error):
                print("completing MMU ld with error")

            # Slow ops (i.e. load miss)
            with m.If(r1.slow_valid):
                # (missing f-string prefix added)
                print(f"completing MMU load miss, data={m_out.data}")

        return m
1979
1980 # begin TODO
1981 # -- Generate a cache RAM for each way. This handles the normal
1982 # -- reads, writes from reloads and the special store-hit update
1983 # -- path as well.
1984 # --
1985 # -- Note: the BRAMs have an extra read buffer, meaning the output
1986 # -- is pipelined an extra cycle. This differs from the
1987 # -- icache. The writeback logic needs to take that into
1988 # -- account by using 1-cycle delayed signals for load hits.
1989 # --
1990 # rams: for i in 0 to NUM_WAYS-1 generate
1991 # signal do_read : std_ulogic;
1992 # signal rd_addr : std_ulogic_vector(ROW_BITS-1 downto 0);
1993 # signal do_write : std_ulogic;
1994 # signal wr_addr : std_ulogic_vector(ROW_BITS-1 downto 0);
1995 # signal wr_data :
1996 # std_ulogic_vector(wishbone_data_bits-1 downto 0);
1997 # signal wr_sel : std_ulogic_vector(ROW_SIZE-1 downto 0);
1998 # signal wr_sel_m : std_ulogic_vector(ROW_SIZE-1 downto 0);
1999 # signal dout : cache_row_t;
2000 # begin
2001 # way: entity work.cache_ram
2002 # generic map (
2003 # ROW_BITS => ROW_BITS,
2004 # WIDTH => wishbone_data_bits,
2005 # ADD_BUF => true
2006 # )
2007 # port map (
2008 # clk => clk,
2009 # rd_en => do_read,
2010 # rd_addr => rd_addr,
2011 # rd_data => dout,
2012 # wr_sel => wr_sel_m,
2013 # wr_addr => wr_addr,
2014 # wr_data => wr_data
2015 # );
2016 # process(all)
2017 # end TODO
class TODO(Elaboratable):
    """Cache RAM for one way (the `rams` generate in microwatt
    dcache.vhdl).  Handles the normal reads, writes from reloads and
    the special store-hit update path.

    Note: the BRAMs have an extra read buffer, meaning the output
    is pipelined an extra cycle.  This differs from the icache.
    The writeback logic needs to take that into account by using
    1-cycle delayed signals for load hits.

    NOTE(review): work-in-progress translation -- references per-way
    signals (do_read, rd_addr, wr_data, ..., loop index i) that are
    not yet wired in.
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # Cache hit reads
        # do_read <= '1';
        comb += do_read.eq(1)
        # rd_addr <=
        #  std_ulogic_vector(to_unsigned(early_req_row, ROW_BITS));
        # (was rd_addr.eq(Signal(ROW)): a freshly-constructed Signal
        # is not the early request row)
        comb += rd_addr.eq(early_req_row)
        # cache_out(i) <= dout;
        comb += cache_out[i].eq(dout)

        # Write mux:
        #
        # Defaults to wishbone read responses (cache refill)
        #
        # For timing, the mux on wr_data/sel/addr is not
        # dependent on anything other than the current state.
        # wr_sel_m <= (others => '0');
        comb += wr_sel_m.eq(0)

        # do_write <= '0';
        comb += do_write.eq(0)
        # if r1.write_bram = '1' then
        with m.If(r1.write_bram):
            # Write store data to BRAM. This happens one
            # cycle after the store is in r0.
            # wr_data <= r1.req.data;
            # wr_sel  <= r1.req.byte_sel;
            comb += wr_data.eq(r1.req.data)
            comb += wr_sel.eq(r1.req.byte_sel)
            # wr_addr <= std_ulogic_vector(to_unsigned(
            #     get_row(r1.req.real_addr), ROW_BITS));
            # (was wrapped in Signal(), which creates a new signal
            # whose *width* is the row number, not its value)
            comb += wr_addr.eq(get_row(r1.req.real_addr))

            # if i = r1.req.hit_way then
            with m.If(i == r1.req.hit_way):
                comb += do_write.eq(1)
        # else
        with m.Else():
            # Otherwise, we might be doing a reload or a DCBZ
            # if r1.dcbz = '1' then
            with m.If(r1.dcbz):
                # wr_data <= (others => '0');
                comb += wr_data.eq(0)
            with m.Else():
                # wr_data <= wishbone_in.dat;
                comb += wr_data.eq(wishbone_in.dat)

            # wr_addr <= std_ulogic_vector(to_unsigned(
            #     r1.store_row, ROW_BITS));
            comb += wr_addr.eq(r1.store_row)
            # wr_sel <= (others => '1');
            # all byte lanes enabled: .eq(-1) sets every bit
            # (was .eq(1), which only sets the bottom bit)
            comb += wr_sel.eq(-1)

            # if r1.state = RELOAD_WAIT_ACK and
            #    wishbone_in.ack = '1' and replace_way = i then
            # parenthesise the comparisons: == binds looser than &
            # in Python (also fixes the `relpace_way` typo)
            with m.If((r1.state == State.RELOAD_WAIT_ACK)
                      & wishbone_in.ack & (replace_way == i)):
                comb += do_write.eq(1)

        # Mask write selects with do_write since BRAM
        # doesn't have a global write-enable
        with m.If(do_write):
            # wr_sel_m <= wr_sel;
            comb += wr_sel_m.eq(wr_sel)

        return m
2117
2118 # -- Cache hit synchronous machine for the easy case.
2119 # -- This handles load hits.
2120 # -- It also handles error cases (TLB miss, cache paradox)
2121 # dcache_fast_hit : process(clk)
2122 # Cache hit synchronous machine for the easy case.
2123 # This handles load hits.
2124 # It also handles error cases (TLB miss, cache paradox)
class DcacheFastHit(Elaboratable):
    """Cache hit synchronous machine for the easy case.
    This handles load hits.
    It also handles error cases (TLB miss, cache paradox).

    NOTE(review): work-in-progress translation -- references
    module-level signals (req_op, r0, r1, tlb_hit, ...) that are not
    yet wired in.
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # if req_op /= OP_NONE then
        with m.If(req_op != Op.OP_NONE):
            # report "op:..." (VHDL simulation report)
            print(f"op:{req_op} addr:{r0.req.addr} nc: {r0.req.nc}" \
                  f"idx:{req_index} tag:{req_tag} way: {req_hit_way}"
                 )
        # if r0_valid = '1' then
        with m.If(r0_valid):
            # r1.mmu_req <= r0.mmu_req;
            sync += r1.mmu_req.eq(r0.mmu_req)

        # Fast path for load/store hits.
        # Set signals for the writeback controls.
        sync += r1.hit_way.eq(req_hit_way)
        sync += r1.hit_index.eq(req_index)

        # if req_op = OP_LOAD_HIT then
        with m.If(req_op == Op.OP_LOAD_HIT):
            sync += r1.hit_load_valid.eq(1)
        with m.Else():
            sync += r1.hit_load_valid.eq(0)

        # if req_op = OP_LOAD_HIT or req_op = OP_STORE_HIT then
        # parenthesise each comparison: == binds looser than |
        with m.If((req_op == Op.OP_LOAD_HIT)
                  | (req_op == Op.OP_STORE_HIT)):
            sync += r1.cache_hit.eq(1)
        with m.Else():
            sync += r1.cache_hit.eq(0)

        # if req_op = OP_BAD then
        with m.If(req_op == Op.OP_BAD):
            # report "Signalling ld/st error ..."
            # (the call's closing parenthesis was missing)
            print(f"Signalling ld/st error valid_ra={valid_ra}"
                  f"rc_ok={rc_ok} perm_ok={perm_ok}")

            # r1.ls_error      <= not r0.mmu_req;
            # r1.mmu_error     <= r0.mmu_req;
            # r1.cache_paradox <= access_ok;
            sync += r1.ls_error.eq(~r0.mmu_req)
            sync += r1.mmu_error.eq(r0.mmu_req)
            sync += r1.cache_paradox.eq(access_ok)
        with m.Else():
            sync += r1.ls_error.eq(0)
            sync += r1.mmu_error.eq(0)
            sync += r1.cache_paradox.eq(0)

        # if req_op = OP_STCX_FAIL then
        with m.If(req_op == Op.OP_STCX_FAIL):
            # (was a bare r1.stcx_fail.eq(1) with no `sync +=`, so
            # the assignment was silently dropped)
            sync += r1.stcx_fail.eq(1)
        with m.Else():
            sync += r1.stcx_fail.eq(0)

        # Record TLB hit information for updating TLB PLRU
        # r1.tlb_hit       <= tlb_hit;
        # r1.tlb_hit_way   <= tlb_hit_way;
        # r1.tlb_hit_index <= tlb_req_index;
        sync += r1.tlb_hit.eq(tlb_hit)
        sync += r1.tlb_hit_way.eq(tlb_hit_way)
        sync += r1.tlb_hit_index.eq(tlb_req_index)

        return m
2232
2233 # -- Memory accesses are handled by this state machine:
2234 # --
2235 # -- * Cache load miss/reload (in conjunction with "rams")
2236 # -- * Load hits for non-cachable forms
2237 # -- * Stores (the collision case is handled in "rams")
2238 # --
2239 # -- All wishbone requests generation is done here.
2240 # -- This machine operates at stage 1.
2241 # dcache_slow : process(clk)
2242 # Memory accesses are handled by this state machine:
2243 #
2244 # * Cache load miss/reload (in conjunction with "rams")
2245 # * Load hits for non-cachable forms
2246 # * Stores (the collision case is handled in "rams")
2247 #
2248 # All wishbone requests generation is done here.
2249 # This machine operates at stage 1.
class DcacheSlow(Elaboratable):
    """Memory-access state machine (dcache_slow process in microwatt
    dcache.vhdl).  Handles:

    * cache load miss/reload (in conjunction with "rams")
    * load hits for non-cachable forms
    * stores (the collision case is handled in "rams")

    All wishbone request generation is done here.
    This machine operates at stage 1.

    NOTE(review): work-in-progress translation -- references
    module-level signals (r1, r0, req_op, wb_in, replace_way, ...)
    that are not yet wired in, and reset handling is still TODO.
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # variable stbs_done : boolean;
        # variable req       : mem_access_request_t;
        # variable acks      : unsigned(2 downto 0);
        # NOTE(review): these are VHDL *variables* (updated
        # immediately); modelling them as sync-assigned Signals
        # delays every update by one cycle -- needs a combinatorial
        # rework in a later pass.  The previous bare
        # `comb += stbs_done` statements were invalid and have been
        # removed.
        stbs_done = Signal()
        req = MemAccessRequest()
        acks = Signal(3)

        # r1.use_forward1 <= use_forward1_next;
        # r1.forward_sel  <= (others => '0');
        sync += r1.use_forward1.eq(use_forward1_next)
        sync += r1.forward_sel.eq(0)

        # if use_forward1_next = '1' then
        with m.If(use_forward1_next):
            sync += r1.forward_sel.eq(r1.req.byte_sel)
        # elsif use_forward2_next = '1' then
        with m.Elif(use_forward2_next):
            sync += r1.forward_sel.eq(r1.forward_sel1)

        # r1.forward_data2 <= r1.forward_data1;
        sync += r1.forward_data2.eq(r1.forward_data1)

        # if r1.write_bram = '1' then
        with m.If(r1.write_bram):
            sync += r1.forward_data1.eq(r1.req.data)
            sync += r1.forward_sel1.eq(r1.req.byte_sel)
            sync += r1.forward_way1.eq(r1.req.hit_way)
            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
            sync += r1.forward_valid1.eq(1)
        with m.Else():
            # if r1.dcbz = '1' then  (was r1.bcbz: typo)
            with m.If(r1.dcbz):
                sync += r1.forward_data1.eq(0)
            with m.Else():
                # r1.forward_data1 <= wishbone_in.dat;
                sync += r1.forward_data1.eq(wb_in.dat)

            # r1.forward_sel1 <= (others => '1');
            # all-ones: .eq(-1) sets every bit (was .eq(1))
            sync += r1.forward_sel1.eq(-1)
            sync += r1.forward_way1.eq(replace_way)
            sync += r1.forward_row1.eq(r1.store_row)
            sync += r1.forward_valid1.eq(0)

        # On reset, clear all valid bits to force misses
        # if rst = '1' then
        # TODO figure out how the reset signal works in nmigen --
        # this string placeholder is NOT valid and must be replaced
        with m.If("""TODO RST???"""):
            # for i in index_t loop
            for i in range(INDEX):
                # cache_valids(i) <= (others => '0');
                sync += cache_valid_bits[i].eq(0)

            sync += r1.state.eq(State.IDLE)
            sync += r1.full.eq(0)
            sync += r1.slow_valid.eq(0)
            sync += r1.wb.cyc.eq(0)
            sync += r1.wb.stb.eq(0)
            sync += r1.ls_valid.eq(0)
            sync += r1.mmu_done.eq(0)

            # Not useful normally but helps avoiding
            # tons of sim warnings
            sync += r1.wb.adr.eq(0)
        with m.Else():
            # One cycle pulses reset
            sync += r1.slow_valid.eq(0)
            sync += r1.write_bram.eq(0)
            sync += r1.inc_acks.eq(0)
            sync += r1.dec_acks.eq(0)

            sync += r1.ls_valid.eq(0)
            # complete tlbies and TLB loads in the third cycle
            sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))

            # if req_op = OP_LOAD_HIT or req_op = OP_STCX_FAIL then
            # parenthesise: == binds looser than | (colon was
            # also missing)
            with m.If((req_op == Op.OP_LOAD_HIT)
                      | (req_op == Op.OP_STCX_FAIL)):
                # if r0.mmu_req = '0' then
                with m.If(~r0.mmu_req):
                    sync += r1.ls_valid.eq(1)
                with m.Else():
                    sync += r1.mmu_done.eq(1)

            # if r1.write_tag = '1' then
            with m.If(r1.write_tag):
                # Store new tag in selected way
                for i in range(NUM_WAYS):
                    with m.If(i == replace_way):
                        # cache_tags(r1.store_index)(
                        #     (i + 1) * TAG_WIDTH - 1
                        #     downto i * TAG_WIDTH) <=
                        #   (TAG_WIDTH - 1 downto TAG_BITS => '0')
                        #   & r1.reload_tag;
                        # zero-extension is implicit in .eq(); the
                        # previous Const(...) & r1.reload_tag was a
                        # bitwise AND, masking the tag
                        sync += cache_tag[r1.store_index][
                            i * TAG_WIDTH:(i + 1) * TAG_WIDTH
                        ].eq(r1.reload_tag)
                # r1.store_way <= replace_way;
                # r1.write_tag <= '0';
                sync += r1.store_way.eq(replace_way)
                sync += r1.write_tag.eq(0)

            # Take request from r1.req if there is one there,
            # else from req_op, ra, etc.
            with m.If(r1.full):
                sync += req.eq(r1.req)
            with m.Else():
                sync += req.op.eq(req_op)
                sync += req.valid.eq(req_go)
                sync += req.mmu_req.eq(r0.mmu_req)
                sync += req.dcbz.eq(r0.req.dcbz)
                sync += req.real_addr.eq(ra)

                # Force data to 0 for dcbz
                # if r0.req.dcbz = '0' then
                with m.If(~r0.req.dcbz):
                    sync += req.data.eq(r0.req.data)
                with m.Else():
                    sync += req.data.eq(0)

                # Select all bytes for dcbz
                # and for cacheable loads
                with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
                    # req.byte_sel := (others => '1');
                    sync += req.byte_sel.eq(-1)
                with m.Else():
                    sync += req.byte_sel.eq(r0.req.byte_sel)

                sync += req.hit_way.eq(req_hit_way)
                sync += req.same_tag.eq(req_same_tag)

                # Store the incoming request from r0,
                # if it is a slow request
                # Note that r1.full = 1 implies req_op = OP_NONE
                with m.If((req_op == Op.OP_LOAD_MISS)
                          | (req_op == Op.OP_LOAD_NC)
                          | (req_op == Op.OP_STORE_MISS)
                          | (req_op == Op.OP_STORE_HIT)):
                    # r1.req  <= req;  (was r1.req(req): missing .eq)
                    sync += r1.req.eq(req)
                    sync += r1.full.eq(1)

            # Main state machine
            with m.Switch(r1.state):

                with m.Case(State.IDLE):
                    # r1.wb.adr <=
                    #     req.real_addr(r1.wb.adr'left downto 0);
                    # (slice bound must be the signal's width, not
                    # the signal itself)
                    sync += r1.wb.adr.eq(
                        req.real_addr[0:len(r1.wb.adr)]
                    )
                    sync += r1.wb.sel.eq(req.byte_sel)
                    sync += r1.wb.dat.eq(req.data)
                    sync += r1.dcbz.eq(req.dcbz)

                    # Keep track of our index and way
                    # for subsequent stores.
                    sync += r1.store_index.eq(get_index(req.real_addr))
                    sync += r1.store_row.eq(get_row(req.real_addr))
                    # r1.end_row_ix <=
                    #     get_row_of_line(get_row(req.real_addr)) - 1;
                    # (the `- 1` was missing)
                    sync += r1.end_row_ix.eq(
                        get_row_of_line(get_row(req.real_addr)) - 1
                    )
                    sync += r1.reload_tag.eq(get_tag(req.real_addr))
                    sync += r1.req.same_tag.eq(1)

                    # if req.op = OP_STORE_HIT then
                    with m.If(req.op == Op.OP_STORE_HIT):
                        sync += r1.store_way.eq(req.hit_way)

                    # Reset per-row valid bits,
                    # ready for handling OP_LOAD_MISS
                    for i in range(ROW_PER_LINE):
                        sync += r1.rows_valid[i].eq(0)

                    with m.Switch(req.op):
                        with m.Case(Op.OP_LOAD_HIT):
                            # stay in IDLE state
                            pass

                        with m.Case(Op.OP_LOAD_MISS):
                            # Normal load cache miss,
                            # start the reload machine
                            # (was req_real_addr: undefined name)
                            print(f"cache miss real addr:"
                                  f"{req.real_addr}"
                                  f" idx:{get_index(req.real_addr)}"
                                  f" tag:{get_tag(req.real_addr)}")

                            # Start the wishbone cycle
                            sync += r1.wb.we.eq(0)
                            sync += r1.wb.cyc.eq(1)
                            sync += r1.wb.stb.eq(1)

                            # Track that we had one request sent
                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                            sync += r1.write_tag.eq(1)

                        with m.Case(Op.OP_LOAD_NC):
                            sync += r1.wb.cyc.eq(1)
                            sync += r1.wb.stb.eq(1)
                            sync += r1.wb.we.eq(0)
                            sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)

                        # when OP_STORE_HIT | OP_STORE_MISS =>
                        # multiple patterns are separate Case
                        # arguments, not a bitwise | of enum values
                        with m.Case(Op.OP_STORE_HIT,
                                    Op.OP_STORE_MISS):
                            # if req.dcbz = '0' then
                            # (was req.bcbz: typo)
                            with m.If(~req.dcbz):
                                sync += r1.state.eq(
                                    State.STORE_WAIT_ACK
                                )
                                # r1.acks_pending <= to_unsigned(1, 3)
                                # i.e. the 3-bit value 1
                                sync += r1.acks_pending.eq(1)
                                sync += r1.full.eq(0)
                                sync += r1.slow_valid.eq(1)

                                # if req.mmu_req = '0' then
                                with m.If(~req.mmu_req):
                                    sync += r1.ls_valid.eq(1)
                                with m.Else():
                                    sync += r1.mmu_done.eq(1)

                                with m.If(req.op == Op.OP_STORE_HIT):
                                    sync += r1.write_bram.eq(1)
                            with m.Else():
                                # dcbz is handled much like a load
                                # miss except that we are writing
                                # to memory instead of reading
                                # (was Op.RELOAD_WAIT_ACK: it is a
                                # State, not an Op)
                                sync += r1.state.eq(
                                    State.RELOAD_WAIT_ACK
                                )

                                with m.If(req.op == Op.OP_STORE_MISS):
                                    sync += r1.write_tag.eq(1)

                            sync += r1.wb.we.eq(1)
                            sync += r1.wb.cyc.eq(1)
                            sync += r1.wb.stb.eq(1)

                        # OP_NONE and OP_BAD do nothing
                        # OP_BAD & OP_STCX_FAIL were handled above
                        # already (enum members need the Op. prefix)
                        with m.Case(Op.OP_NONE):
                            pass
                        with m.Case(Op.OP_BAD):
                            pass
                        with m.Case(Op.OP_STCX_FAIL):
                            pass

                with m.Case(State.RELOAD_WAIT_ACK):
                    # Requests are all sent if stb is 0
                    sync += stbs_done.eq(~r1.wb.stb)

                    # If we are still sending requests,
                    # was one accepted?
                    with m.If(~wb_in.stall & ~stbs_done):
                        # That was the last word? We are done
                        # sending.  Clear stb and set stbs_done
                        # so we can handle an eventual last ack
                        # on the same cycle.
                        with m.If(is_last_row_addr(
                                r1.wb.adr, r1.end_row_ix)):
                            sync += r1.wb.stb.eq(0)
                            # stbs_done := true;  (was .eq(0))
                            sync += stbs_done.eq(1)

                        # Calculate the next row address
                        sync += r1.wb.adr.eq(next_row_addr(r1.wb.adr))

                    # Incoming acks processing
                    sync += r1.forward_valid1.eq(wb_in.ack)

                    with m.If(wb_in.ack):
                        sync += r1.rows_valid[
                            r1.store_row % ROW_PER_LINE
                        ].eq(1)

                        # If this is the data we were looking for,
                        # we can complete the request next cycle.
                        # Compare the whole address in case the
                        # request in r1.req is not the one that
                        # started this refill.
                        # (the `|` between the two dcbz groups was
                        # missing and the parens were unbalanced)
                        with m.If(r1.full & r1.req.same_tag &
                                  ((r1.dcbz & r1.req.dcbz)
                                   | (~r1.dcbz &
                                      (r1.req.op
                                       == Op.OP_LOAD_MISS))) &
                                  (r1.store_row
                                   == get_row(r1.req.real_addr))):
                            sync += r1.full.eq(0)
                            sync += r1.slow_valid.eq(1)

                            with m.If(~r1.mmu_req):
                                sync += r1.ls_valid.eq(1)
                            with m.Else():
                                sync += r1.mmu_done.eq(1)
                            # r1.forward_sel <= (others => '1');
                            sync += r1.forward_sel.eq(-1)
                            sync += r1.use_forward1.eq(1)

                        # Check for completion
                        with m.If(stbs_done &
                                  is_last_row(r1.store_row,
                                              r1.end_row_ix)):
                            # Complete wishbone cycle
                            sync += r1.wb.cyc.eq(0)

                            # Cache line is now valid
                            sync += cache_valid_bits[
                                r1.store_index
                            ][r1.store_way].eq(1)

                            sync += r1.state.eq(State.IDLE)

                        # Increment store row counter
                        sync += r1.store_row.eq(next_row(r1.store_row))

                with m.Case(State.STORE_WAIT_ACK):
                    # stbs_done := r1.wb.stb = '0';
                    # acks      := r1.acks_pending;
                    sync += stbs_done.eq(~r1.wb.stb)
                    sync += acks.eq(r1.acks_pending)

                    # if r1.inc_acks /= r1.dec_acks then
                    with m.If(r1.inc_acks != r1.dec_acks):
                        with m.If(r1.inc_acks):
                            sync += acks.eq(acks + 1)
                        with m.Else():
                            sync += acks.eq(acks - 1)

                    sync += r1.acks_pending.eq(acks)

                    # Clear stb when slave accepted request
                    with m.If(~wb_in.stall):
                        # See if there is another store waiting
                        # to be done which is in the same real page.
                        with m.If(req.valid):
                            sync += r1.wb.adr[0:SET_SIZE_BITS].eq(
                                req.real_addr[0:SET_SIZE_BITS]
                            )
                            # r1.wb.dat <= req.data;
                            # r1.wb.sel <= req.byte_sel;
                            # (present in the VHDL but missing from
                            # the translation)
                            sync += r1.wb.dat.eq(req.data)
                            sync += r1.wb.sel.eq(req.byte_sel)

                        # if acks < 7 and req.same_tag = '1'
                        #    and (req.op = OP_STORE_MISS
                        #         or req.op = OP_STORE_HIT) then
                        # separate `if` in the VHDL (was Elif);
                        # parenthesise: < and == bind looser than &
                        # (also fixes Op_STORE_MISS / OP_SOTRE_HIT
                        # typos)
                        with m.If((acks < 7) & req.same_tag &
                                  ((req.op == Op.OP_STORE_MISS)
                                   | (req.op == Op.OP_STORE_HIT))):
                            sync += r1.wb.stb.eq(1)
                            sync += stbs_done.eq(0)

                            with m.If(req.op == Op.OP_STORE_HIT):
                                sync += r1.write_bram.eq(1)
                            sync += r1.full.eq(0)
                            sync += r1.slow_valid.eq(1)

                            # Store requests never come from the MMU
                            sync += r1.ls_valid.eq(1)
                            sync += stbs_done.eq(0)
                            sync += r1.inc_acks.eq(1)
                        with m.Else():
                            sync += r1.wb.stb.eq(0)
                            # stbs_done := true;
                            sync += stbs_done.eq(1)

                    # Got ack? See if complete.
                    with m.If(wb_in.ack):
                        # if stbs_done and acks = 1 then
                        # (colon and the == 1 comparison were missing)
                        with m.If(stbs_done & (acks == 1)):
                            sync += r1.state.eq(State.IDLE)
                            sync += r1.wb.cyc.eq(0)
                            sync += r1.wb.stb.eq(0)
                        sync += r1.dec_acks.eq(1)

                with m.Case(State.NC_LOAD_WAIT_ACK):
                    # Clear stb when slave accepted request
                    with m.If(~wb_in.stall):
                        sync += r1.wb.stb.eq(0)

                    # Got ack? complete.
                    with m.If(wb_in.ack):
                        sync += r1.state.eq(State.IDLE)
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)

                        with m.If(~r1.mmu_req):
                            sync += r1.ls_valid.eq(1)
                        with m.Else():
                            sync += r1.mmu_done.eq(1)

                        # r1.forward_sel <= (others => '1');
                        sync += r1.forward_sel.eq(-1)
                        sync += r1.use_forward1.eq(1)
                        sync += r1.wb.cyc.eq(0)
                        sync += r1.wb.stb.eq(0)

        return m
2938
2939 # dc_log: if LOG_LENGTH > 0 generate
# TODO learn how to translate vhdl generate into nmigen
class DcacheLog(Elaboratable):
    """Debug logging (dc_log generate in microwatt dcache.vhdl):
    packs assorted dcache status signals into a 20-bit log word
    every clock cycle and drives it out on log_out.

    NOTE(review): work-in-progress translation -- references
    module-level signals (r1, wb_in, d_out, log_out, ...) that are
    not yet wired in.  (Base class was misspelt `Elaborate`.)
    """
    def __init__(self):
        pass

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # signal log_data : std_ulogic_vector(19 downto 0);
        log_data = Signal(20)

        # log_data <= r1.wb.adr(5 downto 3) & wishbone_in.stall &
        #             wishbone_in.ack & r1.wb.stb & r1.wb.cyc &
        #             d_out.error & d_out.valid &
        #             to_unsigned(op_t'pos(req_op), 3) & stall_out &
        #             to_unsigned(tlb_hit_way, 3) & valid_ra &
        #             to_unsigned(state_t'pos(r1.state), 3);
        # VHDL & concatenates MSB-first; Cat() is LSB-first, hence
        # the reversed order below.
        # NOTE(review): Const(signal, 3) is not valid nmigen -- the
        # state/way/op fields need to be 3-bit Signal slices; left
        # as-is pending the surrounding signals being defined.
        sync += log_data.eq(Cat(
            Const(r1.state, 3), valid_ra, Const(tlb_hit_way, 3),
            stall_out, Const(req_op, 3), d_out.valid, d_out.error,
            r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
            r1.wb.adr[3:6]
        ))
        # log_out <= log_data;
        comb += log_out.eq(log_data)

        return m
2985 # end generate;
2986 # end;