1 """DCache
2
3 based on Anton Blanchard microwatt dcache.vhdl
4
5 """
6
7 from enum import Enum, unique
8
9 from nmigen import Module, Signal, Elaboratable, Cat, Repl, Array, Const
10 from nmigen.cli import main
11 from nmutil.iocontrol import RecordObject
12 from nmigen.utils import log2_int
13 from nmigen.cli import rtlil
14
15
16 from soc.experiment.mem_types import (LoadStore1ToDCacheType,
17 DCacheToLoadStore1Type,
18 MMUToDCacheType,
19 DCacheToMMUType)
20
21 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
22 WBAddrType, WBDataType, WBSelType,
23 WBMasterOut, WBSlaveOut,
24 WBMasterOutVector, WBSlaveOutVector,
25 WBIOMasterOut, WBIOSlaveOut)
26
27 from soc.experiment.cache_ram import CacheRam
28 from soc.experiment.plru import PLRU
29
30
31 # TODO: make these parameters of DCache at some point
32 LINE_SIZE = 64 # Line size in bytes
33 NUM_LINES = 32 # Number of lines in a set
34 NUM_WAYS = 4 # Number of ways
35 TLB_SET_SIZE = 64 # L1 DTLB entries per set
36 TLB_NUM_WAYS = 2 # L1 DTLB number of sets
37 TLB_LG_PGSZ = 12 # L1 DTLB log_2(page_size)
38 LOG_LENGTH = 0 # Non-zero to enable log data collection
39
40 # BRAM organisation: We never access more than
41 # -- WB_DATA_BITS at a time so to save
42 # -- resources we make the array only that wide, and
43 # -- use consecutive indices for to make a cache "line"
44 # --
45 # -- ROW_SIZE is the width in bytes of the BRAM
46 # -- (based on WB, so 64-bits)
47 ROW_SIZE = WB_DATA_BITS // 8;
48
49 # ROW_PER_LINE is the number of row (wishbone
50 # transactions) in a line
51 ROW_PER_LINE = LINE_SIZE // ROW_SIZE
52
53 # BRAM_ROWS is the number of rows in BRAM needed
54 # to represent the full dcache
55 BRAM_ROWS = NUM_LINES * ROW_PER_LINE
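
# e.g. with the defaults above: WB_DATA_BITS = 64 gives ROW_SIZE = 8
# bytes, so ROW_PER_LINE = 64 // 8 = 8 wishbone transfers per line and
# BRAM_ROWS = 32 * 8 = 256 rows in the data BRAM.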


# Bit fields counts in the address

# REAL_ADDR_BITS is the number of real address
# bits that we store
REAL_ADDR_BITS = 56

# ROW_BITS is the number of bits to select a row
ROW_BITS = log2_int(BRAM_ROWS)

# ROW_LINE_BITS is the number of bits to select
# a row within a line
ROW_LINE_BITS = log2_int(ROW_PER_LINE)

# LINE_OFF_BITS is the number of bits for
# the offset in a cache line
LINE_OFF_BITS = log2_int(LINE_SIZE)

# ROW_OFF_BITS is the number of bits for
# the offset in a row
ROW_OFF_BITS = log2_int(ROW_SIZE)

# INDEX_BITS is the number of bits to
# select a cache line
INDEX_BITS = log2_int(NUM_LINES)

# SET_SIZE_BITS is the log base 2 of the set size
SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS

# TAG_BITS is the number of bits of
# the tag part of the address
TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS

# TAG_WIDTH is the width in bits of each way of the tag RAM
TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
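
# e.g. TAG_BITS = 45 rounds up to TAG_WIDTH = 48: 45 + 7 = 52,
# 52 - (52 % 8) = 48, so each way's tag starts on a byte boundary
# within the tag RAM word.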

# WAY_BITS is the number of bits to select a way
WAY_BITS = log2_int(NUM_WAYS)

# Example of layout for 32 lines of 64 bytes:
#
# ..  tag    |index| line  |
# ..         |   row   |   |
# ..         |     |---|   | ROW_LINE_BITS  (3)
# ..         |     |--- - --| LINE_OFF_BITS (6)
# ..         |         |- --| ROW_OFF_BITS  (3)
# ..         |----- ---|    | ROW_BITS      (8)
# ..         |-----|        | INDEX_BITS    (5)
# .. --------|              | TAG_BITS      (45)

TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

def CacheTagArray():
    return Array(Signal(TAG_RAM_WIDTH) for x in range(NUM_LINES))

def CacheValidBitsArray():
    # per-line valid bits, one bit per way: entries are tested
    # as cache_valid_bits[index][way], so each is NUM_WAYS wide
    return Array(Signal(NUM_WAYS) for x in range(NUM_LINES))

def RowPerLineValidArray():
    return Array(Signal() for x in range(ROW_PER_LINE))

# L1 TLB
TLB_SET_BITS = log2_int(TLB_SET_SIZE)
TLB_WAY_BITS = log2_int(TLB_NUM_WAYS)
TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
TLB_PTE_BITS = 64
TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS
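
# with the defaults: TLB_SET_BITS = 6, TLB_WAY_BITS = 1,
# TLB_EA_TAG_BITS = 64 - (12 + 6) = 46, TLB_TAG_WAY_BITS = 2 * 46 = 92
# and TLB_PTE_WAY_BITS = 2 * 64 = 128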

assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not multiple of ROW_SIZE"
assert (LINE_SIZE & (LINE_SIZE - 1)) == 0, "LINE_SIZE not power of 2"
assert (NUM_LINES & (NUM_LINES - 1)) == 0, "NUM_LINES not power of 2"
assert (ROW_PER_LINE & (ROW_PER_LINE - 1)) == 0, \
    "ROW_PER_LINE not power of 2"
assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), "geometry bits don't add up"
assert (LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS), \
    "geometry bits don't add up"
assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS), \
    "geometry bits don't add up"
assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS), \
    "geometry bits don't add up"
assert 64 == WB_DATA_BITS, "Can't yet handle wb width that isn't 64-bits"
assert SET_SIZE_BITS <= TLB_LG_PGSZ, "Set indexed by virtual address"


def TLBValidBitsArray():
    return Array(Signal(TLB_NUM_WAYS) for x in range(TLB_SET_SIZE))

def TLBTagsArray():
    return Array(Signal(TLB_TAG_WAY_BITS) for x in range(TLB_SET_SIZE))

def TLBPtesArray():
    return Array(Signal(TLB_PTE_WAY_BITS) for x in range(TLB_SET_SIZE))

def HitWaySet():
    # each entry holds a way *number*, hence WAY_BITS wide, one per TLB way
    return Array(Signal(WAY_BITS) for x in range(TLB_NUM_WAYS))

# Cache RAM interface
def CacheRamOut():
    return Array(Signal(WB_DATA_BITS) for x in range(NUM_WAYS))

# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS) for x in range(NUM_LINES))

# TLB PLRU output interface
def TLBPLRUOut():
    return Array(Signal(TLB_WAY_BITS) for x in range(TLB_SET_SIZE))

# Helper functions to decode incoming requests
#
# Return the cache line index (tag index) for an address
def get_index(addr):
    return addr[LINE_OFF_BITS:SET_SIZE_BITS]

# Return the cache row index (data memory) for an address
def get_row(addr):
    return addr[ROW_OFF_BITS:SET_SIZE_BITS]

# Return the index of a row within a line
def get_row_of_line(row):
    return row[0:ROW_LINE_BITS]
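
# e.g. with the default geometry (ROW_OFF_BITS=3, LINE_OFF_BITS=6,
# SET_SIZE_BITS=11): get_index() takes addr[6:11], get_row() takes
# addr[3:11], and get_row_of_line() keeps just the low 3 bits of a
# row number, i.e. its position within the cache line.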

# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

# Returns whether this is the last row of a line
def is_last_row(row, last):
    return get_row_of_line(row) == last

# Return the address of the next row in the current cache line
def next_row_addr(addr):
    # add 1 to the row index within the line, keeping the rest of the
    # address unchanged (the adder is only ROW_LINE_BITS wide)
    row_idx = addr[ROW_OFF_BITS:LINE_OFF_BITS] + 1
    return Cat(addr[:ROW_OFF_BITS],
               row_idx[:ROW_LINE_BITS],
               addr[LINE_OFF_BITS:])

# Return the next row in the current cache line. We use a
# dedicated function in order to limit the size of the
# generated adder to be only the bits within a cache line
# (3 bits with default settings)
def next_row(row):
    row_v = row[0:ROW_LINE_BITS] + 1
    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
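
# e.g. with ROW_LINE_BITS = 3: next_row(0b10111) is 0b10000: the low
# 3 bits wrap from 7 back to 0 while the upper row bits stay put,
# giving the within-line wrap-around used during a line reload.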

# Get the tag value from the address
def get_tag(addr):
    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]

# Read a tag from a tag memory row
def read_tag(way, tagset):
    return tagset[way * TAG_WIDTH:way * TAG_WIDTH + TAG_BITS]

# Read a TLB tag from a TLB tag memory row
def read_tlb_tag(way, tags):
    # word_select also works when way is a run-time Signal
    return tags.word_select(way, TLB_EA_TAG_BITS)

# Write a TLB tag to a TLB tag memory row
def write_tlb_tag(way, tags, tag):
    # returns an assignment, to be added to comb or sync by the caller
    return read_tlb_tag(way, tags).eq(tag)

# Read a PTE from a TLB PTE memory row
def read_tlb_pte(way, ptes):
    return ptes.word_select(way, TLB_PTE_BITS)

# Write a PTE to a TLB PTE memory row
def write_tlb_pte(way, ptes, newpte):
    return read_tlb_pte(way, ptes).eq(newpte)
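
# layout note: a TLB tag or PTE "row" is all ways concatenated, so
# way w's PTE sits at bits [w*64 : (w+1)*64]; word_select() picks
# that slice even when the way number is a run-time Signal.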


# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
    def __init__(self):
        super().__init__()
        self.reference = Signal()
        self.changed = Signal()
        self.nocache = Signal()
        self.priv = Signal()
        self.rd_perm = Signal()
        self.wr_perm = Signal()


def extract_perm_attr(pte):
    pa = PermAttr()
    pa.reference = pte[8]
    pa.changed = pte[7]
    pa.nocache = pte[5]
    pa.priv = pte[3]
    pa.rd_perm = pte[2]
    pa.wr_perm = pte[1]
    return pa


# Type of operation on a "valid" input
@unique
class Op(Enum):
    OP_NONE = 0
    OP_BAD = 1           # NC cache hit, TLB miss, prot/RC failure
    OP_STCX_FAIL = 2     # conditional store w/o reservation
    OP_LOAD_HIT = 3      # Cache hit on load
    OP_LOAD_MISS = 4     # Load missing cache
    OP_LOAD_NC = 5       # Non-cachable load
    OP_STORE_HIT = 6     # Store hitting cache
    OP_STORE_MISS = 7    # Store missing cache


# Cache state machine
@unique
class State(Enum):
    IDLE = 0             # Normal load hit processing
    RELOAD_WAIT_ACK = 1  # Cache reload wait ack
    STORE_WAIT_ACK = 2   # Store wait ack
    NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack


# Dcache operations:
#
# In order to make timing, we use the BRAMs with
# an output buffer, which means that the BRAM
# output is delayed by an extra cycle.
#
# Thus, the dcache has a 2-stage internal pipeline
# for cache hits with no stalls.
#
# All other operations are handled via stalling
# in the first stage.
#
# The second stage can thus complete a hit at the same
# time as the first stage emits a stall for a complex op.
#
# Stage 0 register, basically contains just the latched request

class RegStage0(RecordObject):
    def __init__(self):
        super().__init__()
        self.req = LoadStore1ToDCacheType()
        self.tlbie = Signal()
        self.doall = Signal()
        self.tlbld = Signal()
        self.mmu_req = Signal() # indicates source of request


class MemAccessRequest(RecordObject):
    def __init__(self):
        super().__init__()
        self.op = Signal(Op)
        self.valid = Signal()
        self.dcbz = Signal()
        self.real_addr = Signal(REAL_ADDR_BITS)
        self.data = Signal(64)
        self.byte_sel = Signal(8)
        self.hit_way = Signal(WAY_BITS)
        self.same_tag = Signal()
        self.mmu_req = Signal()


# First stage register, contains state for stage 1 of load hits
# and for the state machine used by all other operations
class RegStage1(RecordObject):
    def __init__(self):
        super().__init__()
        # Info about the request
        self.full = Signal()    # have uncompleted request
        self.mmu_req = Signal() # request is from MMU
        self.req = MemAccessRequest()

        # Cache hit state
        self.hit_way = Signal(WAY_BITS)
        self.hit_load_valid = Signal()
        self.hit_index = Signal(INDEX_BITS) # a line index, not a line count
        self.cache_hit = Signal()

        # TLB hit state
        self.tlb_hit = Signal()
        self.tlb_hit_way = Signal(TLB_WAY_BITS) # a way number, not one-hot
        self.tlb_hit_index = Signal(TLB_SET_BITS)

        # 2-stage data buffer for data forwarded from writes to reads
        self.forward_data1 = Signal(64)
        self.forward_data2 = Signal(64)
        self.forward_sel1 = Signal(8)
        self.forward_valid1 = Signal()
        self.forward_way1 = Signal(WAY_BITS)
        self.forward_row1 = Signal(ROW_BITS)
        self.use_forward1 = Signal()
        self.forward_sel = Signal(8)

        # Cache miss state (reload state machine)
        self.state = Signal(State)
        self.dcbz = Signal()
        self.write_bram = Signal()
        self.write_tag = Signal()
        self.slow_valid = Signal()
        self.wb = WBMasterOut()
        self.reload_tag = Signal(TAG_BITS)
        self.store_way = Signal(WAY_BITS)
        self.store_row = Signal(ROW_BITS)
        self.store_index = Signal(INDEX_BITS)
        self.end_row_ix = Signal(ROW_LINE_BITS) # a row index within a line
        self.rows_valid = RowPerLineValidArray()
        self.acks_pending = Signal(3)
        self.inc_acks = Signal()
        self.dec_acks = Signal()

        # Signals to complete (possibly with error)
        self.ls_valid = Signal()
        self.ls_error = Signal()
        self.mmu_done = Signal()
        self.mmu_error = Signal()
        self.cache_paradox = Signal()

        # Signal to complete a failed stcx.
        self.stcx_fail = Signal()


# Reservation information
class Reservation(RecordObject):
    def __init__(self):
        super().__init__()
        self.valid = Signal()
        self.addr = Signal(64-LINE_OFF_BITS)


class DCache(Elaboratable):
    """Set associative dcache write-through
    TODO (in no specific order):
    * See list in icache.vhdl
    * Complete load misses on the cycle when WB data comes instead of
      at the end of line (this requires dealing with requests coming in
      while not idle...)
    """
    def __init__(self):
        self.d_in = LoadStore1ToDCacheType()
        self.d_out = DCacheToLoadStore1Type()

        self.m_in = MMUToDCacheType()
        self.m_out = DCacheToMMUType()

        self.stall_out = Signal()

        self.wb_out = WBMasterOut()
        self.wb_in = WBSlaveOut()

        self.log_out = Signal(20)

    def stage_0(self, m, r0, r1, r0_full):
        """Latch the request in r0.req as long as we're not stalling
        """
        comb = m.d.comb
        sync = m.d.sync
        d_in, d_out, m_in = self.d_in, self.d_out, self.m_in

        r = RegStage0()

        # TODO, this goes in unit tests and formal proofs
        # (report if loadstore1 and the MMU request simultaneously)
        with m.If(d_in.valid & m_in.valid):
            #sync += Display("request collision loadstore vs MMU")
            pass

        with m.If(m_in.valid):
            sync += r.req.valid.eq(1)
            sync += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))
            sync += r.req.dcbz.eq(0)
            sync += r.req.nc.eq(0)
            sync += r.req.reserve.eq(0)
            sync += r.req.virt_mode.eq(0) # MMU requests carry real addresses
            sync += r.req.priv_mode.eq(1)
            sync += r.req.addr.eq(m_in.addr)
            sync += r.req.data.eq(m_in.pte)
            sync += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
            sync += r.tlbie.eq(m_in.tlbie)
            sync += r.doall.eq(m_in.doall)
            sync += r.tlbld.eq(m_in.tlbld)
            sync += r.mmu_req.eq(1)
        with m.Else():
            sync += r.req.eq(d_in)
            sync += r.tlbie.eq(0)
            sync += r.doall.eq(0)
            sync += r.tlbld.eq(0)
            sync += r.mmu_req.eq(0)
        with m.If(~(r1.full & r0_full)):
            sync += r0.eq(r)
            sync += r0_full.eq(r.req.valid)

    def tlb_read(self, m, r0_stall, tlb_valid_way,
                 tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
                 dtlb_tags, dtlb_ptes):
        """TLB
        Operates in the second cycle on the request latched in r0.req.
        TLB updates write the entry at the end of the second cycle.
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        index = Signal(TLB_SET_BITS)
        addrbits = Signal(TLB_SET_BITS)

        amin = TLB_LG_PGSZ
        amax = TLB_LG_PGSZ + TLB_SET_BITS

        with m.If(m_in.valid):
            comb += addrbits.eq(m_in.addr[amin : amax])
        with m.Else():
            comb += addrbits.eq(d_in.addr[amin : amax])
        comb += index.eq(addrbits)

        # If we have any op and the previous op isn't finished,
        # then keep the same output for next cycle.
        with m.If(~r0_stall):
            sync += tlb_valid_way.eq(dtlb_valid_bits[index])
            sync += tlb_tag_way.eq(dtlb_tags[index])
            sync += tlb_pte_way.eq(dtlb_ptes[index])

    def maybe_tlb_plrus(self, m, r1, tlb_plru_victim):
        """Generate TLB PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        # elaboration-time condition (was a VHDL generate-if):
        # no victim to pick with a single way
        if TLB_NUM_WAYS <= 1:
            return

        for i in range(TLB_SET_SIZE):
            # TLB PLRU interface
            tlb_plru = PLRU(TLB_WAY_BITS)
            setattr(m.submodules, "maybe_plru_%d" % i, tlb_plru)

            # PLRU interface: touch this set's PLRU on a TLB hit
            with m.If(r1.tlb_hit_index == i):
                comb += tlb_plru.acc_en.eq(r1.tlb_hit)
            with m.Else():
                comb += tlb_plru.acc_en.eq(0)
            comb += tlb_plru.acc.eq(r1.tlb_hit_way)

            # lru is an output of the PLRU: read the victim from it
            comb += tlb_plru_victim[i].eq(tlb_plru.lru)

    def tlb_search(self, m, tlb_req_index, r0, r0_valid,
                   tlb_valid_way, tlb_tag_way, tlb_hit_way,
                   tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra):

        comb = m.d.comb
        sync = m.d.sync

        hitway = Signal(TLB_WAY_BITS)
        hit = Signal()
        eatag = Signal(TLB_EA_TAG_BITS)

        TLB_LG_END = TLB_LG_PGSZ + TLB_SET_BITS
        comb += tlb_req_index.eq(r0.req.addr[TLB_LG_PGSZ : TLB_LG_END])
        comb += eatag.eq(r0.req.addr[TLB_LG_END : 64 ])

        for i in range(TLB_NUM_WAYS):
            # note the parentheses: == binds more loosely than &
            with m.If(tlb_valid_way[i]
                      & (read_tlb_tag(i, tlb_tag_way) == eatag)):
                comb += hitway.eq(i)
                comb += hit.eq(1)

        comb += tlb_hit.eq(hit & r0_valid)
        comb += tlb_hit_way.eq(hitway)

        with m.If(tlb_hit):
            comb += pte.eq(read_tlb_pte(hitway, tlb_pte_way))
        with m.Else():
            comb += pte.eq(0)
        comb += valid_ra.eq(tlb_hit | ~r0.req.virt_mode)
        with m.If(r0.req.virt_mode):
            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                              r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
                              pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
            comb += perm_attr.eq(extract_perm_attr(pte))
        with m.Else():
            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                              r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]))

            comb += perm_attr.reference.eq(1)
            comb += perm_attr.changed.eq(1)
            comb += perm_attr.priv.eq(1)
            comb += perm_attr.nocache.eq(0)
            comb += perm_attr.rd_perm.eq(1)
            comb += perm_attr.wr_perm.eq(1)

    def tlb_update(self, m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
                   tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
                   dtlb_tags, tlb_pte_way, dtlb_ptes):

        comb = m.d.comb
        sync = m.d.sync

        tlbie = Signal()
        tlbwe = Signal()
        repl_way = Signal(TLB_WAY_BITS)
        eatag = Signal(TLB_EA_TAG_BITS)
        tagset = Signal(TLB_TAG_WAY_BITS)
        pteset = Signal(TLB_PTE_WAY_BITS)

        comb += tlbie.eq(r0_valid & r0.tlbie)
        comb += tlbwe.eq(r0_valid & r0.tlbld)

        with m.If(tlbie & r0.doall):
            # clear all valid bits at once
            for i in range(TLB_SET_SIZE):
                sync += dtlb_valid_bits[i].eq(0)

        with m.Elif(tlbie):
            with m.If(tlb_hit):
                # bit_select, because tlb_hit_way is a Signal
                sync += dtlb_valid_bits[tlb_req_index].bit_select(
                            tlb_hit_way, 1).eq(0)
        with m.Elif(tlbwe):
            with m.If(tlb_hit):
                comb += repl_way.eq(tlb_hit_way)
            with m.Else():
                comb += repl_way.eq(tlb_plru_victim[tlb_req_index])
            comb += eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
            # modify the tag/pte sets combinatorially, then store the
            # whole set back on the clock edge
            comb += tagset.eq(tlb_tag_way)
            comb += write_tlb_tag(repl_way, tagset, eatag)
            sync += dtlb_tags[tlb_req_index].eq(tagset)
            comb += pteset.eq(tlb_pte_way)
            comb += write_tlb_pte(repl_way, pteset, r0.req.data)
            sync += dtlb_ptes[tlb_req_index].eq(pteset)
            sync += dtlb_valid_bits[tlb_req_index].bit_select(
                        repl_way, 1).eq(1)

    def maybe_plrus(self, m, r1, plru_victim):
        """Generate PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        for i in range(NUM_LINES):
            # PLRU interface
            plru = PLRU(WAY_BITS)
            setattr(m.submodules, "plru%d" % i, plru)

            # touch this line's PLRU on a cache hit
            with m.If(r1.hit_index == i):
                comb += plru.acc_en.eq(r1.cache_hit)
            with m.Else():
                comb += plru.acc_en.eq(0)
            comb += plru.acc.eq(r1.hit_way)

            # lru is an output of the PLRU: read the victim from it
            comb += plru_victim[i].eq(plru.lru)

    def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set,
                       cache_tags):
        """Cache tag RAM read port
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        index = Signal(INDEX_BITS)

        with m.If(r0_stall):
            comb += index.eq(req_index)
        with m.Elif(m_in.valid):
            comb += index.eq(get_index(m_in.addr))
        with m.Else():
            comb += index.eq(get_index(d_in.addr))
        sync += cache_tag_set.eq(cache_tags[index])

    def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
                       r0_valid, r1, cache_valid_bits, replace_way,
                       use_forward1_next, use_forward2_next,
                       req_hit_way, plru_victim, rc_ok, perm_attr,
                       valid_ra, perm_ok, access_ok, req_op, req_go,
                       tlb_pte_way, tlb_valid_way, tlb_hit, tlb_hit_way,
                       cache_tag_set, cancel_store, req_same_tag,
                       r0_stall, early_req_row):
        """Cache request parsing and hit detection
        """

        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        is_hit = Signal()
        hit_way = Signal(WAY_BITS)
        op = Signal(Op)
        opsel = Signal(3)
        go = Signal()
        nc = Signal()
        hit_set = Signal(TLB_NUM_WAYS)
        hit_way_set = HitWaySet()
        rel_matches = Signal(TLB_NUM_WAYS)
        rel_match = Signal()

        # Extract line, row and tag from request
        comb += req_index.eq(get_index(r0.req.addr))
        comb += req_row.eq(get_row(r0.req.addr))
        comb += req_tag.eq(get_tag(ra))

        comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)

        # Test if pending request is a hit on any way
        # In order to make timing in virtual mode,
        # when we are using the TLB, we compare each
        # way with each of the real addresses from each way of
        # the TLB, and then decide later which match to use.

        with m.If(r0.req.virt_mode):
            comb += rel_matches.eq(0)
            for j in range(TLB_NUM_WAYS):
                # fresh per-way signals: a single shared signal would
                # make every way compare against the last way's tag
                s_hit = Signal(name="s_hit%d" % j)
                s_pte = Signal(TLB_PTE_BITS, name="s_pte%d" % j)
                s_ra = Signal(REAL_ADDR_BITS, name="s_ra%d" % j)
                s_tag = Signal(TAG_BITS, name="s_tag%d" % j)
                comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
                comb += s_ra.eq(Cat(r0.req.addr[0:TLB_LG_PGSZ],
                                    s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
                comb += s_tag.eq(get_tag(s_ra))

                for i in range(NUM_WAYS):
                    with m.If(go & cache_valid_bits[req_index][i] &
                              (read_tag(i, cache_tag_set) == s_tag)
                              & tlb_valid_way[j]):
                        comb += hit_way_set[j].eq(i)
                        comb += s_hit.eq(1)
                comb += hit_set[j].eq(s_hit)
                with m.If(s_tag == r1.reload_tag):
                    comb += rel_matches[j].eq(1)
            with m.If(tlb_hit):
                # bit_select, since tlb_hit_way is a Signal
                comb += is_hit.eq(hit_set.bit_select(tlb_hit_way, 1))
                comb += hit_way.eq(hit_way_set[tlb_hit_way])
                comb += rel_match.eq(rel_matches.bit_select(tlb_hit_way, 1))
        with m.Else():
            s_tag = Signal(TAG_BITS)
            comb += s_tag.eq(get_tag(r0.req.addr))
            for i in range(NUM_WAYS):
                with m.If(go & cache_valid_bits[req_index][i] &
                          (read_tag(i, cache_tag_set) == s_tag)):
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)
            with m.If(s_tag == r1.reload_tag):
                comb += rel_match.eq(1)
        comb += req_same_tag.eq(rel_match)

        # See if the request matches the line currently being reloaded
        with m.If((r1.state == State.RELOAD_WAIT_ACK) &
                  (req_index == r1.store_index) & rel_match):
            # For a store, consider this a hit even if the row isn't
            # valid since it will be by the time we perform the store.
            # For a load, check the appropriate row valid bit.
            valid = r1.rows_valid[req_row % ROW_PER_LINE]
            comb += is_hit.eq(~r0.req.load | valid)
            comb += hit_way.eq(replace_way)

        # Whether to use forwarded data for a load or not
        comb += use_forward1_next.eq(0)
        with m.If((get_row(r1.req.real_addr) == req_row) &
                  (r1.req.hit_way == hit_way)):
            # Only need to consider r1.write_bram here, since if we
            # are writing refill data here, then we don't have a
            # cache hit this cycle on the line being refilled.
            # (There is the possibility that the load following the
            # load miss that started the refill could be to the old
            # contents of the victim line, since it is a couple of
            # cycles after the refill starts before we see the updated
            # cache tag. In that case we don't use the bypass.)
            comb += use_forward1_next.eq(r1.write_bram)
        comb += use_forward2_next.eq(0)
        with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
            comb += use_forward2_next.eq(r1.forward_valid1)

        # The way that matched on a hit
        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r1.write_tag):
            comb += replace_way.eq(plru_victim[r1.store_index])
        with m.Else():
            comb += replace_way.eq(r1.store_way)

        # work out whether we have permission for this access
        # NB we don't yet implement AMR, thus no KUAP
        comb += rc_ok.eq(perm_attr.reference
                         & (r0.req.load | perm_attr.changed)
                         )
        comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv)
                           & (perm_attr.wr_perm
                              | (r0.req.load & perm_attr.rd_perm))
                           )
        comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
        # Combine the request and cache hit status to decide what
        # operation needs to be done
        comb += nc.eq(r0.req.nc | perm_attr.nocache)
        comb += op.eq(Op.OP_NONE)
        with m.If(go):
            with m.If(~access_ok):
                comb += op.eq(Op.OP_BAD)
            with m.Elif(cancel_store):
                comb += op.eq(Op.OP_STCX_FAIL)
            with m.Else():
                # opsel bits are (load, nc, is_hit) from MSB to LSB
                comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
                with m.Switch(opsel):
                    with m.Case(0b101):
                        comb += op.eq(Op.OP_LOAD_HIT)
                    with m.Case(0b100):
                        comb += op.eq(Op.OP_LOAD_MISS)
                    with m.Case(0b110):
                        comb += op.eq(Op.OP_LOAD_NC)
                    with m.Case(0b001):
                        comb += op.eq(Op.OP_STORE_HIT)
                    with m.Case(0b000):
                        comb += op.eq(Op.OP_STORE_MISS)
                    with m.Case(0b010):
                        comb += op.eq(Op.OP_STORE_MISS)
                    with m.Case(0b011):
                        comb += op.eq(Op.OP_BAD)
                    with m.Case(0b111):
                        comb += op.eq(Op.OP_BAD)
                    with m.Default():
                        comb += op.eq(Op.OP_NONE)
        comb += req_op.eq(op)
        comb += req_go.eq(go)

        # Version of the row number that is valid one cycle earlier
        # in the cases where we need to read the cache data BRAM.
        # If we're stalling then we need to keep reading the last
        # row requested.
        with m.If(~r0_stall):
            with m.If(m_in.valid):
                comb += early_req_row.eq(get_row(m_in.addr))
            with m.Else():
                comb += early_req_row.eq(get_row(d_in.addr))
        with m.Else():
            comb += early_req_row.eq(req_row)

    def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
                         r0_valid, r0, reservation):
        """Handle load-with-reservation and store-conditional instructions
        """
        comb = m.d.comb
        sync = m.d.sync

        with m.If(r0_valid & r0.req.reserve):

            # XXX generate alignment interrupt if address
            # is not aligned XXX or if r0.req.nc = '1'
            with m.If(r0.req.load):
                comb += set_rsrv.eq(1) # load with reservation
            with m.Else():
                comb += clear_rsrv.eq(1) # store conditional
                # cancel if no reservation, or held for another line
                with m.If(~reservation.valid |
                          (r0.req.addr[LINE_OFF_BITS:64] !=
                           reservation.addr)):
                    comb += cancel_store.eq(1)

    def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                        reservation, r0):

        comb = m.d.comb
        sync = m.d.sync

        with m.If(r0_valid & access_ok):
            with m.If(clear_rsrv):
                sync += reservation.valid.eq(0)
            with m.Elif(set_rsrv):
                sync += reservation.valid.eq(1)
                sync += reservation.addr.eq(r0.req.addr[LINE_OFF_BITS:64])

    def writeback_control(self, m, r1, cache_out):
        """Return data for loads & completion control logic
        """
        comb = m.d.comb
        sync = m.d.sync
        d_out, m_out = self.d_out, self.m_out

        data_out = Signal(64)
        data_fwd = Signal(64)

        # Use the bypass if we are reading the row that was
        # written 1 or 2 cycles ago, including for the
        # slow_valid = 1 case (i.e. completing a load
        # miss or a non-cacheable load).
        with m.If(r1.use_forward1):
            comb += data_fwd.eq(r1.forward_data1)
        with m.Else():
            comb += data_fwd.eq(r1.forward_data2)

        comb += data_out.eq(cache_out[r1.hit_way])

        for i in range(8):
            with m.If(r1.forward_sel[i]):
                dsel = data_fwd.word_select(i, 8)
                comb += data_out.word_select(i, 8).eq(dsel)

        comb += d_out.valid.eq(r1.ls_valid)
        comb += d_out.data.eq(data_out)
        comb += d_out.store_done.eq(~r1.stcx_fail)
        comb += d_out.error.eq(r1.ls_error)
        comb += d_out.cache_paradox.eq(r1.cache_paradox)

        # Outputs to MMU
        comb += m_out.done.eq(r1.mmu_done)
        comb += m_out.err.eq(r1.mmu_error)
        comb += m_out.data.eq(data_out)

        # We have a valid load or store hit or we just completed
        # a slow op such as a load miss, a NC load or a store
        #
        # Note: the load hit is delayed by one cycle. However it
        # can still not collide with r.slow_valid (well unless I
        # miscalculated) because slow_valid can only be set on a
        # subsequent request and not on its first cycle (the state
        # machine must have advanced), which makes slow_valid
        # at least 2 cycles from the previous hit_load_valid.

        # Sanity: Only one of these must be set in any given cycle

        if False: # TODO: need Display to get this to work
            assert (r1.slow_valid & r1.stcx_fail) != 1, \
                "unexpected slow_valid collision with stcx_fail"

            assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
                "unexpected hit_load_delayed collision with slow_valid"

        with m.If(~r1.mmu_req):
            # Request came from loadstore1...
            # Load hit case is the standard path
            with m.If(r1.hit_load_valid):
                #Display(f"completing load hit data={data_out}")
                pass

            # error cases complete without stalling
            with m.If(r1.ls_error):
                # Display("completing ld/st with error")
                pass

            # Slow ops (load miss, NC, stores)
            with m.If(r1.slow_valid):
                #Display(f"completing store or load miss data={data_out}")
                pass

        with m.Else():
            # Request came from MMU
            with m.If(r1.hit_load_valid):
                # Display(f"completing load hit to MMU, data={m_out.data}")
                pass
            # error cases complete without stalling
            with m.If(r1.mmu_error):
                #Display("completing MMU ld with error")
                pass

            # Slow ops (i.e. load miss)
            with m.If(r1.slow_valid):
                #Display("completing MMU load miss, data={m_out.data}")
                pass

    def rams(self, m, r1, early_req_row, cache_out, replace_way):
        """rams
        Generate a cache RAM for each way. This handles the normal
        reads, writes from reloads and the special store-hit update
        path as well.

        Note: the BRAMs have an extra read buffer, meaning the output
        is pipelined an extra cycle. This differs from the
        icache. The writeback logic needs to take that into
        account by using 1-cycle delayed signals for load hits.
        """
        comb = m.d.comb
        wb_in = self.wb_in

        for i in range(NUM_WAYS):
            do_read = Signal()
            rd_addr = Signal(ROW_BITS)
            do_write = Signal()
            wr_addr = Signal(ROW_BITS)
            wr_data = Signal(WB_DATA_BITS)
            wr_sel = Signal(ROW_SIZE)
            wr_sel_m = Signal(ROW_SIZE)
            _d_out = Signal(WB_DATA_BITS)

            way = CacheRam(ROW_BITS, WB_DATA_BITS, True)
            setattr(m.submodules, "cacheram_%d" % i, way)

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += _d_out.eq(way.rd_data)
            comb += way.wr_sel.eq(wr_sel_m)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(wr_data)

            # Cache hit reads
            comb += do_read.eq(1)
            comb += rd_addr.eq(early_req_row)
            comb += cache_out[i].eq(_d_out)

            # Write mux:
            #
            # Defaults to wishbone read responses (cache refill)
            #
            # For timing, the mux on wr_data/sel/addr is not
            # dependent on anything other than the current state.

            with m.If(r1.write_bram):
                # Write store data to BRAM. This happens one
                # cycle after the store is in r0.
                comb += wr_data.eq(r1.req.data)
                comb += wr_sel.eq(r1.req.byte_sel)
                comb += wr_addr.eq(get_row(r1.req.real_addr))

                with m.If(i == r1.req.hit_way):
                    comb += do_write.eq(1)
            with m.Else():
                # Otherwise, we might be doing a reload or a DCBZ
                with m.If(r1.dcbz):
                    comb += wr_data.eq(0)
                with m.Else():
                    comb += wr_data.eq(wb_in.dat)
                comb += wr_addr.eq(r1.store_row)
                comb += wr_sel.eq(~0) # all 1s

                with m.If((r1.state == State.RELOAD_WAIT_ACK)
                          & wb_in.ack & (replace_way == i)):
                    comb += do_write.eq(1)

            # Mask write selects with do_write since BRAM
            # doesn't have a global write-enable
            with m.If(do_write):
                comb += wr_sel_m.eq(wr_sel)

    # Cache hit synchronous machine for the easy case.
    # This handles load hits.
    # It also handles error cases (TLB miss, cache paradox)
    def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
                        req_hit_way, req_index, access_ok,
                        tlb_hit, tlb_hit_way, tlb_req_index):

        comb = m.d.comb
        sync = m.d.sync

        with m.If(req_op != Op.OP_NONE):
            #Display(f"op:{req_op} addr:{r0.req.addr} nc: {r0.req.nc}" \
            #        f"idx:{req_index} tag:{req_tag} way: {req_hit_way}"
            #       )
            pass

        with m.If(r0_valid):
            sync += r1.mmu_req.eq(r0.mmu_req)

        # Fast path for load/store hits.
        # Set signals for the writeback controls.
        sync += r1.hit_way.eq(req_hit_way)
        sync += r1.hit_index.eq(req_index)

        with m.If(req_op == Op.OP_LOAD_HIT):
            sync += r1.hit_load_valid.eq(1)
        with m.Else():
            sync += r1.hit_load_valid.eq(0)

        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STORE_HIT)):
            sync += r1.cache_hit.eq(1)
        with m.Else():
            sync += r1.cache_hit.eq(0)

        with m.If(req_op == Op.OP_BAD):
            # Display(f"Signalling ld/st error valid_ra={valid_ra}"
            #         f"rc_ok={rc_ok} perm_ok={perm_ok}")
            sync += r1.ls_error.eq(~r0.mmu_req)
            sync += r1.mmu_error.eq(r0.mmu_req)
            sync += r1.cache_paradox.eq(access_ok)

        with m.Else():
            sync += r1.ls_error.eq(0)
            sync += r1.mmu_error.eq(0)
            sync += r1.cache_paradox.eq(0)

        with m.If(req_op == Op.OP_STCX_FAIL):
            sync += r1.stcx_fail.eq(1)
        with m.Else():
            sync += r1.stcx_fail.eq(0)

        # Record TLB hit information for updating TLB PLRU
        sync += r1.tlb_hit.eq(tlb_hit)
        sync += r1.tlb_hit_way.eq(tlb_hit_way)
        sync += r1.tlb_hit_index.eq(tlb_req_index)

    # Memory accesses are handled by this state machine:
    #
    #   * Cache load miss/reload (in conjunction with "rams")
    #   * Load hits for non-cachable forms
    #   * Stores (the collision case is handled in "rams")
    #
    # All wishbone request generation is done here.
    # This machine operates at stage 1.
    def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
                    cache_valid_bits, r0, replace_way,
                    req_hit_way, req_same_tag,
                    r0_valid, req_op, cache_tags, req_go, ra):

        comb = m.d.comb
        sync = m.d.sync
        wb_in = self.wb_in

        req = MemAccessRequest()
        acks = Signal(3)
        adjust_acks = Signal(3)
        stbs_done = Signal()

        sync += r1.use_forward1.eq(use_forward1_next)
        sync += r1.forward_sel.eq(0)

        with m.If(use_forward1_next):
            sync += r1.forward_sel.eq(r1.req.byte_sel)
        with m.Elif(use_forward2_next):
            sync += r1.forward_sel.eq(r1.forward_sel1)

        sync += r1.forward_data2.eq(r1.forward_data1)
        with m.If(r1.write_bram):
            sync += r1.forward_data1.eq(r1.req.data)
            sync += r1.forward_sel1.eq(r1.req.byte_sel)
            sync += r1.forward_way1.eq(r1.req.hit_way)
            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
            sync += r1.forward_valid1.eq(1)
        with m.Else():
            with m.If(r1.dcbz):
                sync += r1.forward_data1.eq(0)
            with m.Else():
                sync += r1.forward_data1.eq(wb_in.dat)
            sync += r1.forward_sel1.eq(~0) # all 1s
            sync += r1.forward_way1.eq(replace_way)
            sync += r1.forward_row1.eq(r1.store_row)
            sync += r1.forward_valid1.eq(0)

        # One cycle pulses reset
        sync += r1.slow_valid.eq(0)
        sync += r1.write_bram.eq(0)
        sync += r1.inc_acks.eq(0)
        sync += r1.dec_acks.eq(0)

        sync += r1.ls_valid.eq(0)
        # complete tlbies and TLB loads in the third cycle
        sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))

        with m.If((req_op == Op.OP_LOAD_HIT)
                  | (req_op == Op.OP_STCX_FAIL)):
            with m.If(~r0.mmu_req):
                sync += r1.ls_valid.eq(1)
            with m.Else():
                sync += r1.mmu_done.eq(1)

        with m.If(r1.write_tag):
            # Store new tag in selected way
            for i in range(NUM_WAYS):
                with m.If(i == replace_way):
                    idx = r1.store_index
                    sync += cache_tags[idx][i * TAG_WIDTH:
                                            (i + 1) * TAG_WIDTH].eq(
                                                r1.reload_tag)
            sync += r1.store_way.eq(replace_way)
            sync += r1.write_tag.eq(0)

        # Take request from r1.req if there is one there,
        # else from req_op, ra, etc.
        with m.If(r1.full):
            comb += req.eq(r1.req)
        with m.Else():
            comb += req.op.eq(req_op)
            comb += req.valid.eq(req_go)
            comb += req.mmu_req.eq(r0.mmu_req)
            comb += req.dcbz.eq(r0.req.dcbz)
            comb += req.real_addr.eq(ra)

            with m.If(~r0.req.dcbz):
                comb += req.data.eq(r0.req.data)
            with m.Else():
                comb += req.data.eq(0)

            # Select all bytes for dcbz
            # and for cacheable loads
            with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
                comb += req.byte_sel.eq(~0) # all 1s
            with m.Else():
                comb += req.byte_sel.eq(r0.req.byte_sel)
            comb += req.hit_way.eq(req_hit_way)
            comb += req.same_tag.eq(req_same_tag)

        # Store the incoming request from r0,
        # if it is a slow request
        # Note that r1.full = 1 implies req_op = OP_NONE
        with m.If((req_op == Op.OP_LOAD_MISS)
                  | (req_op == Op.OP_LOAD_NC)
                  | (req_op == Op.OP_STORE_MISS)
                  | (req_op == Op.OP_STORE_HIT)):
            sync += r1.req.eq(req)
            sync += r1.full.eq(1)

        # Main state machine
        with m.Switch(r1.state):

            with m.Case(State.IDLE):
                # take as many low bits of the real address as wb.adr
                # is wide (the VHDL was r1.wb.adr'left downto 0)
                sync += r1.wb.adr.eq(req.real_addr[0:len(r1.wb.adr)])
                sync += r1.wb.sel.eq(req.byte_sel)
                sync += r1.wb.dat.eq(req.data)
                sync += r1.dcbz.eq(req.dcbz)

                # Keep track of our index and way
                # for subsequent stores.
                sync += r1.store_index.eq(get_index(req.real_addr))
                sync += r1.store_row.eq(get_row(req.real_addr))
                sync += r1.end_row_ix.eq(
                         get_row_of_line(get_row(req.real_addr))
                        )
                sync += r1.reload_tag.eq(get_tag(req.real_addr))
                sync += r1.req.same_tag.eq(1)

                with m.If(req.op == Op.OP_STORE_HIT):
                    sync += r1.store_way.eq(req.hit_way)

                # Reset per-row valid bits,
                # ready for handling OP_LOAD_MISS
                for i in range(ROW_PER_LINE):
                    sync += r1.rows_valid[i].eq(0)

                with m.Switch(req.op):
                    with m.Case(Op.OP_LOAD_HIT):
                        # stay in IDLE state
                        pass

                    with m.Case(Op.OP_LOAD_MISS):
                        #Display(f"cache miss real addr:" \
                        #        f"{req_real_addr}" \
                        #        f" idx:{get_index(req_real_addr)}" \
                        #        f" tag:{get_tag(req.real_addr)}")

                        # Start the wishbone cycle
                        sync += r1.wb.we.eq(0)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                        # Track that we had one request sent
                        sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                        sync += r1.write_tag.eq(1)

                    with m.Case(Op.OP_LOAD_NC):
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)
                        sync += r1.wb.we.eq(0)
                        sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)

                    with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
                        with m.If(~req.dcbz):
                            sync += r1.state.eq(State.STORE_WAIT_ACK)
                            sync += r1.acks_pending.eq(1)
                            sync += r1.full.eq(0)
                            sync += r1.slow_valid.eq(1)

                            with m.If(~req.mmu_req):
                                sync += r1.ls_valid.eq(1)
                            with m.Else():
                                sync += r1.mmu_done.eq(1)

                            with m.If(req.op == Op.OP_STORE_HIT):
                                sync += r1.write_bram.eq(1)
                        with m.Else():
                            # dcbz is handled much like a load miss
                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)

                            with m.If(req.op == Op.OP_STORE_MISS):
                                sync += r1.write_tag.eq(1)

                        sync += r1.wb.we.eq(1)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                    # OP_NONE and OP_BAD do nothing
                    # OP_BAD & OP_STCX_FAIL were
                    # handled above already
                    with m.Case(Op.OP_NONE):
                        pass
                    with m.Case(Op.OP_BAD):
                        pass
                    with m.Case(Op.OP_STCX_FAIL):
                        pass

            with m.Case(State.RELOAD_WAIT_ACK):
                # Requests are all sent if stb is 0
                comb += stbs_done.eq(~r1.wb.stb)

                with m.If(~wb_in.stall & ~stbs_done):
                    # That was the last word?
                    # We are done sending.
                    # Clear stb and set stbs_done
                    # so we can handle an eventual
                    # last ack on the same cycle.
                    with m.If(is_last_row_addr(
                              r1.wb.adr, r1.end_row_ix)):
                        sync += r1.wb.stb.eq(0)
                        comb += stbs_done.eq(1)

                    # Calculate the next row address
                    sync += r1.wb.adr.eq(next_row_addr(r1.wb.adr))

                # Incoming acks processing
                sync += r1.forward_valid1.eq(wb_in.ack)
                with m.If(wb_in.ack):
                    # XXX needs an Array bit-accessor here
                    sync += r1.rows_valid[r1.store_row % ROW_PER_LINE].eq(1)

                    # If this is the data we were looking for,
                    # we can complete the request next cycle.
                    # Compare the whole address in case the
                    # request in r1.req is not the one that
                    # started this refill.
                    with m.If(r1.full & r1.req.same_tag &
                              ((r1.dcbz & r1.req.dcbz) |
                               (~r1.dcbz &
                                (r1.req.op == Op.OP_LOAD_MISS))) &
                              (r1.store_row == get_row(r1.req.real_addr))):
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)
                        with m.If(~r1.mmu_req):
                            sync += r1.ls_valid.eq(1)
                        with m.Else():
                            sync += r1.mmu_done.eq(1)
                        sync += r1.forward_sel.eq(~0) # all 1s
                        sync += r1.use_forward1.eq(1)

                    # Check for completion
                    with m.If(stbs_done & is_last_row(r1.store_row,
                                                      r1.end_row_ix)):
                        # Complete wishbone cycle
                        sync += r1.wb.cyc.eq(0)

                        # Cache line is now valid
                        cv = cache_valid_bits[r1.store_index]
                        sync += cv.bit_select(r1.store_way, 1).eq(1)
                        sync += r1.state.eq(State.IDLE)

                    # Increment store row counter
                    sync += r1.store_row.eq(next_row(r1.store_row))

            with m.Case(State.STORE_WAIT_ACK):
                comb += stbs_done.eq(~r1.wb.stb)
                comb += acks.eq(r1.acks_pending)

                with m.If(r1.inc_acks != r1.dec_acks):
                    with m.If(r1.inc_acks):
                        comb += adjust_acks.eq(acks + 1)
                    with m.Else():
                        comb += adjust_acks.eq(acks - 1)
                with m.Else():
                    comb += adjust_acks.eq(acks)

                sync += r1.acks_pending.eq(adjust_acks)

                # Clear stb when slave accepted request
                with m.If(~wb_in.stall):
                    # See if there is another store waiting
                    # to be done which is in the same real page.
                    with m.If(req.valid):
                        adr = req.real_addr[0:SET_SIZE_BITS]
                        sync += r1.wb.adr[0:SET_SIZE_BITS].eq(adr)
                        sync += r1.wb.dat.eq(req.data)
                        sync += r1.wb.sel.eq(req.byte_sel)

                    with m.Elif((adjust_acks < 7) & req.same_tag &
                                ((req.op == Op.OP_STORE_MISS)
                                 | (req.op == Op.OP_STORE_HIT))):
                        sync += r1.wb.stb.eq(1)
                        comb += stbs_done.eq(0)

                        with m.If(req.op == Op.OP_STORE_HIT):
                            sync += r1.write_bram.eq(1)
                        sync += r1.full.eq(0)
                        sync += r1.slow_valid.eq(1)

                        # Store requests never come from the MMU
                        sync += r1.ls_valid.eq(1)
                        comb += stbs_done.eq(0)
                        sync += r1.inc_acks.eq(1)
                    with m.Else():
                        sync += r1.wb.stb.eq(0)
                        comb += stbs_done.eq(1)

                # Got ack ? See if complete.
                with m.If(wb_in.ack):
                    with m.If(stbs_done & (adjust_acks == 1)):
                        sync += r1.state.eq(State.IDLE)
                        sync += r1.wb.cyc.eq(0)
                        sync += r1.wb.stb.eq(0)
                    sync += r1.dec_acks.eq(1)

            with m.Case(State.NC_LOAD_WAIT_ACK):
                # Clear stb when slave accepted request
                with m.If(~wb_in.stall):
                    sync += r1.wb.stb.eq(0)

                # Got ack ? complete.
                with m.If(wb_in.ack):
                    sync += r1.state.eq(State.IDLE)
                    sync += r1.full.eq(0)
                    sync += r1.slow_valid.eq(1)

                    with m.If(~r1.mmu_req):
                        sync += r1.ls_valid.eq(1)
                    with m.Else():
                        sync += r1.mmu_done.eq(1)

                    sync += r1.forward_sel.eq(~0) # all 1s
                    sync += r1.use_forward1.eq(1)
                    sync += r1.wb.cyc.eq(0)
                    sync += r1.wb.stb.eq(0)

    def dcache_log(self, m, r1, valid_ra, tlb_hit_way, stall_out, req_op):

        sync = m.d.sync
        d_out, wb_in, log_out = self.d_out, self.wb_in, self.log_out

        sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit_way[:3],
                               stall_out, req_op[:3], d_out.valid, d_out.error,
                               r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
                               r1.wb.adr[3:6]))


    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags = CacheTagArray()
        cache_tag_set = Signal(TAG_RAM_WIDTH)
        cache_valid_bits = CacheValidBitsArray()

        # TODO attribute ram_style : string;
        # TODO attribute ram_style of cache_tags : signal is "distributed";

        """note: these are passed to nmigen.hdl.Memory as "attributes".
           don't know how, just that they are.
        """
        dtlb_valid_bits = TLBValidBitsArray()
        dtlb_tags = TLBTagsArray()
        dtlb_ptes = TLBPtesArray()
        # TODO attribute ram_style of
        #  dtlb_tags : signal is "distributed";
        # TODO attribute ram_style of
        #  dtlb_ptes : signal is "distributed";

        r0 = RegStage0()
        r0_full = Signal()

        r1 = RegStage1()

        reservation = Reservation()

        # Async signals on incoming request
        req_index = Signal(INDEX_BITS)
        req_row = Signal(ROW_BITS)
        req_hit_way = Signal(WAY_BITS)
        req_tag = Signal(TAG_BITS)
        req_op = Signal(Op)
        req_data = Signal(64)
        req_same_tag = Signal()
        req_go = Signal()

        early_req_row = Signal(ROW_BITS)

        cancel_store = Signal()
        set_rsrv = Signal()
        clear_rsrv = Signal()

        r0_valid = Signal()
        r0_stall = Signal()

        use_forward1_next = Signal()
        use_forward2_next = Signal()

        cache_out = CacheRamOut()

        plru_victim = PLRUOut()
        replace_way = Signal(WAY_BITS)

        # Wishbone read/write/cache write formatting signals
        bus_sel = Signal(8)

        # TLB signals
        tlb_tag_way = Signal(TLB_TAG_WAY_BITS)
        tlb_pte_way = Signal(TLB_PTE_WAY_BITS)
        tlb_valid_way = Signal(TLB_NUM_WAYS)
        tlb_req_index = Signal(TLB_SET_BITS)
        tlb_hit = Signal()
        tlb_hit_way = Signal(TLB_WAY_BITS)
        pte = Signal(TLB_PTE_BITS)
        ra = Signal(REAL_ADDR_BITS)
        valid_ra = Signal()
        perm_attr = PermAttr()
        rc_ok = Signal()
        perm_ok = Signal()
        access_ok = Signal()

        tlb_plru_victim = TLBPLRUOut()

        # we don't yet handle collisions between loadstore1 requests
        # and MMU requests
        comb += self.m_out.stall.eq(0)

        # Hold off the request in r0 when r1 has an uncompleted request
        comb += r0_stall.eq(r0_full & r1.full)
        comb += r0_valid.eq(r0_full & ~r1.full)
        comb += self.stall_out.eq(r0_stall)

        # Wire up wishbone request latch out of stage 1
        comb += self.wb_out.eq(r1.wb)

        # call sub-functions putting everything together, using shared
        # signals established above
        self.stage_0(m, r0, r1, r0_full)
        self.tlb_read(m, r0_stall, tlb_valid_way,
                      tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
                      dtlb_tags, dtlb_ptes)
        self.tlb_search(m, tlb_req_index, r0, r0_valid,
                        tlb_valid_way, tlb_tag_way, tlb_hit_way,
                        tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra)
        self.tlb_update(m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
                        tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
                        dtlb_tags, tlb_pte_way, dtlb_ptes)
        self.maybe_plrus(m, r1, plru_victim)
        self.maybe_tlb_plrus(m, r1, tlb_plru_victim)
        self.cache_tag_read(m, r0_stall, req_index, cache_tag_set,
                            cache_tags)
        self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
                            r0_valid, r1, cache_valid_bits, replace_way,
                            use_forward1_next, use_forward2_next,
                            req_hit_way, plru_victim, rc_ok, perm_attr,
                            valid_ra, perm_ok, access_ok, req_op, req_go,
                            tlb_pte_way, tlb_valid_way, tlb_hit, tlb_hit_way,
                            cache_tag_set, cancel_store, req_same_tag,
                            r0_stall, early_req_row)
        self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
                              r0_valid, r0, reservation)
        self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                             reservation, r0)
        self.writeback_control(m, r1, cache_out)
        self.rams(m, r1, early_req_row, cache_out, replace_way)
        self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
                             req_hit_way, req_index, access_ok,
                             tlb_hit, tlb_hit_way, tlb_req_index)
        self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
                         cache_valid_bits, r0, replace_way,
                         req_hit_way, req_same_tag,
                         r0_valid, req_op, cache_tags, req_go, ra)
        #self.dcache_log(m, r1, valid_ra, tlb_hit_way,
        #                self.stall_out, req_op)

        return m


# dcache_tb.vhdl
#
# entity dcache_tb is
# end dcache_tb;
#
# architecture behave of dcache_tb is
#     signal clk          : std_ulogic;
#     signal rst          : std_ulogic;
#
#     signal d_in         : Loadstore1ToDcacheType;
#     signal d_out        : DcacheToLoadstore1Type;
#
#     signal m_in         : MmuToDcacheType;
#     signal m_out        : DcacheToMmuType;
#
#     signal wb_bram_in   : wishbone_master_out;
#     signal wb_bram_out  : wishbone_slave_out;
#
#     constant clk_period : time := 10 ns;
# begin
#     dcache0: entity work.dcache
#         generic map(
#
#             LINE_SIZE => 64,
#             NUM_LINES => 4
#             )
#         port map(
#             clk => clk,
#             rst => rst,
#             d_in => d_in,
#             d_out => d_out,
#             m_in => m_in,
#             m_out => m_out,
#             wishbone_out => wb_bram_in,
#             wishbone_in => wb_bram_out
#             );
#
#     -- BRAM Memory slave
#     bram0: entity work.wishbone_bram_wrapper
#         generic map(
#             MEMORY_SIZE   => 1024,
#             RAM_INIT_FILE => "icache_test.bin"
#             )
#         port map(
#             clk => clk,
#             rst => rst,
#             wishbone_in => wb_bram_in,
#             wishbone_out => wb_bram_out
#             );
#
#     clk_process: process
#     begin
#         clk <= '0';
#         wait for clk_period/2;
#         clk <= '1';
#         wait for clk_period/2;
#     end process;
#
#     rst_process: process
#     begin
#         rst <= '1';
#         wait for 2*clk_period;
#         rst <= '0';
#         wait;
#     end process;
#
#     stim: process
#     begin
#         -- Clear stuff
#         d_in.valid <= '0';
#         d_in.load <= '0';
#         d_in.nc <= '0';
#         d_in.addr <= (others => '0');
#         d_in.data <= (others => '0');
#         m_in.valid <= '0';
#         m_in.addr <= (others => '0');
#         m_in.pte <= (others => '0');
#
#         wait for 4*clk_period;
#         wait until rising_edge(clk);
#
#         -- Cacheable read of address 4
#         d_in.load <= '1';
#         d_in.nc <= '0';
#         d_in.addr <= x"0000000000000004";
#         d_in.valid <= '1';
#         wait until rising_edge(clk);
#         d_in.valid <= '0';
#
#         wait until rising_edge(clk) and d_out.valid = '1';
#         assert d_out.data = x"0000000100000000"
#             report "data @" & to_hstring(d_in.addr) &
#             "=" & to_hstring(d_out.data) &
#             " expected 0000000100000000"
#             severity failure;
#         -- wait for clk_period;
#
#         -- Cacheable read of address 30
#         d_in.load <= '1';
#         d_in.nc <= '0';
#         d_in.addr <= x"0000000000000030";
#         d_in.valid <= '1';
#         wait until rising_edge(clk);
#         d_in.valid <= '0';
#
#         wait until rising_edge(clk) and d_out.valid = '1';
#         assert d_out.data = x"0000000D0000000C"
#             report "data @" & to_hstring(d_in.addr) &
#             "=" & to_hstring(d_out.data) &
#             " expected 0000000D0000000C"
#             severity failure;
#
#         -- Non-cacheable read of address 100
#         d_in.load <= '1';
#         d_in.nc <= '1';
#         d_in.addr <= x"0000000000000100";
#         d_in.valid <= '1';
#         wait until rising_edge(clk);
#         d_in.valid <= '0';
#         wait until rising_edge(clk) and d_out.valid = '1';
#         assert d_out.data = x"0000004100000040"
#             report "data @" & to_hstring(d_in.addr) &
#             "=" & to_hstring(d_out.data) &
#             " expected 0000004100000040"
#             severity failure;
#
#         wait until rising_edge(clk);
#         wait until rising_edge(clk);
#         wait until rising_edge(clk);
#         wait until rising_edge(clk);
#
#         std.env.finish;
#     end process;
# end;


def dcache_sim(dut):
    # clear stuff
    yield dut.d_in.valid.eq(0)
    yield dut.d_in.load.eq(0)
    yield dut.d_in.nc.eq(0)
    yield dut.d_in.addr.eq(0)
    yield dut.d_in.data.eq(0)
    yield dut.m_in.valid.eq(0)
    yield dut.m_in.addr.eq(0)
    yield dut.m_in.pte.eq(0)
    # wait 4 * clk_period
    yield
    yield
    yield
    yield
    # wait_until rising_edge(clk)
    yield
    # Cacheable read of address 4
    yield dut.d_in.load.eq(1)
    yield dut.d_in.nc.eq(0)
    yield dut.d_in.addr.eq(Const(0x0000000000000004, 64))
    yield dut.d_in.valid.eq(1)
    # wait-until rising_edge(clk)
    yield
    yield dut.d_in.valid.eq(0)
    yield
    while not (yield dut.d_out.valid):
        yield
    # signal values must be read out of the simulation with yield
    data = yield dut.d_out.data
    assert data == 0x0000000100000000, \
        f"data @0x04={data:x} expected 0000000100000000"

    # Cacheable read of address 30
    yield dut.d_in.load.eq(1)
    yield dut.d_in.nc.eq(0)
    yield dut.d_in.addr.eq(Const(0x0000000000000030, 64))
    yield dut.d_in.valid.eq(1)
    yield
    yield dut.d_in.valid.eq(0)
    yield
    while not (yield dut.d_out.valid):
        yield
    data = yield dut.d_out.data
    assert data == 0x0000000D0000000C, \
        f"data @0x30={data:x} expected 0000000D0000000C"

    # Non-cacheable read of address 100
    yield dut.d_in.load.eq(1)
    yield dut.d_in.nc.eq(1)
    yield dut.d_in.addr.eq(Const(0x0000000000000100, 64))
    yield dut.d_in.valid.eq(1)
    yield
    yield dut.d_in.valid.eq(0)
    yield
    while not (yield dut.d_out.valid):
        yield
    data = yield dut.d_out.data
    assert data == 0x0000004100000040, \
        f"data @0x100={data:x} expected 0000004100000040"

    yield
    yield
    yield
    yield


def test_dcache():
    dut = DCache()
    vl = rtlil.convert(dut, ports=[])
    with open("test_dcache.il", "w") as f:
        f.write(vl)

    run_simulation(dut, dcache_sim(dut), vcd_name='test_dcache.vcd')


if __name__ == '__main__':
    test_dcache()