1 """DCache
2
3 based on Anton Blanchard microwatt dcache.vhdl
4
5 note that the microwatt dcache wishbone interface expects "stall".
6 for simplicity at the moment this is hard-coded to cyc & ~ack.
7 see WB4 spec, p84, section 5.2.1
8
9 IMPORTANT: for store, the data is sampled the cycle AFTER the "valid"
10 is raised. sigh
11
12 Links:
13
14 * https://libre-soc.org/3d_gpu/architecture/set_associative_cache.jpg
15 * https://bugs.libre-soc.org/show_bug.cgi?id=469
16
17 """
18
19 import sys
20
21 from nmutil.gtkw import write_gtkw
22
23 sys.setrecursionlimit(1000000)
24
25 from enum import Enum, unique
26
27 from nmigen import Module, Signal, Elaboratable, Cat, Repl, Array, Const
28 from nmutil.util import Display
29
30 from copy import deepcopy
31 from random import randint, seed
32
33 from nmigen.cli import main
34 from nmutil.iocontrol import RecordObject
35 from nmigen.utils import log2_int
36 from soc.experiment.mem_types import (LoadStore1ToDCacheType,
37 DCacheToLoadStore1Type,
38 MMUToDCacheType,
39 DCacheToMMUType)
40
41 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
42 WBAddrType, WBDataType, WBSelType,
43 WBMasterOut, WBSlaveOut,
44 WBMasterOutVector, WBSlaveOutVector,
45 WBIOMasterOut, WBIOSlaveOut)
46
47 from soc.experiment.cache_ram import CacheRam
48 #from soc.experiment.plru import PLRU
49 from nmutil.plru import PLRU
50
51 # for test
52 from soc.bus.sram import SRAM
53 from nmigen import Memory
54 from nmigen.cli import rtlil
55
56 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
57 # Also, check out the cxxsim nmigen branch, and latest yosys from git
58 from nmutil.sim_tmp_alternative import Simulator
59
60 from nmutil.util import wrap
61
62
63 # TODO: make these parameters of DCache at some point
64 LINE_SIZE = 64 # Line size in bytes
65 NUM_LINES = 16 # Number of lines in a set
66 NUM_WAYS = 4 # Number of ways
67 TLB_SET_SIZE = 64 # L1 DTLB number of sets (entries per way)
68 TLB_NUM_WAYS = 2 # L1 DTLB number of ways
69 TLB_LG_PGSZ = 12 # L1 DTLB log_2(page_size)
70 LOG_LENGTH = 0 # Non-zero to enable log data collection
71
72 # BRAM organisation: We never access more than
73 # -- WB_DATA_BITS at a time so to save
74 # -- resources we make the array only that wide, and
76 # -- use consecutive indices to make a cache "line"
76 # --
77 # -- ROW_SIZE is the width in bytes of the BRAM
78 # -- (based on WB, so 64-bits)
79 ROW_SIZE = WB_DATA_BITS // 8
80
81 # ROW_PER_LINE is the number of rows (wishbone
82 # transactions) in a line
83 ROW_PER_LINE = LINE_SIZE // ROW_SIZE
84
85 # BRAM_ROWS is the number of rows in BRAM needed
86 # to represent the full dcache
87 BRAM_ROWS = NUM_LINES * ROW_PER_LINE
88
89 print ("ROW_SIZE", ROW_SIZE)
90 print ("ROW_PER_LINE", ROW_PER_LINE)
91 print ("BRAM_ROWS", BRAM_ROWS)
92 print ("NUM_WAYS", NUM_WAYS)
93
94 # Bit field counts in the address
95
96 # REAL_ADDR_BITS is the number of real address
97 # bits that we store
98 REAL_ADDR_BITS = 56
99
100 # ROW_BITS is the number of bits to select a row
101 ROW_BITS = log2_int(BRAM_ROWS)
102
103 # ROW_LINE_BITS is the number of bits to select
104 # a row within a line
105 ROW_LINE_BITS = log2_int(ROW_PER_LINE)
106
107 # LINE_OFF_BITS is the number of bits for
108 # the offset in a cache line
109 LINE_OFF_BITS = log2_int(LINE_SIZE)
110
111 # ROW_OFF_BITS is the number of bits for
112 # the offset in a row
113 ROW_OFF_BITS = log2_int(ROW_SIZE)
114
115 # INDEX_BITS is the number of bits to
116 # select a cache line
117 INDEX_BITS = log2_int(NUM_LINES)
118
119 # SET_SIZE_BITS is the log base 2 of the set size
120 SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
121
122 # TAG_BITS is the number of bits of
123 # the tag part of the address
124 TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
125
126 # TAG_WIDTH is the width in bits of each way of the tag RAM
127 TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
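# (i.e. TAG_BITS rounded up to the next multiple of 8)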
128
129 # WAY_BITS is the number of bits to select a way
130 WAY_BITS = log2_int(NUM_WAYS)
131
132 # Example of layout for 32 lines of 64 bytes:
133 layout = """\
134 .. tag |index| line |
135 .. | row | |
136 .. | |---| | ROW_LINE_BITS (3)
137 .. | |--- - --| LINE_OFF_BITS (6)
138 .. | |- --| ROW_OFF_BITS (3)
139 .. |----- ---| | ROW_BITS (8)
140 .. |-----| | INDEX_BITS (5)
141 .. --------| | TAG_BITS (45)
142 """
143 print (layout)
144 print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
145 (TAG_BITS, INDEX_BITS, ROW_BITS,
146 ROW_OFF_BITS, LINE_OFF_BITS, ROW_LINE_BITS))
147 print ("index @: %d-%d" % (LINE_OFF_BITS, SET_SIZE_BITS))
148 print ("row @: %d-%d" % (LINE_OFF_BITS, ROW_OFF_BITS))
149 print ("tag @: %d-%d width %d" % (SET_SIZE_BITS, REAL_ADDR_BITS, TAG_WIDTH))
150
151 TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS
152
153 print ("TAG_RAM_WIDTH", TAG_RAM_WIDTH)
154
155 def CacheTagArray():
156 return Array(Signal(TAG_RAM_WIDTH, name="cachetag_%d" % x) \
157 for x in range(NUM_LINES))
158
159 def CacheValidBitsArray():
160 return Array(Signal(NUM_WAYS, name="cachevalid_%d" % x) \
161 for x in range(NUM_LINES))
162
163 def RowPerLineValidArray():
164 return Array(Signal(name="rows_valid%d" % x) \
165 for x in range(ROW_PER_LINE))
166
167 # L1 TLB
168 TLB_SET_BITS = log2_int(TLB_SET_SIZE)
169 TLB_WAY_BITS = log2_int(TLB_NUM_WAYS)
170 TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
171 TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
172 TLB_PTE_BITS = 64
173 TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS
174
175 def ispow2(x):
176 return (1<<log2_int(x, False)) == x
177
178 assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not multiple of ROW_SIZE"
179 assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
180 assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
181 assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
182 assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), "geometry bits don't add up"
183 assert (LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS), \
184 "geometry bits don't add up"
185 assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS), \
186 "geometry bits don't add up"
187 assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS), \
188 "geometry bits don't add up"
189 assert 64 == WB_DATA_BITS, "Can't yet handle wb width that isn't 64-bits"
190 assert SET_SIZE_BITS <= TLB_LG_PGSZ, "Set indexed by virtual address"
191
192
193 def TLBValidBitsArray():
194 return Array(Signal(TLB_NUM_WAYS, name="tlbvalid%d" % x) \
195 for x in range(TLB_SET_SIZE))
196
197 def TLBTagEAArray():
198 return Array(Signal(TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
199 for x in range (TLB_NUM_WAYS))
200
201 def TLBTagsArray():
202 return Array(Signal(TLB_TAG_WAY_BITS, name="tlbtags%d" % x) \
203 for x in range (TLB_SET_SIZE))
204
205 def TLBPtesArray():
206 return Array(Signal(TLB_PTE_WAY_BITS, name="tlbptes%d" % x) \
207 for x in range(TLB_SET_SIZE))
208
209 def HitWaySet():
210 return Array(Signal(WAY_BITS, name="hitway_%d" % x) \
211 for x in range(TLB_NUM_WAYS))
212
213 # Cache RAM interface
214 def CacheRamOut():
215 return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
216 for x in range(NUM_WAYS))
217
218 # PLRU output interface
219 def PLRUOut():
220 return Array(Signal(WAY_BITS, name="plru_out%d" % x) \
221 for x in range(NUM_LINES))
222
223 # TLB PLRU output interface
224 def TLBPLRUOut():
225 return Array(Signal(TLB_WAY_BITS, name="tlbplru_out%d" % x) \
226 for x in range(TLB_SET_SIZE))
227
228 # Helper functions to decode incoming requests
229 #
230 # Return the cache line index (tag index) for an address
231 def get_index(addr):
232 return addr[LINE_OFF_BITS:SET_SIZE_BITS]
233
234 # Return the cache row index (data memory) for an address
235 def get_row(addr):
236 return addr[ROW_OFF_BITS:SET_SIZE_BITS]
237
238 # Return the index of a row within a line
239 def get_row_of_line(row):
240 return row[:ROW_BITS][:ROW_LINE_BITS]
241
242 # Returns whether the address is in the last row of a line
243 def is_last_row_addr(addr, last):
244 return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last
245
246 # Returns whether this is the last row of a line
247 def is_last_row(row, last):
248 return get_row_of_line(row) == last
249
250 # Return the next row in the current cache line. We use a
251 # dedicated function in order to limit the size of the
252 # generated adder to be only the bits within a cache line
253 # (3 bits with default settings)
254 def next_row(row):
255 row_v = row[0:ROW_LINE_BITS] + 1
256 return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
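# e.g. with ROW_LINE_BITS=3: row 0b0101_111 -> 0b0101_000 (the low,
# within-line bits wrap; the upper, line-select bits are untouched)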
257
258 # Get the tag value from the address
259 def get_tag(addr):
260 return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
261
262 # Read a tag from a tag memory row
263 def read_tag(way, tagset):
264 return tagset.word_select(way, TAG_WIDTH)[:TAG_BITS]
265
266 # Read a TLB tag from a TLB tag memory row
267 def read_tlb_tag(way, tags):
268 return tags.word_select(way, TLB_EA_TAG_BITS)
269
270 # Write a TLB tag to a TLB tag memory row
271 def write_tlb_tag(way, tags, tag):
272 return read_tlb_tag(way, tags).eq(tag)
273
274 # Read a PTE from a TLB PTE memory row
275 def read_tlb_pte(way, ptes):
276 return ptes.word_select(way, TLB_PTE_BITS)
277
278 def write_tlb_pte(way, ptes, newpte):
279 return read_tlb_pte(way, ptes).eq(newpte)
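
# Note (illustrative): the read/write helpers above are thin wrappers around
# nmigen's word_select(); e.g. read_tag(way, tagset) picks bits
# [way*TAG_WIDTH : way*TAG_WIDTH + TAG_BITS] out of one tag RAM row, and the
# write_tlb_* variants return an assignment to the matching per-way field.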
280
281
282 # Record for storing permission, attribute, etc. bits from a PTE
283 class PermAttr(RecordObject):
284 def __init__(self, name=None):
285 super().__init__(name=name)
286 self.reference = Signal()
287 self.changed = Signal()
288 self.nocache = Signal()
289 self.priv = Signal()
290 self.rd_perm = Signal()
291 self.wr_perm = Signal()
292
293
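# NOTE: appears to be a stub: it is not called anywhere in this file, and
# the permission/attribute bits are instead decoded inline in tlb_search()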
294 def extract_perm_attr(pte):
295 pa = PermAttr()
296 return pa
297
298
299 # Type of operation on a "valid" input
300 @unique
301 class Op(Enum):
302 OP_NONE = 0
303 OP_BAD = 1 # NC cache hit, TLB miss, prot/RC failure
304 OP_STCX_FAIL = 2 # conditional store w/o reservation
305 OP_LOAD_HIT = 3 # Cache hit on load
306 OP_LOAD_MISS = 4 # Load missing cache
307 OP_LOAD_NC = 5 # Non-cachable load
308 OP_STORE_HIT = 6 # Store hitting cache
309 OP_STORE_MISS = 7 # Store missing cache
310
311
312 # Cache state machine
313 @unique
314 class State(Enum):
315 IDLE = 0 # Normal load hit processing
316 RELOAD_WAIT_ACK = 1 # Cache reload wait ack
317 STORE_WAIT_ACK = 2 # Store wait ack
318 NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
319
320
321 # Dcache operations:
322 #
323 # In order to make timing, we use the BRAMs with
324 # an output buffer, which means that the BRAM
325 # output is delayed by an extra cycle.
326 #
327 # Thus, the dcache has a 2-stage internal pipeline
328 # for cache hits with no stalls.
329 #
330 # All other operations are handled via stalling
331 # in the first stage.
332 #
333 # The second stage can thus complete a hit at the same
334 # time as the first stage emits a stall for a complex op.
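#
# A rough sketch of the resulting timing (as implied by the notes above
# and the per-method docstrings below):
#   cycle 0: the request is latched into r0 (stage_0)
#   cycle 1: TLB and tag RAMs are read and hit/miss is decided (stage 1)
#   cycle 2: the buffered BRAM output returns load-hit data (writeback)
# anything more complex stalls in the first stage.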
335 #
336 # Stage 0 register, basically contains just the latched request
337
338 class RegStage0(RecordObject):
339 def __init__(self, name=None):
340 super().__init__(name=name)
341 self.req = LoadStore1ToDCacheType(name="lsmem")
342 self.tlbie = Signal() # indicates a tlbie request (from MMU)
343 self.doall = Signal() # with tlbie, indicates flush whole TLB
344 self.tlbld = Signal() # indicates a TLB load request (from MMU)
345 self.mmu_req = Signal() # indicates source of request
346 self.d_valid = Signal() # indicates req.data is valid now
347
348
349 class MemAccessRequest(RecordObject):
350 def __init__(self, name=None):
351 super().__init__(name=name)
352 self.op = Signal(Op)
353 self.valid = Signal()
354 self.dcbz = Signal()
355 self.real_addr = Signal(REAL_ADDR_BITS)
356 self.data = Signal(64)
357 self.byte_sel = Signal(8)
358 self.hit_way = Signal(WAY_BITS)
359 self.same_tag = Signal()
360 self.mmu_req = Signal()
361
362
363 # First stage register, contains state for stage 1 of load hits
364 # and for the state machine used by all other operations
365 class RegStage1(RecordObject):
366 def __init__(self, name=None):
367 super().__init__(name=name)
368 # Info about the request
369 self.full = Signal() # have uncompleted request
370 self.mmu_req = Signal() # request is from MMU
371 self.req = MemAccessRequest(name="reqmem")
372
373 # Cache hit state
374 self.hit_way = Signal(WAY_BITS)
375 self.hit_load_valid = Signal()
376 self.hit_index = Signal(INDEX_BITS)
377 self.cache_hit = Signal()
378
379 # TLB hit state
380 self.tlb_hit = Signal()
381 self.tlb_hit_way = Signal(TLB_WAY_BITS)
382 self.tlb_hit_index = Signal(TLB_SET_BITS)
383
384 # 2-stage data buffer for data forwarded from writes to reads
385 self.forward_data1 = Signal(64)
386 self.forward_data2 = Signal(64)
387 self.forward_sel1 = Signal(8)
388 self.forward_valid1 = Signal()
389 self.forward_way1 = Signal(WAY_BITS)
390 self.forward_row1 = Signal(ROW_BITS)
391 self.use_forward1 = Signal()
392 self.forward_sel = Signal(8)
393
394 # Cache miss state (reload state machine)
395 self.state = Signal(State)
396 self.dcbz = Signal()
397 self.write_bram = Signal()
398 self.write_tag = Signal()
399 self.slow_valid = Signal()
400 self.wb = WBMasterOut("wb")
401 self.reload_tag = Signal(TAG_BITS)
402 self.store_way = Signal(WAY_BITS)
403 self.store_row = Signal(ROW_BITS)
404 self.store_index = Signal(INDEX_BITS)
405 self.end_row_ix = Signal(ROW_LINE_BITS)
406 self.rows_valid = RowPerLineValidArray()
407 self.acks_pending = Signal(3)
408 self.inc_acks = Signal()
409 self.dec_acks = Signal()
410
411 # Signals to complete (possibly with error)
412 self.ls_valid = Signal()
413 self.ls_error = Signal()
414 self.mmu_done = Signal()
415 self.mmu_error = Signal()
416 self.cache_paradox = Signal()
417
418 # Signal to complete a failed stcx.
419 self.stcx_fail = Signal()
420
421
422 # Reservation information
423 class Reservation(RecordObject):
424 def __init__(self):
425 super().__init__()
426 self.valid = Signal()
427 self.addr = Signal(64-LINE_OFF_BITS)
428
429
430 class DTLBUpdate(Elaboratable):
431 def __init__(self):
432 self.tlbie = Signal()
433 self.tlbwe = Signal()
434 self.doall = Signal()
435 self.updated = Signal()
436 self.v_updated = Signal()
437 self.tlb_hit = Signal()
438 self.tlb_req_index = Signal(TLB_SET_BITS)
439
440 self.tlb_hit_way = Signal(TLB_WAY_BITS)
441 self.tlb_tag_way = Signal(TLB_TAG_WAY_BITS)
442 self.tlb_pte_way = Signal(TLB_PTE_WAY_BITS)
443 self.repl_way = Signal(TLB_WAY_BITS)
444 self.eatag = Signal(TLB_EA_TAG_BITS)
445 self.pte_data = Signal(TLB_PTE_BITS)
446
447 self.dv = Signal(TLB_NUM_WAYS) # tlb_way_valids_t
448
449 self.tb_out = Signal(TLB_TAG_WAY_BITS) # tlb_way_tags_t
450 self.pb_out = Signal(TLB_PTE_WAY_BITS) # tlb_way_ptes_t
451 self.db_out = Signal(TLB_NUM_WAYS) # tlb_way_valids_t
452
453 def elaborate(self, platform):
454 m = Module()
455 comb = m.d.comb
456 sync = m.d.sync
457
458 tagset = Signal(TLB_TAG_WAY_BITS)
459 pteset = Signal(TLB_PTE_WAY_BITS)
460
461 tb_out, pb_out, db_out = self.tb_out, self.pb_out, self.db_out
462 comb += db_out.eq(self.dv)
463
464 with m.If(self.tlbie & self.doall):
465 pass # clear all back in parent
466 with m.Elif(self.tlbie):
467 with m.If(self.tlb_hit):
468 comb += db_out.bit_select(self.tlb_hit_way, 1).eq(0) # invalidate hit way
469 comb += self.v_updated.eq(1)
470
471 with m.Elif(self.tlbwe):
472
473 comb += tagset.eq(self.tlb_tag_way)
474 comb += write_tlb_tag(self.repl_way, tagset, self.eatag)
475 comb += tb_out.eq(tagset)
476
477 comb += pteset.eq(self.tlb_pte_way)
478 comb += write_tlb_pte(self.repl_way, pteset, self.pte_data)
479 comb += pb_out.eq(pteset)
480
481 comb += db_out.bit_select(self.repl_way, 1).eq(1)
482
483 comb += self.updated.eq(1)
484 comb += self.v_updated.eq(1)
485
486 return m
487
488
489 class DCachePendingHit(Elaboratable):
490
491 def __init__(self, tlb_pte_way, tlb_valid_way, tlb_hit_way,
492 cache_valid_idx, cache_tag_set,
493 req_addr,
494 hit_set):
495
496 self.go = Signal()
497 self.virt_mode = Signal()
498 self.is_hit = Signal()
499 self.tlb_hit = Signal()
500 self.hit_way = Signal(WAY_BITS)
501 self.rel_match = Signal()
502 self.req_index = Signal(INDEX_BITS)
503 self.reload_tag = Signal(TAG_BITS)
504
505 self.tlb_hit_way = tlb_hit_way
506 self.tlb_pte_way = tlb_pte_way
507 self.tlb_valid_way = tlb_valid_way
508 self.cache_valid_idx = cache_valid_idx
509 self.cache_tag_set = cache_tag_set
510 self.req_addr = req_addr
511 self.hit_set = hit_set
512
513 def elaborate(self, platform):
514 m = Module()
515 comb = m.d.comb
516 sync = m.d.sync
517
518 go = self.go
519 virt_mode = self.virt_mode
520 is_hit = self.is_hit
521 tlb_pte_way = self.tlb_pte_way
522 tlb_valid_way = self.tlb_valid_way
523 cache_valid_idx = self.cache_valid_idx
524 cache_tag_set = self.cache_tag_set
525 req_addr = self.req_addr
526 tlb_hit_way = self.tlb_hit_way
527 tlb_hit = self.tlb_hit
528 hit_set = self.hit_set
529 hit_way = self.hit_way
530 rel_match = self.rel_match
531 req_index = self.req_index
532 reload_tag = self.reload_tag
533
534 rel_matches = Array(Signal(name="rel_matches_%d" % i) \
535 for i in range(TLB_NUM_WAYS))
536 hit_way_set = HitWaySet()
537
538 # Test if pending request is a hit on any way
539 # In order to make timing in virtual mode,
540 # when we are using the TLB, we compare each
541 # way with each of the real addresses from each way of
542 # the TLB, and then decide later which match to use.
543
544 with m.If(virt_mode):
545 for j in range(TLB_NUM_WAYS): # tlb_num_way_t
546 s_tag = Signal(TAG_BITS, name="s_tag%d" % j)
547 s_hit = Signal()
548 s_pte = Signal(TLB_PTE_BITS)
549 s_ra = Signal(REAL_ADDR_BITS)
550 comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
551 comb += s_ra.eq(Cat(req_addr[0:TLB_LG_PGSZ],
552 s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
553 comb += s_tag.eq(get_tag(s_ra))
554
555 for i in range(NUM_WAYS): # way_t
556 is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
557 comb += is_tag_hit.eq(go & cache_valid_idx[i] &
558 (read_tag(i, cache_tag_set) == s_tag)
559 & tlb_valid_way[j])
560 with m.If(is_tag_hit):
561 comb += hit_way_set[j].eq(i)
562 comb += s_hit.eq(1)
563 comb += hit_set[j].eq(s_hit)
564 with m.If(s_tag == reload_tag):
565 comb += rel_matches[j].eq(1)
566 with m.If(tlb_hit):
567 comb += is_hit.eq(hit_set[tlb_hit_way])
568 comb += hit_way.eq(hit_way_set[tlb_hit_way])
569 comb += rel_match.eq(rel_matches[tlb_hit_way])
570 with m.Else():
571 s_tag = Signal(TAG_BITS)
572 comb += s_tag.eq(get_tag(req_addr))
573 for i in range(NUM_WAYS): # way_t
574 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
575 comb += is_tag_hit.eq(go & cache_valid_idx[i] &
576 (read_tag(i, cache_tag_set) == s_tag))
577 with m.If(is_tag_hit):
578 comb += hit_way.eq(i)
579 comb += is_hit.eq(1)
580 with m.If(s_tag == reload_tag):
581 comb += rel_match.eq(1)
582
583 return m
584
585
586 class DCache(Elaboratable):
587 """Set associative dcache write-through
588
589 TODO (in no specific order):
590 * See list in icache.vhdl
591 * Complete load misses on the cycle when WB data comes instead of
592 at the end of line (this requires dealing with requests coming in
593 while not idle...)
594 """
595 def __init__(self):
596 self.d_in = LoadStore1ToDCacheType("d_in")
597 self.d_out = DCacheToLoadStore1Type("d_out")
598
599 self.m_in = MMUToDCacheType("m_in")
600 self.m_out = DCacheToMMUType("m_out")
601
602 self.stall_out = Signal()
603
604 self.wb_out = WBMasterOut("wb_out")
605 self.wb_in = WBSlaveOut("wb_in")
606
607 self.log_out = Signal(20)
608
609 def stage_0(self, m, r0, r1, r0_full):
610 """Latch the request in r0.req as long as we're not stalling
611 """
612 comb = m.d.comb
613 sync = m.d.sync
614 d_in, d_out, m_in = self.d_in, self.d_out, self.m_in
615
616 r = RegStage0("stage0")
617
618 # TODO, this goes in unit tests and formal proofs
619 with m.If(d_in.valid & m_in.valid):
620 sync += Display("request collision loadstore vs MMU")
621
622 with m.If(m_in.valid):
623 comb += r.req.valid.eq(1)
624 comb += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))# no invalidate
625 comb += r.req.dcbz.eq(0)
626 comb += r.req.nc.eq(0)
627 comb += r.req.reserve.eq(0)
628 comb += r.req.virt_mode.eq(0)
629 comb += r.req.priv_mode.eq(1)
630 comb += r.req.addr.eq(m_in.addr)
631 comb += r.req.data.eq(m_in.pte)
632 comb += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
633 comb += r.tlbie.eq(m_in.tlbie)
634 comb += r.doall.eq(m_in.doall)
635 comb += r.tlbld.eq(m_in.tlbld)
636 comb += r.mmu_req.eq(1)
637 m.d.sync += Display(" DCACHE req mmu addr %x pte %x ld %d",
638 m_in.addr, m_in.pte, r.req.load)
639
640 with m.Else():
641 comb += r.req.eq(d_in)
642 comb += r.req.data.eq(0)
643 comb += r.tlbie.eq(0)
644 comb += r.doall.eq(0)
645 comb += r.tlbld.eq(0)
646 comb += r.mmu_req.eq(0)
647 with m.If((~r1.full & ~d_in.hold) | ~r0_full):
648 sync += r0.eq(r)
649 sync += r0_full.eq(r.req.valid)
650 # Sample data the cycle after a request comes in from loadstore1.
651 # If another request has come in already then the data will get
652 # put directly into req.data below.
653 with m.If(r0.req.valid & ~r.req.valid & ~r0.d_valid &
654 ~r0.mmu_req):
655 sync += r0.req.data.eq(d_in.data)
656 sync += r0.d_valid.eq(1)
657 with m.If(d_in.valid):
658 m.d.sync += Display(" DCACHE req cache addr %x data %x ld %d",
659 r.req.addr, r.req.data, r.req.load)
660
661 def tlb_read(self, m, r0_stall, tlb_valid_way,
662 tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
663 dtlb_tags, dtlb_ptes):
664 """TLB
665 Operates in the second cycle on the request latched in r0.req.
666 TLB updates write the entry at the end of the second cycle.
667 """
668 comb = m.d.comb
669 sync = m.d.sync
670 m_in, d_in = self.m_in, self.d_in
671
672 index = Signal(TLB_SET_BITS)
673 addrbits = Signal(TLB_SET_BITS)
674
675 amin = TLB_LG_PGSZ
676 amax = TLB_LG_PGSZ + TLB_SET_BITS
677
678 with m.If(m_in.valid):
679 comb += addrbits.eq(m_in.addr[amin : amax])
680 with m.Else():
681 comb += addrbits.eq(d_in.addr[amin : amax])
682 comb += index.eq(addrbits)
683
684 # If we have any op and the previous op isn't finished,
685 # then keep the same output for next cycle.
686 with m.If(~r0_stall):
687 sync += tlb_valid_way.eq(dtlb_valid_bits[index])
688 sync += tlb_tag_way.eq(dtlb_tags[index])
689 sync += tlb_pte_way.eq(dtlb_ptes[index])
690
691 def maybe_tlb_plrus(self, m, r1, tlb_plru_victim):
692 """Generate TLB PLRUs
693 """
694 comb = m.d.comb
695 sync = m.d.sync
696
697 if TLB_NUM_WAYS == 0:
698 return
699 for i in range(TLB_SET_SIZE):
700 # TLB PLRU interface
701 tlb_plru = PLRU(TLB_WAY_BITS)
702 setattr(m.submodules, "maybe_plru_%d" % i, tlb_plru)
703 tlb_plru_acc_en = Signal()
704
705 comb += tlb_plru_acc_en.eq(r1.tlb_hit & (r1.tlb_hit_index == i))
706 comb += tlb_plru.acc_en.eq(tlb_plru_acc_en)
707 comb += tlb_plru.acc_i.eq(r1.tlb_hit_way)
708 comb += tlb_plru_victim[i].eq(tlb_plru.lru_o)
709
710 def tlb_search(self, m, tlb_req_index, r0, r0_valid,
711 tlb_valid_way, tlb_tag_way, tlb_hit_way,
712 tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra):
713
714 comb = m.d.comb
715
716 hitway = Signal(TLB_WAY_BITS)
717 hit = Signal()
718 eatag = Signal(TLB_EA_TAG_BITS)
719
720 TLB_LG_END = TLB_LG_PGSZ + TLB_SET_BITS
721 comb += tlb_req_index.eq(r0.req.addr[TLB_LG_PGSZ : TLB_LG_END])
722 comb += eatag.eq(r0.req.addr[TLB_LG_END : 64 ])
723
724 for i in range(TLB_NUM_WAYS):
725 is_tag_hit = Signal()
726 comb += is_tag_hit.eq(tlb_valid_way[i]
727 & (read_tlb_tag(i, tlb_tag_way) == eatag))
728 with m.If(is_tag_hit):
729 comb += hitway.eq(i)
730 comb += hit.eq(1)
731
732 comb += tlb_hit.eq(hit & r0_valid)
733 comb += tlb_hit_way.eq(hitway)
734
735 with m.If(tlb_hit):
736 comb += pte.eq(read_tlb_pte(hitway, tlb_pte_way))
737 comb += valid_ra.eq(tlb_hit | ~r0.req.virt_mode)
738
739 with m.If(r0.req.virt_mode):
740 comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
741 r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
742 pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
743 comb += perm_attr.reference.eq(pte[8])
744 comb += perm_attr.changed.eq(pte[7])
745 comb += perm_attr.nocache.eq(pte[5])
746 comb += perm_attr.priv.eq(pte[3])
747 comb += perm_attr.rd_perm.eq(pte[2])
748 comb += perm_attr.wr_perm.eq(pte[1])
749 with m.Else():
750 comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
751 r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]))
752 comb += perm_attr.reference.eq(1)
753 comb += perm_attr.changed.eq(1)
754 comb += perm_attr.nocache.eq(0)
755 comb += perm_attr.priv.eq(1)
756 comb += perm_attr.rd_perm.eq(1)
757 comb += perm_attr.wr_perm.eq(1)
758 with m.If(valid_ra):
759 m.d.sync += Display("DCACHE virt mode %d ra %x pte %x",
760 r0.req.virt_mode, ra, pte)
761 m.d.sync += Display(" perm ref=%d", perm_attr.reference)
762 m.d.sync += Display(" perm chg=%d", perm_attr.changed)
763 m.d.sync += Display(" perm noc=%d", perm_attr.nocache)
764 m.d.sync += Display(" perm prv=%d", perm_attr.priv)
765 m.d.sync += Display(" perm rdp=%d", perm_attr.rd_perm)
766 m.d.sync += Display(" perm wrp=%d", perm_attr.wr_perm)
767
768 def tlb_update(self, m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
769 tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
770 dtlb_tags, tlb_pte_way, dtlb_ptes):
771
772 dtlb_valids = TLBValidBitsArray()
773
774 comb = m.d.comb
775 sync = m.d.sync
776
777 tlbie = Signal()
778 tlbwe = Signal()
779
780 comb += tlbie.eq(r0_valid & r0.tlbie)
781 comb += tlbwe.eq(r0_valid & r0.tlbld)
782
783 m.submodules.tlb_update = d = DTLBUpdate()
784 with m.If(tlbie & r0.doall):
785 # clear all valid bits at once
786 for i in range(TLB_SET_SIZE):
787 sync += dtlb_valid_bits[i].eq(0)
788 with m.If(d.updated):
789 sync += dtlb_tags[tlb_req_index].eq(d.tb_out)
790 sync += dtlb_ptes[tlb_req_index].eq(d.pb_out)
791 with m.If(d.v_updated):
792 sync += dtlb_valid_bits[tlb_req_index].eq(d.db_out)
793
794 comb += d.dv.eq(dtlb_valid_bits[tlb_req_index])
795
796 comb += d.tlbie.eq(tlbie)
797 comb += d.tlbwe.eq(tlbwe)
798 comb += d.doall.eq(r0.doall)
799 comb += d.tlb_hit.eq(tlb_hit)
800 comb += d.tlb_hit_way.eq(tlb_hit_way)
801 comb += d.tlb_tag_way.eq(tlb_tag_way)
802 comb += d.tlb_pte_way.eq(tlb_pte_way)
803 comb += d.tlb_req_index.eq(tlb_req_index)
804
805 with m.If(tlb_hit):
806 comb += d.repl_way.eq(tlb_hit_way)
807 with m.Else():
808 comb += d.repl_way.eq(tlb_plru_victim[tlb_req_index])
809 comb += d.eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
810 comb += d.pte_data.eq(r0.req.data)
811
812 def maybe_plrus(self, m, r1, plru_victim):
813 """Generate PLRUs
814 """
815 comb = m.d.comb
816 sync = m.d.sync
817
818 if NUM_WAYS == 0:
819 return
820
821 for i in range(NUM_LINES):
822 # PLRU interface
823 plru = PLRU(WAY_BITS)
824 setattr(m.submodules, "plru%d" % i, plru)
825 plru_acc_en = Signal()
826
827 comb += plru_acc_en.eq(r1.cache_hit & (r1.hit_index == i))
828 comb += plru.acc_en.eq(plru_acc_en)
829 comb += plru.acc_i.eq(r1.hit_way)
830 comb += plru_victim[i].eq(plru.lru_o)
831
832 def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set, cache_tags):
833 """Cache tag RAM read port
834 """
835 comb = m.d.comb
836 sync = m.d.sync
837 m_in, d_in = self.m_in, self.d_in
838
839 index = Signal(INDEX_BITS)
840
841 with m.If(r0_stall):
842 comb += index.eq(req_index)
843 with m.Elif(m_in.valid):
844 comb += index.eq(get_index(m_in.addr))
845 with m.Else():
846 comb += index.eq(get_index(d_in.addr))
847 sync += cache_tag_set.eq(cache_tags[index])
848
849 def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
850 r0_valid, r1, cache_valids, replace_way,
851 use_forward1_next, use_forward2_next,
852 req_hit_way, plru_victim, rc_ok, perm_attr,
853 valid_ra, perm_ok, access_ok, req_op, req_go,
854 tlb_pte_way,
855 tlb_hit, tlb_hit_way, tlb_valid_way, cache_tag_set,
856 cancel_store, req_same_tag, r0_stall, early_req_row):
857 """Cache request parsing and hit detection
858 """
859
860 comb = m.d.comb
861 m_in, d_in = self.m_in, self.d_in
862
863 is_hit = Signal()
864 hit_way = Signal(WAY_BITS)
865 op = Signal(Op)
866 opsel = Signal(3)
867 go = Signal()
868 nc = Signal()
869 hit_set = Array(Signal(name="hit_set_%d" % i) \
870 for i in range(TLB_NUM_WAYS))
871 cache_valid_idx = Signal(NUM_WAYS)
872
873 # Extract line, row and tag from request
874 comb += req_index.eq(get_index(r0.req.addr))
875 comb += req_row.eq(get_row(r0.req.addr))
876 comb += req_tag.eq(get_tag(ra))
877
878 if False: # display on comb is a bit... busy.
879 comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
880 r0.req.addr, ra, req_index, req_tag, req_row)
881
882 comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
883 comb += cache_valid_idx.eq(cache_valids[req_index])
884
885 m.submodules.dcache_pend = dc = DCachePendingHit(tlb_pte_way,
886 tlb_valid_way, tlb_hit_way,
887 cache_valid_idx, cache_tag_set,
888 r0.req.addr,
889 hit_set)
890
891 comb += dc.tlb_hit.eq(tlb_hit)
892 comb += dc.reload_tag.eq(r1.reload_tag)
893 comb += dc.virt_mode.eq(r0.req.virt_mode)
894 comb += dc.go.eq(go)
895 comb += dc.req_index.eq(req_index)
896 comb += is_hit.eq(dc.is_hit)
897 comb += hit_way.eq(dc.hit_way)
898 comb += req_same_tag.eq(dc.rel_match)
899
900 # See if the request matches the line currently being reloaded
901 with m.If((r1.state == State.RELOAD_WAIT_ACK) &
902 (req_index == r1.store_index) & req_same_tag):
903 # For a store, consider this a hit even if the row isn't
904 # valid since it will be by the time we perform the store.
905 # For a load, check the appropriate row valid bit.
906 rrow = Signal(ROW_LINE_BITS)
907 comb += rrow.eq(req_row)
908 valid = r1.rows_valid[rrow]
909 comb += is_hit.eq((~r0.req.load) | valid)
910 comb += hit_way.eq(replace_way)
911
912 # Whether to use forwarded data for a load or not
913 with m.If((get_row(r1.req.real_addr) == req_row) &
914 (r1.req.hit_way == hit_way)):
915 # Only need to consider r1.write_bram here, since if we
916 # are writing refill data here, then we don't have a
917 # cache hit this cycle on the line being refilled.
918 # (There is the possibility that the load following the
919 # load miss that started the refill could be to the old
920 # contents of the victim line, since it is a couple of
921 # cycles after the refill starts before we see the updated
922 # cache tag. In that case we don't use the bypass.)
923 comb += use_forward1_next.eq(r1.write_bram)
924 with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
925 comb += use_forward2_next.eq(r1.forward_valid1)
926
927 # The way that matched on a hit
928 comb += req_hit_way.eq(hit_way)
929
930 # The way to replace on a miss
931 with m.If(r1.write_tag):
932 comb += replace_way.eq(plru_victim[r1.store_index])
933 with m.Else():
934 comb += replace_way.eq(r1.store_way)
935
936 # work out whether we have permission for this access
937 # NB we don't yet implement AMR, thus no KUAP
938 comb += rc_ok.eq(perm_attr.reference
939 & (r0.req.load | perm_attr.changed))
940 comb += perm_ok.eq((r0.req.priv_mode | (~perm_attr.priv)) &
941 (perm_attr.wr_perm |
942 (r0.req.load & perm_attr.rd_perm)))
943 comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
944 # Combine the request and cache hit status to decide what
945 # operation needs to be done
946 comb += nc.eq(r0.req.nc | perm_attr.nocache)
947 comb += op.eq(Op.OP_NONE)
948 with m.If(go):
949 with m.If(~access_ok):
950 m.d.sync += Display("DCACHE access fail valid_ra=%d p=%d rc=%d",
951 valid_ra, perm_ok, rc_ok)
952 comb += op.eq(Op.OP_BAD)
953 with m.Elif(cancel_store):
954 m.d.sync += Display("DCACHE cancel store")
955 comb += op.eq(Op.OP_STCX_FAIL)
956 with m.Else():
957 m.d.sync += Display("DCACHE valid_ra=%d nc=%d ld=%d",
958 valid_ra, nc, r0.req.load)
959 comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
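# nmigen Cat() packs LSB-first, so opsel bit 0 = is_hit, bit 1 = nc,
# bit 2 = load; e.g. Case(0b101) below is "load, cacheable, hit"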
960 with m.Switch(opsel):
961 with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
962 with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
963 with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
964 with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
965 with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
966 with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
967 with m.Case(0b011): comb += op.eq(Op.OP_BAD)
968 with m.Case(0b111): comb += op.eq(Op.OP_BAD)
969 comb += req_op.eq(op)
970 comb += req_go.eq(go)
971
972 # Version of the row number that is valid one cycle earlier
973 # in the cases where we need to read the cache data BRAM.
974 # If we're stalling then we need to keep reading the last
975 # row requested.
976 with m.If(~r0_stall):
977 with m.If(m_in.valid):
978 comb += early_req_row.eq(get_row(m_in.addr))
979 with m.Else():
980 comb += early_req_row.eq(get_row(d_in.addr))
981 with m.Else():
982 comb += early_req_row.eq(req_row)
983
984 def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
985 r0_valid, r0, reservation):
986 """Handle load-with-reservation and store-conditional instructions
987 """
988 comb = m.d.comb
989
990 with m.If(r0_valid & r0.req.reserve):
991 # XXX generate alignment interrupt if address
992 # is not aligned XXX or if r0.req.nc = '1'
993 with m.If(r0.req.load):
994 comb += set_rsrv.eq(r0.req.atomic_last) # load with reservation
995 with m.Else():
996 comb += clear_rsrv.eq(r0.req.atomic_last) # store conditional
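# the reservation is tracked at cache-line granularity, hence the
# comparison of everything above LINE_OFF_BITS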
997 with m.If((~reservation.valid) |
998 (r0.req.addr[LINE_OFF_BITS:64] != reservation.addr)):
999 comb += cancel_store.eq(1)
1000
1001 def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
1002 reservation, r0):
1003
1004 comb = m.d.comb
1005 sync = m.d.sync
1006
1007 with m.If(r0_valid & access_ok):
1008 with m.If(clear_rsrv):
1009 sync += reservation.valid.eq(0)
1010 with m.Elif(set_rsrv):
1011 sync += reservation.valid.eq(1)
1012 sync += reservation.addr.eq(r0.req.addr[LINE_OFF_BITS:64])
1013
1014 def writeback_control(self, m, r1, cache_out_row):
1015 """Return data for loads & completion control logic
1016 """
1017 comb = m.d.comb
1018 sync = m.d.sync
1019 d_out, m_out = self.d_out, self.m_out
1020
1021 data_out = Signal(64)
1022 data_fwd = Signal(64)
1023
1024 # Use the bypass if we are reading the row that was
1025 # written 1 or 2 cycles ago, including for the
1026 # slow_valid = 1 case (i.e. completing a load
1027 # miss or a non-cacheable load).
1028 with m.If(r1.use_forward1):
1029 comb += data_fwd.eq(r1.forward_data1)
1030 with m.Else():
1031 comb += data_fwd.eq(r1.forward_data2)
1032
1033 comb += data_out.eq(cache_out_row)
1034
1035 for i in range(8):
1036 with m.If(r1.forward_sel[i]):
1037 dsel = data_fwd.word_select(i, 8)
1038 comb += data_out.word_select(i, 8).eq(dsel)
1039
1040 comb += d_out.valid.eq(r1.ls_valid)
1041 comb += d_out.data.eq(data_out)
1042 comb += d_out.store_done.eq(~r1.stcx_fail)
1043 comb += d_out.error.eq(r1.ls_error)
1044 comb += d_out.cache_paradox.eq(r1.cache_paradox)
1045
1046 # Outputs to MMU
1047 comb += m_out.done.eq(r1.mmu_done)
1048 comb += m_out.err.eq(r1.mmu_error)
1049 comb += m_out.data.eq(data_out)
1050
1051 # We have a valid load or store hit or we just completed
1052 # a slow op such as a load miss, a NC load or a store
1053 #
1054 # Note: the load hit is delayed by one cycle. However it
1055 # can still not collide with r.slow_valid (well unless I
1056 # miscalculated) because slow_valid can only be set on a
1057 # subsequent request and not on its first cycle (the state
1058 # machine must have advanced), which makes slow_valid
1059 # at least 2 cycles from the previous hit_load_valid.
1060
1061 # Sanity: Only one of these must be set in any given cycle
1062
1063 if False: # TODO: need Display to get this to work
1064 assert (r1.slow_valid & r1.stcx_fail) != 1, \
1065 "unexpected slow_valid collision with stcx_fail"
1066
1067 assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
1068 "unexpected hit_load_delayed collision with slow_valid"
1069
1070 with m.If(~r1.mmu_req):
1071 # Request came from loadstore1...
1072 # Load hit case is the standard path
1073 with m.If(r1.hit_load_valid):
1074 sync += Display("completing load hit data=%x", data_out)
1075
1076 # error cases complete without stalling
1077 with m.If(r1.ls_error):
1078 sync += Display("completing ld/st with error")
1079
1080 # Slow ops (load miss, NC, stores)
1081 with m.If(r1.slow_valid):
1082 sync += Display("completing store or load miss adr=%x data=%x",
1083 r1.req.real_addr, data_out)
1084
1085 with m.Else():
1086 # Request came from MMU
1087 with m.If(r1.hit_load_valid):
1088 sync += Display("completing load hit to MMU, data=%x",
1089 m_out.data)
1090 # error cases complete without stalling
1091 with m.If(r1.mmu_error):
1092 sync += Display("combpleting MMU ld with error")
1093
1094 # Slow ops (i.e. load miss)
1095 with m.If(r1.slow_valid):
1096 sync += Display("completing MMU load miss, adr=%x data=%x",
1097 r1.req.real_addr, m_out.data)
1098
1099 def rams(self, m, r1, early_req_row, cache_out_row, replace_way):
1100 """rams
1101 Generate a cache RAM for each way. This handles the normal
1102 reads, writes from reloads and the special store-hit update
1103 path as well.
1104
1105 Note: the BRAMs have an extra read buffer, meaning the output
1106 is pipelined an extra cycle. This differs from the
1107 icache. The writeback logic needs to take that into
1108 account by using 1-cycle delayed signals for load hits.
1109 """
1110 comb = m.d.comb
1111 wb_in = self.wb_in
1112
1113 for i in range(NUM_WAYS):
1114 do_read = Signal(name="do_rd%d" % i)
1115 rd_addr = Signal(ROW_BITS, name="rd_addr_%d" % i)
1116 do_write = Signal(name="do_wr%d" % i)
1117 wr_addr = Signal(ROW_BITS, name="wr_addr_%d" % i)
1118 wr_data = Signal(WB_DATA_BITS, name="din_%d" % i)
1119 wr_sel = Signal(ROW_SIZE)
1120 wr_sel_m = Signal(ROW_SIZE)
1121 _d_out = Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t
1122
1123 way = CacheRam(ROW_BITS, WB_DATA_BITS, ADD_BUF=True)
1124 setattr(m.submodules, "cacheram_%d" % i, way)
1125
1126 comb += way.rd_en.eq(do_read)
1127 comb += way.rd_addr.eq(rd_addr)
1128 comb += _d_out.eq(way.rd_data_o)
1129 comb += way.wr_sel.eq(wr_sel_m)
1130 comb += way.wr_addr.eq(wr_addr)
1131 comb += way.wr_data.eq(wr_data)
1132
1133 # Cache hit reads
1134 comb += do_read.eq(1)
1135 comb += rd_addr.eq(early_req_row)
1136 with m.If(r1.hit_way == i):
1137 comb += cache_out_row.eq(_d_out)
1138
1139 # Write mux:
1140 #
1141 # Defaults to wishbone read responses (cache refill)
1142 #
1143 # For timing, the mux on wr_data/sel/addr is not
1144 # dependent on anything other than the current state.
1145
1146 with m.If(r1.write_bram):
1147 # Write store data to BRAM. This happens one
1148 # cycle after the store is in r0.
1149 comb += wr_data.eq(r1.req.data)
1150 comb += wr_sel.eq(r1.req.byte_sel)
1151 comb += wr_addr.eq(get_row(r1.req.real_addr))
1152
1153 with m.If(i == r1.req.hit_way):
1154 comb += do_write.eq(1)
1155 with m.Else():
1156 # Otherwise, we might be doing a reload or a DCBZ
1157 with m.If(r1.dcbz):
1158 comb += wr_data.eq(0)
1159 with m.Else():
1160 comb += wr_data.eq(wb_in.dat)
1161 comb += wr_addr.eq(r1.store_row)
1162 comb += wr_sel.eq(~0) # all 1s
1163
1164 with m.If((r1.state == State.RELOAD_WAIT_ACK)
1165 & wb_in.ack & (replace_way == i)):
1166 comb += do_write.eq(1)
1167
1168 # Mask write selects with do_write since BRAM
1169 # doesn't have a global write-enable
1170 with m.If(do_write):
1171 comb += wr_sel_m.eq(wr_sel)
1172
1173 # Cache hit synchronous machine for the easy case.
1174 # This handles load hits.
1175 # It also handles error cases (TLB miss, cache paradox)
1176 def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
1177 req_hit_way, req_index, req_tag, access_ok,
1178 tlb_hit, tlb_hit_way, tlb_req_index):
1179
1180 comb = m.d.comb
1181 sync = m.d.sync
1182
1183 with m.If(req_op != Op.OP_NONE):
1184 sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
1185 req_op, r0.req.addr, r0.req.nc,
1186 req_index, req_tag, req_hit_way)
1187
1188 with m.If(r0_valid):
1189 sync += r1.mmu_req.eq(r0.mmu_req)
1190
1191 # Fast path for load/store hits.
1192 # Set signals for the writeback controls.
1193 sync += r1.hit_way.eq(req_hit_way)
1194 sync += r1.hit_index.eq(req_index)
1195
1196 with m.If(req_op == Op.OP_LOAD_HIT):
1197 sync += r1.hit_load_valid.eq(1)
1198 with m.Else():
1199 sync += r1.hit_load_valid.eq(0)
1200
1201 with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STORE_HIT)):
1202 sync += r1.cache_hit.eq(1)
1203 with m.Else():
1204 sync += r1.cache_hit.eq(0)
1205
1206 with m.If(req_op == Op.OP_BAD):
1207 # Display(f"Signalling ld/st error valid_ra={valid_ra}"
1208 # f"rc_ok={rc_ok} perm_ok={perm_ok}"
1209 sync += r1.ls_error.eq(~r0.mmu_req)
1210 sync += r1.mmu_error.eq(r0.mmu_req)
1211 sync += r1.cache_paradox.eq(access_ok)
1212
1213 with m.Else():
1214 sync += r1.ls_error.eq(0)
1215 sync += r1.mmu_error.eq(0)
1216 sync += r1.cache_paradox.eq(0)
1217
1218 with m.If(req_op == Op.OP_STCX_FAIL):
1219 sync += r1.stcx_fail.eq(1)
1220 with m.Else():
1221 sync += r1.stcx_fail.eq(0)
1222
1223 # Record TLB hit information for updating TLB PLRU
1224 sync += r1.tlb_hit.eq(tlb_hit)
1225 sync += r1.tlb_hit_way.eq(tlb_hit_way)
1226 sync += r1.tlb_hit_index.eq(tlb_req_index)
1227
1228 # Memory accesses are handled by this state machine:
1229 #
1230 # * Cache load miss/reload (in conjunction with "rams")
1231 # * Load hits for non-cachable forms
1232 # * Stores (the collision case is handled in "rams")
1233 #
1234 # All wishbone requests generation is done here.
1235 # This machine operates at stage 1.
1236 def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
1237 cache_valids, r0, replace_way,
1238 req_hit_way, req_same_tag,
1239 r0_valid, req_op, cache_tags, req_go, ra):
1240
1241 comb = m.d.comb
1242 sync = m.d.sync
1243 wb_in = self.wb_in
1244 d_in = self.d_in
1245
1246 req = MemAccessRequest("mreq_ds")
1247
1248 req_row = Signal(ROW_BITS)
1249 req_idx = Signal(INDEX_BITS)
1250 req_tag = Signal(TAG_BITS)
1251 comb += req_idx.eq(get_index(req.real_addr))
1252 comb += req_row.eq(get_row(req.real_addr))
1253 comb += req_tag.eq(get_tag(req.real_addr))
1254
1255 sync += r1.use_forward1.eq(use_forward1_next)
1256 sync += r1.forward_sel.eq(0)
1257
1258 with m.If(use_forward1_next):
1259 sync += r1.forward_sel.eq(r1.req.byte_sel)
1260 with m.Elif(use_forward2_next):
1261 sync += r1.forward_sel.eq(r1.forward_sel1)
1262
1263 sync += r1.forward_data2.eq(r1.forward_data1)
1264 with m.If(r1.write_bram):
1265 sync += r1.forward_data1.eq(r1.req.data)
1266 sync += r1.forward_sel1.eq(r1.req.byte_sel)
1267 sync += r1.forward_way1.eq(r1.req.hit_way)
1268 sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
1269 sync += r1.forward_valid1.eq(1)
1270 with m.Else():
1271 with m.If(r1.dcbz):
1272 sync += r1.forward_data1.eq(0)
1273 with m.Else():
1274 sync += r1.forward_data1.eq(wb_in.dat)
1275 sync += r1.forward_sel1.eq(~0) # all 1s
1276 sync += r1.forward_way1.eq(replace_way)
1277 sync += r1.forward_row1.eq(r1.store_row)
1278 sync += r1.forward_valid1.eq(0)
1279
1280 # One cycle pulses reset
1281 sync += r1.slow_valid.eq(0)
1282 sync += r1.write_bram.eq(0)
1283 sync += r1.inc_acks.eq(0)
1284 sync += r1.dec_acks.eq(0)
1285
1286 sync += r1.ls_valid.eq(0)
1287 # complete tlbies and TLB loads in the third cycle
1288 sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
1289
1290 with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
1291 with m.If(~r0.mmu_req):
1292 sync += r1.ls_valid.eq(1)
1293 with m.Else():
1294 sync += r1.mmu_done.eq(1)
1295
1296 with m.If(r1.write_tag):
1297 # Store new tag in selected way
1298 for i in range(NUM_WAYS):
1299 with m.If(i == replace_way):
1300 ct = Signal(TAG_RAM_WIDTH)
1301 comb += ct.eq(cache_tags[r1.store_index])
1302 """
1303 TODO: check this
1304 cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
1305 (TAG_WIDTH - 1 downto TAG_BITS => '0') & r1.reload_tag;
1306 """
1307 comb += ct.word_select(i, TAG_WIDTH).eq(r1.reload_tag)
1308 sync += cache_tags[r1.store_index].eq(ct)
1309 sync += r1.store_way.eq(replace_way)
1310 sync += r1.write_tag.eq(0)
1311
1312 # Take request from r1.req if there is one there,
1313 # else from req_op, ra, etc.
1314 with m.If(r1.full):
1315 comb += req.eq(r1.req)
1316 with m.Else():
1317 comb += req.op.eq(req_op)
1318 comb += req.valid.eq(req_go)
1319 comb += req.mmu_req.eq(r0.mmu_req)
1320 comb += req.dcbz.eq(r0.req.dcbz)
1321 comb += req.real_addr.eq(ra)
1322
1323 with m.If(r0.req.dcbz):
1324 # force data to 0 for dcbz
1325 comb += req.data.eq(0)
1326 with m.Elif(r0.d_valid):
1327 comb += req.data.eq(r0.req.data)
1328 with m.Else():
1329 comb += req.data.eq(d_in.data)
1330
1331 # Select all bytes for dcbz
1332 # and for cacheable loads
1333 with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
1334 comb += req.byte_sel.eq(~0) # all 1s
1335 with m.Else():
1336 comb += req.byte_sel.eq(r0.req.byte_sel)
1337 comb += req.hit_way.eq(req_hit_way)
1338 comb += req.same_tag.eq(req_same_tag)
1339
1340 # Store the incoming request from r0,
1341 # if it is a slow request
1342 # Note that r1.full = 1 implies req_op = OP_NONE
1343 with m.If((req_op == Op.OP_LOAD_MISS)
1344 | (req_op == Op.OP_LOAD_NC)
1345 | (req_op == Op.OP_STORE_MISS)
1346 | (req_op == Op.OP_STORE_HIT)):
1347 sync += r1.req.eq(req)
1348 sync += r1.full.eq(1)
1349
1350 # Main state machine
1351 with m.Switch(r1.state):
1352
1353 with m.Case(State.IDLE):
1354 sync += r1.wb.adr.eq(req.real_addr[ROW_LINE_BITS:])
1355 sync += r1.wb.sel.eq(req.byte_sel)
1356 sync += r1.wb.dat.eq(req.data)
1357 sync += r1.dcbz.eq(req.dcbz)
1358
1359 # Keep track of our index and way
1360 # for subsequent stores.
1361 sync += r1.store_index.eq(req_idx)
1362 sync += r1.store_row.eq(req_row)
1363 sync += r1.end_row_ix.eq(get_row_of_line(req_row)-1)
1364 sync += r1.reload_tag.eq(req_tag)
1365 sync += r1.req.same_tag.eq(1)
1366
1367 with m.If(req.op == Op.OP_STORE_HIT):
1368 sync += r1.store_way.eq(req.hit_way)
1369
1370 # Reset per-row valid bits,
1371 # ready for handling OP_LOAD_MISS
1372 for i in range(ROW_PER_LINE):
1373 sync += r1.rows_valid[i].eq(0)
1374
1375 with m.If(req_op != Op.OP_NONE):
1376 sync += Display("cache op %d", req.op)
1377
1378 with m.Switch(req.op):
1379 with m.Case(Op.OP_LOAD_HIT):
1380 # stay in IDLE state
1381 pass
1382
1383 with m.Case(Op.OP_LOAD_MISS):
1384 sync += Display("cache miss real addr: %x " \
1385 "idx: %x tag: %x",
1386 req.real_addr, req_row, req_tag)
1387
1388 # Start the wishbone cycle
1389 sync += r1.wb.we.eq(0)
1390 sync += r1.wb.cyc.eq(1)
1391 sync += r1.wb.stb.eq(1)
1392
1393 # Track that we had one request sent
1394 sync += r1.state.eq(State.RELOAD_WAIT_ACK)
1395 sync += r1.write_tag.eq(1)
1396
1397 with m.Case(Op.OP_LOAD_NC):
1398 sync += r1.wb.cyc.eq(1)
1399 sync += r1.wb.stb.eq(1)
1400 sync += r1.wb.we.eq(0)
1401 sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)
1402
1403 with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
1404 with m.If(~req.dcbz):
1405 sync += r1.state.eq(State.STORE_WAIT_ACK)
1406 sync += r1.acks_pending.eq(1)
1407 sync += r1.full.eq(0)
1408 sync += r1.slow_valid.eq(1)
1409
1410 with m.If(~req.mmu_req):
1411 sync += r1.ls_valid.eq(1)
1412 with m.Else():
1413 sync += r1.mmu_done.eq(1)
1414
1415 with m.If(req.op == Op.OP_STORE_HIT):
1416 sync += r1.write_bram.eq(1)
1417 with m.Else():
1418 # dcbz is handled much like a load miss except
1419 # that we are writing to memory instead of reading
1420 sync += r1.state.eq(State.RELOAD_WAIT_ACK)
1421
1422 with m.If(req.op == Op.OP_STORE_MISS):
1423 sync += r1.write_tag.eq(1)
1424
1425 sync += r1.wb.we.eq(1)
1426 sync += r1.wb.cyc.eq(1)
1427 sync += r1.wb.stb.eq(1)
1428
1429 # OP_NONE and OP_BAD do nothing
1430 # OP_BAD & OP_STCX_FAIL were
1431 # handled above already
1432 with m.Case(Op.OP_NONE):
1433 pass
1434 with m.Case(Op.OP_BAD):
1435 pass
1436 with m.Case(Op.OP_STCX_FAIL):
1437 pass
1438
1439 with m.Case(State.RELOAD_WAIT_ACK):
1440 ld_stbs_done = Signal()
1441 # Requests are all sent if stb is 0
1442 comb += ld_stbs_done.eq(~r1.wb.stb)
1443
1444 # If we are still sending requests, was one accepted?
1445 with m.If((~wb_in.stall) & r1.wb.stb):
1446 # That was the last word? We are done sending.
1447 # Clear stb and set ld_stbs_done so we can handle an
1448 # eventual last ack on the same cycle.
1449 # sigh - reconstruct wb adr with 3 extra 0s at front
1450 wb_adr = Cat(Const(0, ROW_OFF_BITS), r1.wb.adr)
1451 with m.If(is_last_row_addr(wb_adr, r1.end_row_ix)):
1452 sync += r1.wb.stb.eq(0)
1453 comb += ld_stbs_done.eq(1)
1454
1455 # Calculate the next row address in the current cache line
1456 row = Signal(LINE_OFF_BITS-ROW_OFF_BITS)
1457 comb += row.eq(r1.wb.adr)
1458 sync += r1.wb.adr[:LINE_OFF_BITS-ROW_OFF_BITS].eq(row+1)
1459
1460 # Incoming acks processing
1461 sync += r1.forward_valid1.eq(wb_in.ack)
1462 with m.If(wb_in.ack):
1463 srow = Signal(ROW_LINE_BITS)
1464 comb += srow.eq(r1.store_row)
1465 sync += r1.rows_valid[srow].eq(1)
1466
1467 # If this is the data we were looking for,
1468 # we can complete the request next cycle.
1469 # Compare the whole address in case the
1470 # request in r1.req is not the one that
1471 # started this refill.
1472 with m.If(req.valid & r1.req.same_tag &
1473 ((r1.dcbz & r1.req.dcbz) |
1474 (~r1.dcbz & (r1.req.op == Op.OP_LOAD_MISS))) &
1475 (r1.store_row == get_row(req.real_addr))):
1476 sync += r1.full.eq(0)
1477 sync += r1.slow_valid.eq(1)
1478 with m.If(~r1.mmu_req):
1479 sync += r1.ls_valid.eq(1)
1480 with m.Else():
1481 sync += r1.mmu_done.eq(1)
1482 sync += r1.forward_sel.eq(~0) # all 1s
1483 sync += r1.use_forward1.eq(1)
1484
1485 # Check for completion
1486 with m.If(ld_stbs_done & is_last_row(r1.store_row,
1487 r1.end_row_ix)):
1488 # Complete wishbone cycle
1489 sync += r1.wb.cyc.eq(0)
1490
1491 # Cache line is now valid
1492 cv = Signal(NUM_WAYS)
1493 comb += cv.eq(cache_valids[r1.store_index])
1494 comb += cv.bit_select(r1.store_way, 1).eq(1)
1495 sync += cache_valids[r1.store_index].eq(cv)
1496
1497 sync += r1.state.eq(State.IDLE)
1498
1499 # Increment store row counter
1500 sync += r1.store_row.eq(next_row(r1.store_row))
1501
1502 with m.Case(State.STORE_WAIT_ACK):
1503 st_stbs_done = Signal()
1504 acks = Signal(3)
1505 adjust_acks = Signal(3)
1506
1507 comb += st_stbs_done.eq(~r1.wb.stb)
1508 comb += acks.eq(r1.acks_pending)
1509
1510 with m.If(r1.inc_acks != r1.dec_acks):
1511 with m.If(r1.inc_acks):
1512 comb += adjust_acks.eq(acks + 1)
1513 with m.Else():
1514 comb += adjust_acks.eq(acks - 1)
1515 with m.Else():
1516 comb += adjust_acks.eq(acks)
1517
1518 sync += r1.acks_pending.eq(adjust_acks)
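# acks_pending is a 3-bit count of stores sent on the wishbone but not
# yet acked; the (adjust_acks < 7) guard below stops it overflowing
# before another stb is issued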
1519
1520 # Clear stb when slave accepted request
1521 with m.If(~wb_in.stall):
1522 # See if there is another store waiting
1523 # to be done which is in the same real page.
1524 with m.If(req.valid):
1525 _ra = req.real_addr[ROW_LINE_BITS:SET_SIZE_BITS]
1526 sync += r1.wb.adr[0:SET_SIZE_BITS].eq(_ra)
1527 sync += r1.wb.dat.eq(req.data)
1528 sync += r1.wb.sel.eq(req.byte_sel)
1529
1530 with m.If((adjust_acks < 7) & req.same_tag &
1531 ((req.op == Op.OP_STORE_MISS)
1532 | (req.op == Op.OP_STORE_HIT))):
1533 sync += r1.wb.stb.eq(1)
1534 comb += st_stbs_done.eq(0)
1535
1536 with m.If(req.op == Op.OP_STORE_HIT):
1537 sync += r1.write_bram.eq(1)
1538 sync += r1.full.eq(0)
1539 sync += r1.slow_valid.eq(1)
1540
1541 # Store requests never come from the MMU
1542 sync += r1.ls_valid.eq(1)
1543 comb += st_stbs_done.eq(0)
1544 sync += r1.inc_acks.eq(1)
1545 with m.Else():
1546 sync += r1.wb.stb.eq(0)
1547 comb += st_stbs_done.eq(1)
1548
1549 # Got ack ? See if complete.
1550 with m.If(wb_in.ack):
1551 with m.If(st_stbs_done & (adjust_acks == 1)):
1552 sync += r1.state.eq(State.IDLE)
1553 sync += r1.wb.cyc.eq(0)
1554 sync += r1.wb.stb.eq(0)
1555 sync += r1.dec_acks.eq(1)
1556
1557 with m.Case(State.NC_LOAD_WAIT_ACK):
1558 # Clear stb when slave accepted request
1559 with m.If(~wb_in.stall):
1560 sync += r1.wb.stb.eq(0)
1561
1562 # Got ack ? complete.
1563 with m.If(wb_in.ack):
1564 sync += r1.state.eq(State.IDLE)
1565 sync += r1.full.eq(0)
1566 sync += r1.slow_valid.eq(1)
1567
1568 with m.If(~r1.mmu_req):
1569 sync += r1.ls_valid.eq(1)
1570 with m.Else():
1571 sync += r1.mmu_done.eq(1)
1572
1573 sync += r1.forward_sel.eq(~0) # all 1s
1574 sync += r1.use_forward1.eq(1)
1575 sync += r1.wb.cyc.eq(0)
1576 sync += r1.wb.stb.eq(0)
1577
1578 def dcache_log(self, m, r1, valid_ra, tlb_hit_way, stall_out):
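# note: currently unused (the call in elaborate() is commented out) and,
# as written, it references req_op and r1.real_adr which are not in scope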
1579
1580 sync = m.d.sync
1581 d_out, wb_in, log_out = self.d_out, self.wb_in, self.log_out
1582
1583 sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit_way[:3],
1584 stall_out, req_op[:3], d_out.valid, d_out.error,
1585 r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
1586 r1.real_adr[3:6]))
1587
1588 def elaborate(self, platform):
1589
1590 m = Module()
1591 comb = m.d.comb
1592 d_in = self.d_in
1593
1594 # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
1595 cache_tags = CacheTagArray()
1596 cache_tag_set = Signal(TAG_RAM_WIDTH)
1597 cache_valids = CacheValidBitsArray()
1598
1599 # TODO attribute ram_style : string;
1600 # TODO attribute ram_style of cache_tags : signal is "distributed";
1601
1602 """note: these are passed to nmigen.hdl.Memory as "attributes".
1603 don't know how, just that they are.
1604 """
1605 dtlb_valid_bits = TLBValidBitsArray()
1606 dtlb_tags = TLBTagsArray()
1607 dtlb_ptes = TLBPtesArray()
1608 # TODO attribute ram_style of
1609 # dtlb_tags : signal is "distributed";
1610 # TODO attribute ram_style of
1611 # dtlb_ptes : signal is "distributed";
1612
1613 r0 = RegStage0("r0")
1614 r0_full = Signal()
1615
1616 r1 = RegStage1("r1")
1617
1618 reservation = Reservation()
1619
1620 # Async signals on incoming request
1621 req_index = Signal(INDEX_BITS)
1622 req_row = Signal(ROW_BITS)
1623 req_hit_way = Signal(WAY_BITS)
1624 req_tag = Signal(TAG_BITS)
1625 req_op = Signal(Op)
1626 req_data = Signal(64)
1627 req_same_tag = Signal()
1628 req_go = Signal()
1629
1630 early_req_row = Signal(ROW_BITS)
1631
1632 cancel_store = Signal()
1633 set_rsrv = Signal()
1634 clear_rsrv = Signal()
1635
1636 r0_valid = Signal()
1637 r0_stall = Signal()
1638
1639 use_forward1_next = Signal()
1640 use_forward2_next = Signal()
1641
1642 cache_out_row = Signal(WB_DATA_BITS)
1643
1644 plru_victim = PLRUOut()
1645 replace_way = Signal(WAY_BITS)
1646
1647 # Wishbone read/write/cache write formatting signals
1648 bus_sel = Signal(8)
1649
1650 # TLB signals
1651 tlb_tag_way = Signal(TLB_TAG_WAY_BITS)
1652 tlb_pte_way = Signal(TLB_PTE_WAY_BITS)
1653 tlb_valid_way = Signal(TLB_NUM_WAYS)
1654 tlb_req_index = Signal(TLB_SET_BITS)
1655 tlb_hit = Signal()
1656 tlb_hit_way = Signal(TLB_WAY_BITS)
1657 pte = Signal(TLB_PTE_BITS)
1658 ra = Signal(REAL_ADDR_BITS)
1659 valid_ra = Signal()
1660 perm_attr = PermAttr("dc_perms")
1661 rc_ok = Signal()
1662 perm_ok = Signal()
1663 access_ok = Signal()
1664
1665 tlb_plru_victim = TLBPLRUOut()
1666
1667 # we don't yet handle collisions between loadstore1 requests
1668 # and MMU requests
1669 comb += self.m_out.stall.eq(0)
1670
1671 # Hold off the request in r0 when r1 has an uncompleted request
1672 comb += r0_stall.eq(r0_full & (r1.full | d_in.hold))
1673 comb += r0_valid.eq(r0_full & ~r1.full & ~d_in.hold)
1674 comb += self.stall_out.eq(r0_stall)
1675
1676 # Wire up wishbone request latch out of stage 1
1677 comb += self.wb_out.eq(r1.wb)
1678
1679 # deal with litex not doing wishbone pipeline mode
1680 # XXX in wrong way. FIFOs are needed in the SRAM test
1681 # so that stb/ack match up
1682 comb += self.wb_in.stall.eq(self.wb_out.cyc & ~self.wb_in.ack)
1683
1684 # call sub-functions putting everything together, using shared
1685 # signals established above
1686 self.stage_0(m, r0, r1, r0_full)
1687 self.tlb_read(m, r0_stall, tlb_valid_way,
1688 tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
1689 dtlb_tags, dtlb_ptes)
1690 self.tlb_search(m, tlb_req_index, r0, r0_valid,
1691 tlb_valid_way, tlb_tag_way, tlb_hit_way,
1692 tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra)
1693 self.tlb_update(m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
1694 tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
1695 dtlb_tags, tlb_pte_way, dtlb_ptes)
1696 self.maybe_plrus(m, r1, plru_victim)
1697 self.maybe_tlb_plrus(m, r1, tlb_plru_victim)
1698 self.cache_tag_read(m, r0_stall, req_index, cache_tag_set, cache_tags)
1699 self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
1700 r0_valid, r1, cache_valids, replace_way,
1701 use_forward1_next, use_forward2_next,
1702 req_hit_way, plru_victim, rc_ok, perm_attr,
1703 valid_ra, perm_ok, access_ok, req_op, req_go,
1704 tlb_pte_way,
1705 tlb_hit, tlb_hit_way, tlb_valid_way, cache_tag_set,
1706 cancel_store, req_same_tag, r0_stall, early_req_row)
1707 self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
1708 r0_valid, r0, reservation)
1709 self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
1710 reservation, r0)
1711 self.writeback_control(m, r1, cache_out_row)
1712 self.rams(m, r1, early_req_row, cache_out_row, replace_way)
1713 self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
1714 req_hit_way, req_index, req_tag, access_ok,
1715 tlb_hit, tlb_hit_way, tlb_req_index)
1716 self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
1717 cache_valids, r0, replace_way,
1718 req_hit_way, req_same_tag,
1719 r0_valid, req_op, cache_tags, req_go, ra)
1720 #self.dcache_log(m, r1, valid_ra, tlb_hit_way, stall_out)
1721
1722 return m
1723
1724
1725 if __name__ == '__main__':
1726 dut = DCache()
1727 vl = rtlil.convert(dut, ports=[])
1728 with open("test_dcache.il", "w") as f:
1729 f.write(vl)