[soc.git] / src / soc / experiment / dcache.py
1 #!/usr/bin/env python3
2 #
3 # Copyright (C) 2020,2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
4 # Copyright (C) 2020 Cole Poirier
5 # Copyright (C) 2020,2021 Cesar Strauss
6 # Copyright (C) 2021 Tobias Platen
7 #
8 # Original dcache.vhdl Copyright of its authors and licensed
9 # by IBM under CC-BY 4.0
10 # https://github.com/antonblanchard/microwatt
11 #
12 # Conversion to nmigen funded by NLnet and NGI POINTER under EU Grants
13 # 871528 and 957073, under the LGPL-v3+ License
14
15 """DCache
16
17 based on Anton Blanchard microwatt dcache.vhdl
18
19 note that the microwatt dcache wishbone interface expects "stall".
20 for simplicity at the moment this is hard-coded to cyc & ~ack.
21 see WB4 spec, p84, section 5.2.1
22
23 IMPORTANT: for store, the data is sampled the cycle AFTER the "valid"
24 is raised. sigh
25
26 Links:
27
28 * https://libre-soc.org/3d_gpu/architecture/set_associative_cache.jpg
29 * https://bugs.libre-soc.org/show_bug.cgi?id=469
30 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
31 (discussion about brams for ECP5)
32
33 """
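# Illustrative sketch (an assumption matching the note in the docstring
# above): a slave, or the test fabric, that has no real "stall" signal can
# present the hard-coded convention like this:
#
#     comb += bus.stall.eq(bus.cyc & ~bus.ack)
#
# i.e. the slave appears busy from the start of a cycle until it acks.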
34
35 import sys
36
37 from nmutil.gtkw import write_gtkw
38
39 sys.setrecursionlimit(1000000)
40
41 from enum import Enum, unique
42
43 from nmigen import (Module, Signal, Elaboratable, Cat, Repl, Array, Const,
44 Record, Memory)
45 from nmutil.util import Display
46 from nmigen.lib.coding import Decoder
47
48 from copy import deepcopy
49 from random import randint, seed
50
51 from nmigen_soc.wishbone.bus import Interface
52
53 from nmigen.cli import main
54 from nmutil.iocontrol import RecordObject
55 from nmigen.utils import log2_int
56 from soc.experiment.mem_types import (LoadStore1ToDCacheType,
57 DCacheToLoadStore1Type,
58 MMUToDCacheType,
59 DCacheToMMUType)
60
61 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
62 WBAddrType, WBDataType, WBSelType,
63 WBMasterOut, WBSlaveOut,
64 WBMasterOutVector, WBSlaveOutVector,
65 WBIOMasterOut, WBIOSlaveOut)
66
67 from soc.experiment.cache_ram import CacheRam
68 from soc.experiment.plru import PLRU, PLRUs
69 #from nmutil.plru import PLRU, PLRUs
70
71 # for test
72 from soc.bus.sram import SRAM
73 from nmigen import Memory
74 from nmigen.cli import rtlil
75
76 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
77 # Also, check out the cxxsim nmigen branch, and latest yosys from git
78 from nmutil.sim_tmp_alternative import Simulator
79
80 from nmutil.util import wrap
81
82 LOG_LENGTH = 0 # Non-zero to enable log data collection
83
84 def ispow2(x):
85 return (1<<log2_int(x, False)) == x
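# e.g. (illustrative) ispow2(64) -> True, ispow2(48) -> False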
86
87
88 class DCacheConfig:
89 def __init__(self, LINE_SIZE = 64, # Line size in bytes
90 NUM_LINES = 64, # Number of lines per way (i.e. number of sets)
91 NUM_WAYS = 2, # Number of ways
92 TLB_SET_SIZE = 64, # L1 DTLB entries per set
93 TLB_NUM_WAYS = 2, # L1 DTLB number of ways
94 TLB_LG_PGSZ = 12): # L1 DTLB log_2(page_size)
95 self.LINE_SIZE = LINE_SIZE
96 self.NUM_LINES = NUM_LINES
97 self.NUM_WAYS = NUM_WAYS
98 self.TLB_SET_SIZE = TLB_SET_SIZE
99 self.TLB_NUM_WAYS = TLB_NUM_WAYS
100 self.TLB_LG_PGSZ = TLB_LG_PGSZ
101
102 # BRAM organisation: We never access more than
103 # WB_DATA_BITS at a time so to save
104 # resources we make the array only that wide, and
105 # use consecutive indices to make a cache "line"
106 #
107 # ROW_SIZE is the width in bytes of the BRAM
108 # (based on WB, so 64-bits)
109 self.ROW_SIZE = WB_DATA_BITS // 8
110
111 # ROW_PER_LINE is the number of row (wishbone
112 # transactions) in a line
113 self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
114
115 # BRAM_ROWS is the number of rows in BRAM needed
116 # to represent the full dcache
117 self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
118
119 print ("ROW_SIZE", self.ROW_SIZE)
120 print ("ROW_PER_LINE", self.ROW_PER_LINE)
121 print ("BRAM_ROWS", self.BRAM_ROWS)
122 print ("NUM_WAYS", self.NUM_WAYS)
123
124 # Bit fields counts in the address
125
126 # REAL_ADDR_BITS is the number of real address
127 # bits that we store
128 self.REAL_ADDR_BITS = 56
129
130 # ROW_BITS is the number of bits to select a row
131 self.ROW_BITS = log2_int(self.BRAM_ROWS)
132
133 # ROW_LINE_BITS is the number of bits to select
134 # a row within a line
135 self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
136
137 # LINE_OFF_BITS is the number of bits for
138 # the offset in a cache line
139 self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
140
141 # ROW_OFF_BITS is the number of bits for
142 # the offset in a row
143 self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
144
145 # INDEX_BITS is the number of bits to
146 # select a cache line
147 self.INDEX_BITS = log2_int(self.NUM_LINES)
148
149 # SET_SIZE_BITS is the log base 2 of the set size
150 self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
151
152 # TAG_BITS is the number of bits of
153 # the tag part of the address
154 self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
155
156 # TAG_WIDTH is the width in bits of each way of the tag RAM
157 self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)
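# i.e. TAG_BITS rounded up to the next multiple of 8, so each way's
# tag occupies a whole number of bytes within the tag RAM row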
158
159 # WAY_BITS is the number of bits to select a way
160 self.WAY_BITS = log2_int(self.NUM_WAYS)
161
162 # Example of layout for 32 lines of 64 bytes:
163 layout = f"""\
164 DCache Layout:
165 |.. -----------------------| REAL_ADDR_BITS ({self.REAL_ADDR_BITS})
166 .. |--------------| SET_SIZE_BITS ({self.SET_SIZE_BITS})
167 .. tag |index| line |
168 .. | row | |
169 .. | |---| | ROW_LINE_BITS ({self.ROW_LINE_BITS})
170 .. | |--- - --| LINE_OFF_BITS ({self.LINE_OFF_BITS})
171 .. | |- --| ROW_OFF_BITS ({self.ROW_OFF_BITS})
172 .. |----- ---| | ROW_BITS ({self.ROW_BITS})
173 .. |-----| | INDEX_BITS ({self.INDEX_BITS})
174 .. --------| | TAG_BITS ({self.TAG_BITS})
175 """
176 print (layout)
177 print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
178 (self.TAG_BITS, self.INDEX_BITS, self.ROW_BITS,
179 self.ROW_OFF_BITS, self.LINE_OFF_BITS, self.ROW_LINE_BITS))
180 print ("index @: %d-%d" % (self.LINE_OFF_BITS, self.SET_SIZE_BITS))
181 print ("row @: %d-%d" % (self.LINE_OFF_BITS, self.ROW_OFF_BITS))
182 print ("tag @: %d-%d width %d" % (self.SET_SIZE_BITS,
183 self.REAL_ADDR_BITS, self.TAG_WIDTH))
184
185 self.TAG_RAM_WIDTH = self.TAG_WIDTH * self.NUM_WAYS
186
187 print ("TAG_RAM_WIDTH", self.TAG_RAM_WIDTH)
188 print (" TAG_WIDTH", self.TAG_WIDTH)
189 print (" NUM_WAYS", self.NUM_WAYS)
190 print (" NUM_LINES", self.NUM_LINES)
191
192 # L1 TLB
193 self.TLB_SET_BITS = log2_int(self.TLB_SET_SIZE)
194 self.TLB_WAY_BITS = log2_int(self.TLB_NUM_WAYS)
195 self.TLB_EA_TAG_BITS = 64 - (self.TLB_LG_PGSZ + self.TLB_SET_BITS)
196 self.TLB_TAG_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_EA_TAG_BITS
197 self.TLB_PTE_BITS = 64
198 self.TLB_PTE_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_PTE_BITS
199
200 assert (self.LINE_SIZE % self.ROW_SIZE) == 0, \
201 "LINE_SIZE not multiple of ROW_SIZE"
202 assert ispow2(self.LINE_SIZE), "LINE_SIZE not power of 2"
203 assert ispow2(self.NUM_LINES), "NUM_LINES not power of 2"
204 assert ispow2(self.ROW_PER_LINE), "ROW_PER_LINE not power of 2"
205 assert self.ROW_BITS == \
206 (self.INDEX_BITS + self.ROW_LINE_BITS), \
207 "geometry bits don't add up"
208 assert (self.LINE_OFF_BITS == \
209 self.ROW_OFF_BITS + self.ROW_LINE_BITS), \
210 "geometry bits don't add up"
211 assert self.REAL_ADDR_BITS == \
212 (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS), \
213 "geometry bits don't add up"
214 assert self.REAL_ADDR_BITS == \
215 (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS), \
216 "geometry bits don't add up"
217 assert 64 == WB_DATA_BITS, \
218 "Can't yet handle wb width that isn't 64-bits"
219 assert self.SET_SIZE_BITS <= self.TLB_LG_PGSZ, \
220 "Set indexed by virtual address"
221
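# Illustrative worked example with the DCacheConfig defaults above
# (LINE_SIZE=64, NUM_LINES=64, NUM_WAYS=2, WB_DATA_BITS=64):
#
#   ROW_SIZE      = 64 // 8    = 8 bytes
#   ROW_PER_LINE  = 64 // 8    = 8 rows per line
#   BRAM_ROWS     = 64 * 8     = 512 rows
#   ROW_BITS      = log2(512)  = 9
#   ROW_LINE_BITS = log2(8)    = 3
#   LINE_OFF_BITS = log2(64)   = 6
#   ROW_OFF_BITS  = log2(8)    = 3
#   INDEX_BITS    = log2(64)   = 6
#   SET_SIZE_BITS = 6 + 6      = 12
#   TAG_BITS      = 56 - 12    = 44
#   TAG_WIDTH     = 44 rounded up to a byte multiple = 48
#   WAY_BITS      = log2(2)    = 1
#
# so a 56-bit real address splits as tag[55:12] | index[11:6] | offset[5:0],
# and SET_SIZE_BITS (12) <= TLB_LG_PGSZ (12) keeps the index within the
# page offset, satisfying the "Set indexed by virtual address" assert.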
222 def CacheTagArray(self):
223 return Array(Signal(self.TAG_RAM_WIDTH, name="tag%d" % x) \
224 for x in range(self.NUM_LINES))
225
226 def CacheValidsArray(self):
227 return Array(Signal(self.NUM_WAYS, name="tag_valids%d" % x)
228 for x in range(self.NUM_LINES))
229
230 def RowPerLineValidArray(self):
231 return Array(Signal(name="rows_valid%d" % x) \
232 for x in range(self.ROW_PER_LINE))
233
234 def TLBHit(self, name):
235 return Record([('valid', 1),
236 ('way', self.TLB_WAY_BITS)], name=name)
237
238 def TLBTagEAArray(self):
239 return Array(Signal(self.TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
240 for x in range (self.TLB_NUM_WAYS))
241
242 def TLBRecord(self, name):
243 tlb_layout = [('valid', self.TLB_NUM_WAYS),
244 ('tag', self.TLB_TAG_WAY_BITS),
245 ('pte', self.TLB_PTE_WAY_BITS)
246 ]
247 return Record(tlb_layout, name=name)
248
249 def TLBValidArray(self):
250 return Array(Signal(self.TLB_NUM_WAYS, name="tlb_valid%d" % x)
251 for x in range(self.TLB_SET_SIZE))
252
253 def HitWaySet(self):
254 return Array(Signal(self.WAY_BITS, name="hitway_%d" % x) \
255 for x in range(self.TLB_NUM_WAYS))
256
257 # Cache RAM interface
258 def CacheRamOut(self):
259 return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
260 for x in range(self.NUM_WAYS))
261
262 # PLRU output interface
263 def PLRUOut(self):
264 return Array(Signal(self.WAY_BITS, name="plru_out%d" % x) \
265 for x in range(self.NUM_LINES))
266
267 # TLB PLRU output interface
268 def TLBPLRUOut(self):
269 return Array(Signal(self.TLB_WAY_BITS, name="tlbplru_out%d" % x) \
270 for x in range(self.TLB_SET_SIZE))
271
272 # Helper functions to decode incoming requests
273 #
274 # Return the cache line index (tag index) for an address
275 def get_index(self, addr):
276 return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]
277
278 # Return the cache row index (data memory) for an address
279 def get_row(self, addr):
280 return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]
281
282 # Return the index of a row within a line
283 def get_row_of_line(self, row):
284 return row[:self.ROW_BITS][:self.ROW_LINE_BITS]
285
286 # Returns whether this address is in the last row of a line
287 def is_last_row_addr(self, addr, last):
288 return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last
289
290 # Returns whether this is the last row of a line
291 def is_last_row(self, row, last):
292 return self.get_row_of_line(row) == last
293
294 # Return the next row in the current cache line. We use a
295 # dedicated function in order to limit the size of the
296 # generated adder to be only the bits within a cache line
297 # (3 bits with default settings)
298 def next_row(self, row):
299 row_v = row[0:self.ROW_LINE_BITS] + 1
300 return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
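# e.g. (illustrative) with ROW_LINE_BITS=3, next_row of a row ending in
# 0b111 wraps those 3 bits back to 0b000 and leaves the upper row bits
# (the line index) unchanged, so no carry ever propagates out of the line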
301
302 # Get the tag value from the address
303 def get_tag(self, addr):
304 return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]
305
306 # Read a tag from a tag memory row
307 def read_tag(self, way, tagset):
308 return tagset.word_select(way, self.TAG_WIDTH)[:self.TAG_BITS]
309
310 # Read a TLB tag from a TLB tag memory row
311 def read_tlb_tag(self, way, tags):
312 return tags.word_select(way, self.TLB_EA_TAG_BITS)
313
314 # Write a TLB tag to a TLB tag memory row
315 def write_tlb_tag(self, way, tags, tag):
316 return self.read_tlb_tag(way, tags).eq(tag)
317
318 # Read a PTE from a TLB PTE memory row
319 def read_tlb_pte(self, way, ptes):
320 return ptes.word_select(way, self.TLB_PTE_BITS)
321
322 def write_tlb_pte(self, way, ptes, newpte):
323 return self.read_tlb_pte(way, ptes).eq(newpte)
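# (illustrative note) the read_*/write_* helpers above are thin wrappers
# around Signal.word_select: e.g. read_tlb_pte(1, ptes) selects bits
# [TLB_PTE_BITS:2*TLB_PTE_BITS] of the packed per-set PTE row, and the
# matching write_* helper returns an .eq() on exactly that slice, which
# is why callers simply add the result to comb/sync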
324
325
326 # Record for storing permission, attribute, etc. bits from a PTE
327 class PermAttr(RecordObject):
328 def __init__(self, name=None):
329 super().__init__(name=name)
330 self.reference = Signal()
331 self.changed = Signal()
332 self.nocache = Signal()
333 self.priv = Signal()
334 self.rd_perm = Signal()
335 self.wr_perm = Signal()
336
337
338 def extract_perm_attr(pte):
339 pa = PermAttr()
340 return pa
341
342
343 # Type of operation on a "valid" input
344 @unique
345 class Op(Enum):
346 OP_NONE = 0
347 OP_BAD = 1 # NC cache hit, TLB miss, prot/RC failure
348 OP_STCX_FAIL = 2 # conditional store w/o reservation
349 OP_LOAD_HIT = 3 # Cache hit on load
350 OP_LOAD_MISS = 4 # Load missing cache
351 OP_LOAD_NC = 5 # Non-cachable load
352 OP_STORE_HIT = 6 # Store hitting cache
353 OP_STORE_MISS = 7 # Store missing cache
354
355
356 # Cache state machine
357 @unique
358 class State(Enum):
359 IDLE = 0 # Normal load hit processing
360 RELOAD_WAIT_ACK = 1 # Cache reload wait ack
361 STORE_WAIT_ACK = 2 # Store wait ack
362 NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
363
364
365 # Dcache operations:
366 #
367 # In order to make timing, we use the BRAMs with
368 # an output buffer, which means that the BRAM
369 # output is delayed by an extra cycle.
370 #
371 # Thus, the dcache has a 2-stage internal pipeline
372 # for cache hits with no stalls.
373 #
374 # All other operations are handled via stalling
375 # in the first stage.
376 #
377 # The second stage can thus complete a hit at the same
378 # time as the first stage emits a stall for a complex op.
379 #
380 # Stage 0 register, basically contains just the latched request
381
382 class RegStage0(RecordObject):
383 def __init__(self, name=None):
384 super().__init__(name=name)
385 self.req = LoadStore1ToDCacheType(name="lsmem")
386 self.tlbie = Signal() # indicates a tlbie request (from MMU)
387 self.doall = Signal() # with tlbie, indicates flush whole TLB
388 self.tlbld = Signal() # indicates a TLB load request (from MMU)
389 self.mmu_req = Signal() # indicates source of request
390 self.d_valid = Signal() # indicates req.data is valid now
391
392
393 class MemAccessRequest(RecordObject):
394 def __init__(self, cfg, name=None):
395 super().__init__(name=name)
396 self.op = Signal(Op)
397 self.valid = Signal()
398 self.dcbz = Signal()
399 self.real_addr = Signal(cfg.REAL_ADDR_BITS)
400 self.data = Signal(64)
401 self.byte_sel = Signal(8)
402 self.hit_way = Signal(cfg.WAY_BITS)
403 self.same_tag = Signal()
404 self.mmu_req = Signal()
405
406
407 # First stage register, contains state for stage 1 of load hits
408 # and for the state machine used by all other operations
409 class RegStage1(RecordObject):
410 def __init__(self, cfg, name=None):
411 super().__init__(name=name)
412 # Info about the request
413 self.full = Signal() # have uncompleted request
414 self.mmu_req = Signal() # request is from MMU
415 self.req = MemAccessRequest(cfg, name="reqmem")
416
417 # Cache hit state
418 self.hit_way = Signal(cfg.WAY_BITS)
419 self.hit_load_valid = Signal()
420 self.hit_index = Signal(cfg.INDEX_BITS)
421 self.cache_hit = Signal()
422
423 # TLB hit state
424 self.tlb_hit = cfg.TLBHit("tlb_hit")
425 self.tlb_hit_index = Signal(cfg.TLB_SET_BITS)
426
427 # 2-stage data buffer for data forwarded from writes to reads
428 self.forward_data1 = Signal(64)
429 self.forward_data2 = Signal(64)
430 self.forward_sel1 = Signal(8)
431 self.forward_valid1 = Signal()
432 self.forward_way1 = Signal(cfg.WAY_BITS)
433 self.forward_row1 = Signal(cfg.ROW_BITS)
434 self.use_forward1 = Signal()
435 self.forward_sel = Signal(8)
436
437 # Cache miss state (reload state machine)
438 self.state = Signal(State)
439 self.dcbz = Signal()
440 self.write_bram = Signal()
441 self.write_tag = Signal()
442 self.slow_valid = Signal()
443 self.wb = WBMasterOut("wb")
444 self.reload_tag = Signal(cfg.TAG_BITS)
445 self.store_way = Signal(cfg.WAY_BITS)
446 self.store_row = Signal(cfg.ROW_BITS)
447 self.store_index = Signal(cfg.INDEX_BITS)
448 self.end_row_ix = Signal(cfg.ROW_LINE_BITS)
449 self.rows_valid = cfg.RowPerLineValidArray()
450 self.acks_pending = Signal(3)
451 self.inc_acks = Signal()
452 self.dec_acks = Signal()
453
454 # Signals to complete (possibly with error)
455 self.ls_valid = Signal()
456 self.ls_error = Signal()
457 self.mmu_done = Signal()
458 self.mmu_error = Signal()
459 self.cache_paradox = Signal()
460
461 # Signal to complete a failed stcx.
462 self.stcx_fail = Signal()
463
464
465 # Reservation information
466 class Reservation(RecordObject):
467 def __init__(self, cfg, name=None):
468 super().__init__(name=name)
469 self.valid = Signal()
470 self.addr = Signal(64-cfg.LINE_OFF_BITS)
471
472
473 class DTLBUpdate(Elaboratable):
474 def __init__(self, cfg):
475 self.cfg = cfg
476 self.tlbie = Signal()
477 self.tlbwe = Signal()
478 self.doall = Signal()
479 self.tlb_hit = cfg.TLBHit("tlb_hit")
480 self.tlb_req_index = Signal(cfg.TLB_SET_BITS)
481
482 self.repl_way = Signal(cfg.TLB_WAY_BITS)
483 self.eatag = Signal(cfg.TLB_EA_TAG_BITS)
484 self.pte_data = Signal(cfg.TLB_PTE_BITS)
485
486 # read from dtlb array
487 self.tlb_read = Signal()
488 self.tlb_read_index = Signal(cfg.TLB_SET_BITS)
489 self.tlb_way = cfg.TLBRecord("o_tlb_way")
490
491 def elaborate(self, platform):
492 m = Module()
493 comb = m.d.comb
494 sync = m.d.sync
495 cfg = self.cfg
496
497 # there are 3 parts to this:
498 # QTY TLB_NUM_WAYs TAGs - of width (say) 46 bits of Effective Address
499 # QTY TLB_NUM_WAYs PTEs - of width (say) 64 bits
500 # "Valid" bits, one per "way", of QTY TLB_NUM_WAYs. these cannot
501 # be a Memory because they can all be cleared (tlbie, doall), i mean,
502 # we _could_, in theory, by overriding the Reset Signal of the Memory,
503 # hmmm....
504
505 dtlb_valid = cfg.TLBValidArray()
506 tlb_req_index = self.tlb_req_index
507
508 print ("TLB_TAG_WAY_BITS", cfg.TLB_TAG_WAY_BITS)
509 print (" TLB_EA_TAG_BITS", cfg.TLB_EA_TAG_BITS)
510 print (" TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
511 print ("TLB_PTE_WAY_BITS", cfg.TLB_PTE_WAY_BITS)
512 print (" TLB_PTE_BITS", cfg.TLB_PTE_BITS)
513 print (" TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
514
515 # TAG and PTE Memory SRAMs. transparent, write-enables are TLB_NUM_WAYS
516 tagway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_TAG_WAY_BITS,
517 attrs={'syn_ramstyle': "block_ram"})
518 m.submodules.rd_tagway = rd_tagway = tagway.read_port()
519 m.submodules.wr_tagway = wr_tagway = tagway.write_port(
520 granularity=cfg.TLB_EA_TAG_BITS)
521
522 pteway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_PTE_WAY_BITS,
523 attrs={'syn_ramstyle': "block_ram"})
524 m.submodules.rd_pteway = rd_pteway = pteway.read_port()
525 m.submodules.wr_pteway = wr_pteway = pteway.write_port(
526 granularity=cfg.TLB_PTE_BITS)
527
528 # commented out for now, can be put in if Memory.reset can be
529 # used for tlbie&doall to reset the entire Memory to zero in 1 cycle
530 #validm = Memory(depth=TLB_SET_SIZE, width=TLB_NUM_WAYS)
531 #m.submodules.rd_valid = rd_valid = validm.read_port()
532 #m.submodules.wr_valid = wr_valid = validm.write_port(
533 #granularity=1)
534
535 # connect up read and write addresses to Valid/PTE/TAG SRAMs
536 m.d.comb += rd_pteway.addr.eq(self.tlb_read_index)
537 m.d.comb += rd_tagway.addr.eq(self.tlb_read_index)
538 #m.d.comb += rd_valid.addr.eq(self.tlb_read_index)
539 m.d.comb += wr_tagway.addr.eq(tlb_req_index)
540 m.d.comb += wr_pteway.addr.eq(tlb_req_index)
541 #m.d.comb += wr_valid.addr.eq(tlb_req_index)
542
543 updated = Signal()
544 v_updated = Signal()
545 tb_out = Signal(cfg.TLB_TAG_WAY_BITS) # tlb_way_tags_t
546 db_out = Signal(cfg.TLB_NUM_WAYS) # tlb_way_valids_t
547 pb_out = Signal(cfg.TLB_PTE_WAY_BITS) # tlb_way_ptes_t
548 dv = Signal(cfg.TLB_NUM_WAYS) # tlb_way_valids_t
549
550 comb += dv.eq(dtlb_valid[tlb_req_index])
551 comb += db_out.eq(dv)
552
553 with m.If(self.tlbie & self.doall):
554 # clear all valid bits at once
555 # XXX hmmm, validm _could_ use Memory reset here...
556 for i in range(cfg.TLB_SET_SIZE):
557 sync += dtlb_valid[i].eq(0)
558 with m.Elif(self.tlbie):
559 # invalidate just the hit_way
560 with m.If(self.tlb_hit.valid):
561 comb += db_out.bit_select(self.tlb_hit.way, 1).eq(0)
562 comb += v_updated.eq(1)
563 with m.Elif(self.tlbwe):
564 # write to the requested tag and PTE
565 comb += cfg.write_tlb_tag(self.repl_way, tb_out, self.eatag)
566 comb += cfg.write_tlb_pte(self.repl_way, pb_out, self.pte_data)
567 # set valid bit
568 comb += db_out.bit_select(self.repl_way, 1).eq(1)
569
570 comb += updated.eq(1)
571 comb += v_updated.eq(1)
572
573 # above, sometimes valid is requested to be updated but data not
574 # therefore split them out, here. note the granularity thing matches
575 # with the shift-up of the eatag/pte_data into the correct TLB way.
576 # thus it is not necessary to write the entire lot, just the portion
577 # being altered: hence writing the *old* copy of the row is not needed
578 with m.If(updated): # PTE and TAG to be written
579 comb += wr_pteway.data.eq(pb_out)
580 comb += wr_pteway.en.eq(1<<self.repl_way)
581 comb += wr_tagway.data.eq(tb_out)
582 comb += wr_tagway.en.eq(1<<self.repl_way)
583 with m.If(v_updated): # Valid to be written
584 sync += dtlb_valid[tlb_req_index].eq(db_out)
585 #comb += wr_valid.data.eq(db_out)
586 #comb += wr_valid.en.eq(1<<self.repl_way)
587
588 # select one TLB way, use a register here
589 r_delay = Signal()
590 sync += r_delay.eq(self.tlb_read)
591 # first deal with the valids, which are not in a Memory.
592 # the tlb way valid is output on a 1 clock delay with sync,
593 # but we have to explicitly deal with "forwarding" here
594 with m.If(self.tlb_read):
595 with m.If(v_updated): # write *and* read in same cycle: forward
596 sync += self.tlb_way.valid.eq(db_out)
597 with m.Else():
598 sync += self.tlb_way.valid.eq(dtlb_valid[self.tlb_read_index])
599 # now deal with the Memory-read case. the output must remain
600 # valid (stable) even when a read-request is not made, but stable
601 # on a one-clock delay, hence the register
602 r_tlb_way = cfg.TLBRecord("r_tlb_way")
603 with m.If(r_delay):
604 # on one clock delay, capture the contents of the read port(s)
605 comb += self.tlb_way.tag.eq(rd_tagway.data)
606 comb += self.tlb_way.pte.eq(rd_pteway.data)
607 sync += r_tlb_way.tag.eq(rd_tagway.data)
608 sync += r_tlb_way.pte.eq(rd_pteway.data)
609 with m.Else():
610 # ... so that the register can output it when no read is requested
611 # it's rather overkill but better to be safe than sorry
612 comb += self.tlb_way.tag.eq(r_tlb_way.tag)
613 comb += self.tlb_way.pte.eq(r_tlb_way.pte)
614 #comb += self.tlb_way.eq(r_tlb_way)
615
616 return m
617
618
619 class DCachePendingHit(Elaboratable):
620
621 def __init__(self, cfg, tlb_way,
622 cache_i_validdx, cache_tag_set,
623 req_addr):
624
625 self.go = Signal()
626 self.virt_mode = Signal()
627 self.is_hit = Signal()
628 self.tlb_hit = cfg.TLBHit("tlb_hit")
629 self.hit_way = Signal(cfg.WAY_BITS)
630 self.rel_match = Signal()
631 self.req_index = Signal(cfg.INDEX_BITS)
632 self.reload_tag = Signal(cfg.TAG_BITS)
633
634 self.tlb_way = tlb_way
635 self.cache_i_validdx = cache_i_validdx
636 self.cache_tag_set = cache_tag_set
637 self.req_addr = req_addr
638 self.cfg = cfg
639
640 def elaborate(self, platform):
641 m = Module()
642 comb = m.d.comb
643 sync = m.d.sync
644
645 go = self.go
646 virt_mode = self.virt_mode
647 is_hit = self.is_hit
648 tlb_way = self.tlb_way
649 cache_i_validdx = self.cache_i_validdx
650 cache_tag_set = self.cache_tag_set
651 req_addr = self.req_addr
652 tlb_hit = self.tlb_hit
653 hit_way = self.hit_way
654 rel_match = self.rel_match
655 req_index = self.req_index
656 reload_tag = self.reload_tag
657 cfg = self.cfg
658
659 hit_set = Array(Signal(name="hit_set_%d" % i) \
660 for i in range(cfg.TLB_NUM_WAYS))
661 rel_matches = Array(Signal(name="rel_matches_%d" % i) \
662 for i in range(cfg.TLB_NUM_WAYS))
663 hit_way_set = cfg.HitWaySet()
664
665 # Test if pending request is a hit on any way
666 # In order to make timing in virtual mode,
667 # when we are using the TLB, we compare each
668 # way with each of the real addresses from each way of
669 # the TLB, and then decide later which match to use.
670
671 with m.If(virt_mode):
672 for j in range(cfg.TLB_NUM_WAYS): # tlb_num_way_t
673 s_tag = Signal(cfg.TAG_BITS, name="s_tag%d" % j)
674 s_hit = Signal(name="s_hit%d" % j)
675 s_pte = Signal(cfg.TLB_PTE_BITS, name="s_pte%d" % j)
676 s_ra = Signal(cfg.REAL_ADDR_BITS, name="s_ra%d" % j)
677 # read the PTE, calc the Real Address, get the tag
678 comb += s_pte.eq(cfg.read_tlb_pte(j, tlb_way.pte))
679 comb += s_ra.eq(Cat(req_addr[0:cfg.TLB_LG_PGSZ],
680 s_pte[cfg.TLB_LG_PGSZ:cfg.REAL_ADDR_BITS]))
681 comb += s_tag.eq(cfg.get_tag(s_ra))
682 # for each way check the tag against the cache tag set
683 for i in range(cfg.NUM_WAYS): # way_t
684 is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
685 comb += is_tag_hit.eq(go & cache_i_validdx[i] &
686 (cfg.read_tag(i, cache_tag_set) == s_tag)
687 & (tlb_way.valid[j]))
688 with m.If(is_tag_hit):
689 comb += hit_way_set[j].eq(i)
690 comb += s_hit.eq(1)
691 comb += hit_set[j].eq(s_hit)
692 comb += rel_matches[j].eq(s_tag == reload_tag)
693 with m.If(tlb_hit.valid):
694 comb += is_hit.eq(hit_set[tlb_hit.way])
695 comb += hit_way.eq(hit_way_set[tlb_hit.way])
696 comb += rel_match.eq(rel_matches[tlb_hit.way])
697 with m.Else():
698 s_tag = Signal(cfg.TAG_BITS)
699 comb += s_tag.eq(cfg.get_tag(req_addr))
700 for i in range(cfg.NUM_WAYS): # way_t
701 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
702 comb += is_tag_hit.eq(go & cache_i_validdx[i] &
703 (cfg.read_tag(i, cache_tag_set) == s_tag))
704 with m.If(is_tag_hit):
705 comb += hit_way.eq(i)
706 comb += is_hit.eq(1)
707 with m.If(s_tag == reload_tag):
708 comb += rel_match.eq(1)
709
710 return m
711
712
713 class DCache(Elaboratable, DCacheConfig):
714 """Set associative dcache write-through
715
716 TODO (in no specific order):
717 * See list in icache.vhdl
718 * Complete load misses on the cycle when WB data comes instead of
719 at the end of line (this requires dealing with requests coming in
720 while not idle...)
721 """
722 def __init__(self, pspec=None):
723 self.d_in = LoadStore1ToDCacheType("d_in")
724 self.d_out = DCacheToLoadStore1Type("d_out")
725
726 self.m_in = MMUToDCacheType("m_in")
727 self.m_out = DCacheToMMUType("m_out")
728
729 self.stall_out = Signal()
730 self.any_stall_out = Signal()
731 self.dreq_when_stall = Signal()
732 self.mreq_when_stall = Signal()
733
734 # standard naming (wired to non-standard for compatibility)
735 self.bus = Interface(addr_width=32,
736 data_width=64,
737 granularity=8,
738 features={'stall'},
739 #alignment=0,
740 name="dcache")
741
742 self.log_out = Signal(20)
743
744 # test if small cache to be enabled
745 self.small_cache = (hasattr(pspec, "small_cache") and
746 (pspec.small_cache == True))
747 # test if microwatt compatibility is to be enabled
748 self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
749 (pspec.microwatt_compat == True))
750 # test if fabric compatibility is to be enabled
751 self.fabric_compat = (hasattr(pspec, "fabric_compat") and
752 (pspec.fabric_compat == True))
753
754 XLEN = pspec.XLEN
755 TLB_SET_SIZE = 8
756 TLB_NUM_WAYS = 2
757 NUM_LINES = 8
758 NUM_WAYS = 2
759
760 if self.small_cache:
761 # reduce way sizes and num lines to ridiculously small
762 TLB_SET_SIZE = 2
763 TLB_NUM_WAYS = 1
764 NUM_LINES = 2
765 NUM_WAYS = 1
766 if self.microwatt_compat or self.fabric_compat:
767 # reduce way sizes
768 NUM_WAYS = 1
769 TLB_NUM_WAYS = 1
770
771 super().__init__(TLB_SET_SIZE=TLB_SET_SIZE,
772 # XLEN=XLEN, # TODO
773 TLB_NUM_WAYS = TLB_NUM_WAYS,
774 NUM_LINES = NUM_LINES,
775 NUM_WAYS = NUM_WAYS
776 )
777
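# Illustrative usage sketch (not part of the original file): DCache needs
# a pspec object providing at least XLEN, e.g.
#
#     class _PSpec:              # hypothetical stand-in pspec
#         XLEN = 64
#
#     dut = DCache(_PSpec())
#     print(rtlil.convert(dut))  # convert to RTLIL via nmigen.cli.rtlil
#
# the optional pspec attributes small_cache, microwatt_compat and
# fabric_compat (tested with hasattr above) shrink the geometry when set.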
778 def stage_0(self, m, r0, r1, r0_full):
779 """Latch the request in r0.req as long as we're not stalling
780 """
781 comb = m.d.comb
782 sync = m.d.sync
783 d_in, d_out, m_in = self.d_in, self.d_out, self.m_in
784
785 r = RegStage0("stage0")
786
787 # TODO, this goes in unit tests and formal proofs
788 with m.If(d_in.valid & m_in.valid):
789 sync += Display("request collision loadstore vs MMU")
790
791 with m.If(m_in.valid):
792 comb += r.req.valid.eq(1)
793 comb += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))# no invalidate
794 comb += r.req.dcbz.eq(0)
795 comb += r.req.nc.eq(0)
796 comb += r.req.reserve.eq(0)
797 comb += r.req.virt_mode.eq(0)
798 comb += r.req.priv_mode.eq(1)
799 comb += r.req.addr.eq(m_in.addr)
800 comb += r.req.data.eq(m_in.pte)
801 comb += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
802 comb += r.tlbie.eq(m_in.tlbie)
803 comb += r.doall.eq(m_in.doall)
804 comb += r.tlbld.eq(m_in.tlbld)
805 comb += r.mmu_req.eq(1)
806 comb += r.d_valid.eq(1)
807 m.d.sync += Display(" DCACHE req mmu addr %x pte %x ld %d",
808 m_in.addr, m_in.pte, r.req.load)
809
810 with m.Else():
811 comb += r.req.eq(d_in)
812 comb += r.req.data.eq(0)
813 comb += r.tlbie.eq(0)
814 comb += r.doall.eq(0)
815 comb += r.tlbld.eq(0)
816 comb += r.mmu_req.eq(0)
817 comb += r.d_valid.eq(0)
818
819 sync += r0_full.eq(0)
820 with m.If((~r1.full & ~d_in.hold) | ~r0_full):
821 sync += r0.eq(r)
822 sync += r0_full.eq(r.req.valid)
823 with m.Elif(~r0.d_valid):
824 # Sample data the cycle after a request comes in from loadstore1.
825 # If another request has come in already then the data will get
826 # put directly into req.data below.
827 sync += r0.req.data.eq(d_in.data)
828 sync += r0.d_valid.eq(1)
829 with m.If(d_in.valid):
830 m.d.sync += Display(" DCACHE req cache "
831 "virt %d addr %x data %x ld %d",
832 r.req.virt_mode, r.req.addr,
833 r.req.data, r.req.load)
834
835 def tlb_read(self, m, r0_stall, tlb_way):
836 """TLB
837 Operates in the second cycle on the request latched in r0.req.
838 TLB updates write the entry at the end of the second cycle.
839 """
840 comb = m.d.comb
841 sync = m.d.sync
842 m_in, d_in = self.m_in, self.d_in
843
844 addrbits = Signal(self.TLB_SET_BITS)
845
846 amin = self.TLB_LG_PGSZ
847 amax = self.TLB_LG_PGSZ + self.TLB_SET_BITS
848
849 with m.If(m_in.valid):
850 comb += addrbits.eq(m_in.addr[amin : amax])
851 with m.Else():
852 comb += addrbits.eq(d_in.addr[amin : amax])
853
854 # If we have any op and the previous op isn't finished,
855 # then keep the same output for next cycle.
856 d = self.dtlb_update
857 comb += d.tlb_read_index.eq(addrbits)
858 comb += d.tlb_read.eq(~r0_stall)
859 comb += tlb_way.eq(d.tlb_way)
860
861 def maybe_tlb_plrus(self, m, r1, tlb_plru_victim, tlb_req_index):
862 """Generate TLB PLRUs
863 """
864 comb = m.d.comb
865 sync = m.d.sync
866
867 if self.TLB_NUM_WAYS == 0:
868 return
869
870 # suite of PLRUs with a selection and output mechanism
871 tlb_plrus = PLRUs("d_tlb", self.TLB_SET_SIZE, self.TLB_WAY_BITS)
872 m.submodules.tlb_plrus = tlb_plrus
873 comb += tlb_plrus.way.eq(r1.tlb_hit.way)
874 comb += tlb_plrus.valid.eq(r1.tlb_hit.valid)
875 comb += tlb_plrus.index.eq(r1.tlb_hit_index)
876 comb += tlb_plrus.isel.eq(tlb_req_index) # select victim
877 comb += tlb_plru_victim.eq(tlb_plrus.o_index) # selected victim
878
879 def tlb_search(self, m, tlb_req_index, r0, r0_valid,
880 tlb_way,
881 pte, tlb_hit, valid_ra, perm_attr, ra):
882
883 comb = m.d.comb
884
885 hitway = Signal(self.TLB_WAY_BITS)
886 hit = Signal()
887 eatag = Signal(self.TLB_EA_TAG_BITS)
888
889 self.TLB_LG_END = self.TLB_LG_PGSZ + self.TLB_SET_BITS
890 r0_req_addr = r0.req.addr[self.TLB_LG_PGSZ : self.TLB_LG_END]
891 comb += tlb_req_index.eq(r0_req_addr)
892 comb += eatag.eq(r0.req.addr[self.TLB_LG_END : 64 ])
893
894 for i in range(self.TLB_NUM_WAYS):
895 is_tag_hit = Signal(name="is_tag_hit%d" % i)
896 tlb_tag = Signal(self.TLB_EA_TAG_BITS, name="tlb_tag%d" % i)
897 comb += tlb_tag.eq(self.read_tlb_tag(i, tlb_way.tag))
898 comb += is_tag_hit.eq((tlb_way.valid[i]) & (tlb_tag == eatag))
899 with m.If(is_tag_hit):
900 comb += hitway.eq(i)
901 comb += hit.eq(1)
902
903 comb += tlb_hit.valid.eq(hit & r0_valid)
904 comb += tlb_hit.way.eq(hitway)
905
906 with m.If(tlb_hit.valid):
907 comb += pte.eq(self.read_tlb_pte(hitway, tlb_way.pte))
908 comb += valid_ra.eq(tlb_hit.valid | ~r0.req.virt_mode)
909
910 with m.If(r0.req.virt_mode):
911 comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
912 r0.req.addr[self.ROW_OFF_BITS:self.TLB_LG_PGSZ],
913 pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))
914 comb += perm_attr.reference.eq(pte[8])
915 comb += perm_attr.changed.eq(pte[7])
916 comb += perm_attr.nocache.eq(pte[5])
917 comb += perm_attr.priv.eq(pte[3])
918 comb += perm_attr.rd_perm.eq(pte[2])
919 comb += perm_attr.wr_perm.eq(pte[1])
920 with m.Else():
921 comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
922 r0.req.addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS]))
923 comb += perm_attr.reference.eq(1)
924 comb += perm_attr.changed.eq(1)
925 comb += perm_attr.nocache.eq(0)
926 comb += perm_attr.priv.eq(1)
927 comb += perm_attr.rd_perm.eq(1)
928 comb += perm_attr.wr_perm.eq(1)
929
930 with m.If(valid_ra):
931 m.d.sync += Display("DCACHE virt mode %d hit %d ra %x pte %x",
932 r0.req.virt_mode, tlb_hit.valid, ra, pte)
933 m.d.sync += Display(" perm ref=%d", perm_attr.reference)
934 m.d.sync += Display(" perm chg=%d", perm_attr.changed)
935 m.d.sync += Display(" perm noc=%d", perm_attr.nocache)
936 m.d.sync += Display(" perm prv=%d", perm_attr.priv)
937 m.d.sync += Display(" perm rdp=%d", perm_attr.rd_perm)
938 m.d.sync += Display(" perm wrp=%d", perm_attr.wr_perm)
939
940 def tlb_update(self, m, r0_valid, r0, tlb_req_index,
941 tlb_hit, tlb_plru_victim):
942
943 comb = m.d.comb
944 sync = m.d.sync
945
946 tlbie = Signal()
947 tlbwe = Signal()
948
949 comb += tlbie.eq(r0_valid & r0.tlbie)
950 comb += tlbwe.eq(r0_valid & r0.tlbld)
951
952 d = self.dtlb_update
953
954 comb += d.tlbie.eq(tlbie)
955 comb += d.tlbwe.eq(tlbwe)
956 comb += d.doall.eq(r0.doall)
957 comb += d.tlb_hit.eq(tlb_hit)
958 comb += d.tlb_req_index.eq(tlb_req_index)
959
960 with m.If(tlb_hit.valid):
961 comb += d.repl_way.eq(tlb_hit.way)
962 with m.Else():
963 comb += d.repl_way.eq(tlb_plru_victim)
964 comb += d.eatag.eq(r0.req.addr[self.TLB_LG_PGSZ + self.TLB_SET_BITS:64])
965 comb += d.pte_data.eq(r0.req.data)
966
967 def maybe_plrus(self, m, r1, plru_victim):
968 """Generate PLRUs
969 """
970 comb = m.d.comb
971 sync = m.d.sync
972
973 if self.TLB_NUM_WAYS == 0:
974 return
975
976 # suite of PLRUs with a selection and output mechanism
977 m.submodules.plrus = plrus = PLRUs("dtag", self.NUM_LINES,
978 self.WAY_BITS)
979 comb += plrus.way.eq(r1.hit_way)
980 comb += plrus.valid.eq(r1.cache_hit)
981 comb += plrus.index.eq(r1.hit_index)
982 comb += plrus.isel.eq(r1.store_index) # select victim
983 comb += plru_victim.eq(plrus.o_index) # selected victim
984
985 def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set):
986 """Cache tag RAM read port
987 """
988 comb = m.d.comb
989 sync = m.d.sync
990
991 m_in, d_in = self.m_in, self.d_in
992
993 # synchronous tag read-port: NOT TRANSPARENT (cannot pass through
994 # write-to-a-read at the same time), seems to pass tests ok
995 m.submodules.rd_tag = rd_tag = self.tagmem.read_port(transparent=False)
996
997 index = Signal(self.INDEX_BITS)
998
999 with m.If(r0_stall):
1000 comb += index.eq(req_index)
1001 with m.Elif(m_in.valid):
1002 comb += index.eq(self.get_index(m_in.addr))
1003 with m.Else():
1004 comb += index.eq(self.get_index(d_in.addr))
1005 comb += rd_tag.addr.eq(index)
1006 comb += cache_tag_set.eq(rd_tag.data) # read-port is a 1-clock delay
1007
1008 def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
1009 r0_valid, r1, cache_valids, replace_way,
1010 use_forward1_next, use_forward2_next,
1011 req_hit_way, plru_victim, rc_ok, perm_attr,
1012 valid_ra, perm_ok, access_ok, req_op, req_go,
1013 tlb_hit, tlb_way, cache_tag_set,
1014 cancel_store, req_same_tag, r0_stall, early_req_row):
1015 """Cache request parsing and hit detection
1016 """
1017
1018 comb = m.d.comb
1019 m_in, d_in = self.m_in, self.d_in
1020
1021 is_hit = Signal()
1022 hit_way = Signal(self.WAY_BITS)
1023 op = Signal(Op)
1024 opsel = Signal(3)
1025 go = Signal()
1026 nc = Signal()
1027 cache_i_validdx = Signal(self.NUM_WAYS)
1028
1029 # Extract line, row and tag from request
1030 comb += req_index.eq(self.get_index(r0.req.addr))
1031 comb += req_row.eq(self.get_row(r0.req.addr))
1032 comb += req_tag.eq(self.get_tag(ra))
1033
1034 if False: # display on comb is a bit... busy.
1035 comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
1036 r0.req.addr, ra, req_index, req_tag, req_row)
1037
1038 comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
1039 comb += cache_i_validdx.eq(cache_valids[req_index])
1040
1041 m.submodules.dcache_pend = dc = DCachePendingHit(self, tlb_way,
1042 cache_i_validdx, cache_tag_set,
1043 r0.req.addr)
1044 comb += dc.tlb_hit.eq(tlb_hit)
1045 comb += dc.reload_tag.eq(r1.reload_tag)
1046 comb += dc.virt_mode.eq(r0.req.virt_mode)
1047 comb += dc.go.eq(go)
1048 comb += dc.req_index.eq(req_index)
1049
1050 comb += is_hit.eq(dc.is_hit)
1051 comb += hit_way.eq(dc.hit_way)
1052 comb += req_same_tag.eq(dc.rel_match)
1053
1054 # See if the request matches the line currently being reloaded
1055 with m.If((r1.state == State.RELOAD_WAIT_ACK) &
1056 (req_index == r1.store_index) & req_same_tag):
1057 # For a store, consider this a hit even if the row isn't
1058 # valid since it will be by the time we perform the store.
1059 # For a load, check the appropriate row valid bit.
1060 rrow = Signal(self.ROW_LINE_BITS)
1061 comb += rrow.eq(req_row)
1062 valid = r1.rows_valid[rrow]
1063 comb += is_hit.eq((~r0.req.load) | valid)
1064 comb += hit_way.eq(replace_way)
1065
1066 # Whether to use forwarded data for a load or not
1067 with m.If((self.get_row(r1.req.real_addr) == req_row) &
1068 (r1.req.hit_way == hit_way)):
1069 # Only need to consider r1.write_bram here, since if we
1070 # are writing refill data here, then we don't have a
1071 # cache hit this cycle on the line being refilled.
1072 # (There is the possibility that the load following the
1073 # load miss that started the refill could be to the old
1074 # contents of the victim line, since it is a couple of
1075 # cycles after the refill starts before we see the updated
1076 # cache tag. In that case we don't use the bypass.)
1077 comb += use_forward1_next.eq(r1.write_bram)
1078 with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
1079 comb += use_forward2_next.eq(r1.forward_valid1)
1080
1081 # The way that matched on a hit
1082 comb += req_hit_way.eq(hit_way)
1083
1084 # The way to replace on a miss
1085 with m.If(r1.write_tag):
1086 comb += replace_way.eq(plru_victim)
1087 with m.Else():
1088 comb += replace_way.eq(r1.store_way)
1089
1090 # work out whether we have permission for this access
1091 # NB we don't yet implement AMR, thus no KUAP
1092 comb += rc_ok.eq(perm_attr.reference
1093 & (r0.req.load | perm_attr.changed))
1094 comb += perm_ok.eq((r0.req.priv_mode | (~perm_attr.priv)) &
1095 (perm_attr.wr_perm |
1096 (r0.req.load & perm_attr.rd_perm)))
1097 comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
1098
1099 # Combine the request and cache hit status to decide what
1100 # operation needs to be done
1101 comb += nc.eq(r0.req.nc | perm_attr.nocache)
1102 comb += op.eq(Op.OP_NONE)
1103 with m.If(go):
1104 with m.If(~access_ok):
1105 m.d.sync += Display("DCACHE access fail valid_ra=%d p=%d rc=%d",
1106 valid_ra, perm_ok, rc_ok)
1107 comb += op.eq(Op.OP_BAD)
1108 with m.Elif(cancel_store):
1109 m.d.sync += Display("DCACHE cancel store")
1110 comb += op.eq(Op.OP_STCX_FAIL)
1111 with m.Else():
1112 m.d.sync += Display("DCACHE valid_ra=%d nc=%d ld=%d",
1113 valid_ra, nc, r0.req.load)
1114 comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
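# opsel bit 0 = is_hit, bit 1 = nc, bit 2 = load, so for example
# 0b101 = cacheable load that hit, 0b100 = cacheable load miss,
# 0b001 = store hit, and any nc-plus-hit combination (0bx11) is OP_BAD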
1115 with m.Switch(opsel):
1116 with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
1117 with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
1118 with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
1119 with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
1120 with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
1121 with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
1122 with m.Case(0b011): comb += op.eq(Op.OP_BAD)
1123 with m.Case(0b111): comb += op.eq(Op.OP_BAD)
1124 comb += req_op.eq(op)
1125 comb += req_go.eq(go)
1126
1127 # Version of the row number that is valid one cycle earlier
1128 # in the cases where we need to read the cache data BRAM.
1129 # If we're stalling then we need to keep reading the last
1130 # row requested.
1131 with m.If(~r0_stall):
1132 with m.If(m_in.valid):
1133 comb += early_req_row.eq(self.get_row(m_in.addr))
1134 with m.Else():
1135 comb += early_req_row.eq(self.get_row(d_in.addr))
1136 with m.Else():
1137 comb += early_req_row.eq(req_row)
1138
1139 def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
1140 r0_valid, r0, reservation):
1141 """Handle load-with-reservation and store-conditional instructions
1142 """
1143 comb = m.d.comb
1144
1145 with m.If(r0_valid & r0.req.reserve):
1146 # XXX generate alignment interrupt if address
1147 # is not aligned XXX or if r0.req.nc = '1'
1148 with m.If(r0.req.load):
1149 comb += set_rsrv.eq(r0.req.atomic_last) # load with reservation
1150 with m.Else():
1151 comb += clear_rsrv.eq(r0.req.atomic_last) # store conditional
1152 with m.If((~reservation.valid) |
1153 (r0.req.addr[self.LINE_OFF_BITS:64] !=
1154 reservation.addr)):
1155 comb += cancel_store.eq(1)
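# (illustrative note) the reservation granule here is a cache line: only
# addr[LINE_OFF_BITS:] is stored and compared, so a store-conditional to a
# different line, or with no valid reservation, raises cancel_store, which
# the request decoder turns into Op.OP_STCX_FAIL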
1156
1157 def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
1158 reservation, r0):
1159 comb = m.d.comb
1160 sync = m.d.sync
1161
1162 with m.If(r0_valid & access_ok):
1163 with m.If(clear_rsrv):
1164 sync += reservation.valid.eq(0)
1165 with m.Elif(set_rsrv):
1166 sync += reservation.valid.eq(1)
1167 sync += reservation.addr.eq(r0.req.addr[self.LINE_OFF_BITS:64])
1168
1169 def writeback_control(self, m, r1, cache_out_row):
1170 """Return data for loads & completion control logic
1171 """
1172 comb = m.d.comb
1173 sync = m.d.sync
1174 d_out, m_out = self.d_out, self.m_out
1175
1176 data_out = Signal(64)
1177 data_fwd = Signal(64)
1178
1179 # Use the bypass if we are reading the row that was
1180 # written 1 or 2 cycles ago, including for the
1181 # slow_valid = 1 case (i.e. completing a load
1182 # miss or a non-cacheable load).
1183 with m.If(r1.use_forward1):
1184 comb += data_fwd.eq(r1.forward_data1)
1185 with m.Else():
1186 comb += data_fwd.eq(r1.forward_data2)
1187
1188 comb += data_out.eq(cache_out_row)
1189
1190 for i in range(8):
1191 with m.If(r1.forward_sel[i]):
1192 dsel = data_fwd.word_select(i, 8)
1193 comb += data_out.word_select(i, 8).eq(dsel)
1194
1195 # DCache output to LoadStore
1196 comb += d_out.valid.eq(r1.ls_valid)
1197 comb += d_out.data.eq(data_out)
1198 comb += d_out.store_done.eq(~r1.stcx_fail)
1199 comb += d_out.error.eq(r1.ls_error)
1200 comb += d_out.cache_paradox.eq(r1.cache_paradox)
1201
1202 # Outputs to MMU
1203 comb += m_out.done.eq(r1.mmu_done)
1204 comb += m_out.err.eq(r1.mmu_error)
1205 comb += m_out.data.eq(data_out)
1206
1207 # We have a valid load or store hit or we just completed
1208 # a slow op such as a load miss, a NC load or a store
1209 #
1210 # Note: the load hit is delayed by one cycle. However it
1211 # can still not collide with r.slow_valid (well unless I
1212 # miscalculated) because slow_valid can only be set on a
1213 # subsequent request and not on its first cycle (the state
1214 # machine must have advanced), which makes slow_valid
1215 # at least 2 cycles from the previous hit_load_valid.
1216
1217 # Sanity: Only one of these must be set in any given cycle
1218
1219 if False: # TODO: need Display to get this to work
1220 assert (r1.slow_valid & r1.stcx_fail) != 1, \
1221 "unexpected slow_valid collision with stcx_fail"
1222
1223 assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
1224 "unexpected hit_load_delayed collision with slow_valid"
1225
1226 with m.If(~r1.mmu_req):
1227 # Request came from loadstore1...
1228 # Load hit case is the standard path
1229 with m.If(r1.hit_load_valid):
1230 sync += Display("completing load hit data=%x", data_out)
1231
1232 # error cases complete without stalling
1233 with m.If(r1.ls_error):
1234 with m.If(r1.dcbz):
1235 sync += Display("completing dcbz with error")
1236 with m.Else():
1237 sync += Display("completing ld/st with error")
1238
1239 # Slow ops (load miss, NC, stores)
1240 with m.If(r1.slow_valid):
1241 sync += Display("completing store or load miss adr=%x data=%x",
1242 r1.req.real_addr, data_out)
1243
1244 with m.Else():
1245 # Request came from MMU
1246 with m.If(r1.hit_load_valid):
1247 sync += Display("completing load hit to MMU, data=%x",
1248 m_out.data)
1249 # error cases complete without stalling
1250 with m.If(r1.mmu_error):
1251 sync += Display("completing MMU ld with error")
1252
1253 # Slow ops (i.e. load miss)
1254 with m.If(r1.slow_valid):
1255 sync += Display("completing MMU load miss, adr=%x data=%x",
1256 r1.req.real_addr, m_out.data)
1257
1258 def rams(self, m, r1, early_req_row, cache_out_row, replace_way):
1259 """rams
1260 Generate a cache RAM for each way. This handles the normal
1261 reads, writes from reloads and the special store-hit update
1262 path as well.
1263
1264 Note: the BRAMs have an extra read buffer, meaning the output
1265 is pipelined an extra cycle. This differs from the
1266 icache. The writeback logic needs to take that into
1267 account by using 1-cycle delayed signals for load hits.
1268 """
1269 comb = m.d.comb
1270 bus = self.bus
1271
1272 # Binary-to-Unary one-hot decoders here. the replace-way one-hot is gated
1273 # (enabled) by bus.ack, not-write-bram, and state RELOAD_WAIT_ACK
1274 m.submodules.rams_replace_way_e = rwe = Decoder(self.NUM_WAYS)
1275 comb += rwe.n.eq(~((r1.state == State.RELOAD_WAIT_ACK) & bus.ack &
1276 ~r1.write_bram))
1277 comb += rwe.i.eq(replace_way)
1278
1279 m.submodules.rams_hit_way_e = hwe = Decoder(self.NUM_WAYS)
1280 comb += hwe.i.eq(r1.hit_way)
1281
1282 # this one is gated with write_bram, and replace_way_e can never be
1283 # set at the same time. that means that do_write can OR the outputs
1284 m.submodules.rams_hit_req_way_e = hre = Decoder(self.NUM_WAYS)
1285 comb += hre.n.eq(~r1.write_bram) # Decoder.n is inverted
1286 comb += hre.i.eq(r1.req.hit_way)
1287
1288 # common Signals
1289 do_read = Signal()
1290 wr_addr = Signal(self.ROW_BITS)
1291 wr_data = Signal(WB_DATA_BITS)
1292 wr_sel = Signal(self.ROW_SIZE)
1293 rd_addr = Signal(self.ROW_BITS)
1294
1295 comb += do_read.eq(1) # always enable
1296 comb += rd_addr.eq(early_req_row)
1297
1298 # Write mux:
1299 #
1300 # Defaults to wishbone read responses (cache refill)
1301 #
1302 # For timing, the mux on wr_data/sel/addr is not
1303 # dependent on anything other than the current state.
1304
1305 with m.If(r1.write_bram):
1306 # Write store data to BRAM. This happens one
1307 # cycle after the store is in r0.
1308 comb += wr_data.eq(r1.req.data)
1309 comb += wr_sel.eq(r1.req.byte_sel)
1310 comb += wr_addr.eq(self.get_row(r1.req.real_addr))
1311
1312 with m.Else():
1313 # Otherwise, we might be doing a reload or a DCBZ
1314 with m.If(r1.dcbz):
1315 comb += wr_data.eq(0)
1316 with m.Else():
1317 comb += wr_data.eq(bus.dat_r)
1318 comb += wr_addr.eq(r1.store_row)
1319 comb += wr_sel.eq(~0) # all 1s
1320
1321 # set up Cache Rams
1322 for i in range(self.NUM_WAYS):
1323 do_write = Signal(name="do_wr%d" % i)
1324 wr_sel_m = Signal(self.ROW_SIZE, name="wr_sel_m_%d" % i)
1325 d_out= Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t
1326
1327 way = CacheRam(self.ROW_BITS, WB_DATA_BITS, ADD_BUF=True, ram_num=i)
1328 m.submodules["cacheram_%d" % i] = way
1329
1330 comb += way.rd_en.eq(do_read)
1331 comb += way.rd_addr.eq(rd_addr)
1332 comb += d_out.eq(way.rd_data_o)
1333 comb += way.wr_sel.eq(wr_sel_m)
1334 comb += way.wr_addr.eq(wr_addr)
1335 comb += way.wr_data.eq(wr_data)
1336
1337 # Cache hit reads
1338 with m.If(hwe.o[i]):
1339 comb += cache_out_row.eq(d_out)
1340
1341 # these are mutually-exclusive via their Decoder-enablers
1342 # (note: Decoder-enable is inverted)
1343 comb += do_write.eq(hre.o[i] | rwe.o[i])
1344
1345 # Mask write selects with do_write since BRAM
1346 # doesn't have a global write-enable
1347 with m.If(do_write):
1348 comb += wr_sel_m.eq(wr_sel)
1349
1350 # Cache hit synchronous machine for the easy case.
1351 # This handles load hits.
1352 # It also handles error cases (TLB miss, cache paradox)
1353 def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
1354 req_hit_way, req_index, req_tag, access_ok,
1355 tlb_hit, tlb_req_index):
1356 comb = m.d.comb
1357 sync = m.d.sync
1358
1359 with m.If(req_op != Op.OP_NONE):
1360 sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
1361 req_op, r0.req.addr, r0.req.nc,
1362 req_index, req_tag, req_hit_way)
1363
1364 with m.If(r0_valid):
1365 sync += r1.mmu_req.eq(r0.mmu_req)
1366
1367 # Fast path for load/store hits.
1368 # Set signals for the writeback controls.
1369 sync += r1.hit_way.eq(req_hit_way)
1370 sync += r1.hit_index.eq(req_index)
1371
1372 sync += r1.hit_load_valid.eq(req_op == Op.OP_LOAD_HIT)
1373 sync += r1.cache_hit.eq((req_op == Op.OP_LOAD_HIT) |
1374 (req_op == Op.OP_STORE_HIT))
1375
1376 with m.If(req_op == Op.OP_BAD):
1377 sync += Display("Signalling ld/st error "
1378 "ls_error=%i mmu_error=%i cache_paradox=%i",
1379 ~r0.mmu_req,r0.mmu_req,access_ok)
1380 sync += r1.ls_error.eq(~r0.mmu_req)
1381 sync += r1.mmu_error.eq(r0.mmu_req)
1382 sync += r1.cache_paradox.eq(access_ok)
1383 with m.Else():
1384 sync += r1.ls_error.eq(0)
1385 sync += r1.mmu_error.eq(0)
1386 sync += r1.cache_paradox.eq(0)
1387
1388 sync += r1.stcx_fail.eq(req_op == Op.OP_STCX_FAIL)
1389
1390 # Record TLB hit information for updating TLB PLRU
1391 sync += r1.tlb_hit.eq(tlb_hit)
1392 sync += r1.tlb_hit_index.eq(tlb_req_index)
1393
1394 # Memory accesses are handled by this state machine:
1395 #
1396 # * Cache load miss/reload (in conjunction with "rams")
1397 # * Load hits for non-cachable forms
1398 # * Stores (the collision case is handled in "rams")
1399 #
1400 # All wishbone requests generation is done here.
1401 # This machine operates at stage 1.
1402 def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
1403 r0, replace_way,
1404 req_hit_way, req_same_tag,
1405 r0_valid, req_op, cache_valids, req_go, ra):
1406
1407 comb = m.d.comb
1408 sync = m.d.sync
1409 bus = self.bus
1410 d_in = self.d_in
1411
1412 m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
1413 granularity=self.TAG_WIDTH)
1414
1415 req = MemAccessRequest(self, "mreq_ds")
1416
1417 r1_next_cycle = Signal()
1418 req_row = Signal(self.ROW_BITS)
1419 req_idx = Signal(self.INDEX_BITS)
1420 req_tag = Signal(self.TAG_BITS)
1421 comb += req_idx.eq(self.get_index(req.real_addr))
1422 comb += req_row.eq(self.get_row(req.real_addr))
1423 comb += req_tag.eq(self.get_tag(req.real_addr))
1424
1425 sync += r1.use_forward1.eq(use_forward1_next)
1426 sync += r1.forward_sel.eq(0)
1427
1428 with m.If(use_forward1_next):
1429 sync += r1.forward_sel.eq(r1.req.byte_sel)
1430 with m.Elif(use_forward2_next):
1431 sync += r1.forward_sel.eq(r1.forward_sel1)
1432
1433 sync += r1.forward_data2.eq(r1.forward_data1)
1434 with m.If(r1.write_bram):
1435 sync += r1.forward_data1.eq(r1.req.data)
1436 sync += r1.forward_sel1.eq(r1.req.byte_sel)
1437 sync += r1.forward_way1.eq(r1.req.hit_way)
1438 sync += r1.forward_row1.eq(self.get_row(r1.req.real_addr))
1439 sync += r1.forward_valid1.eq(1)
1440 with m.Else():
1441 with m.If(r1.dcbz):
1442 sync += r1.forward_data1.eq(0)
1443 with m.Else():
1444 sync += r1.forward_data1.eq(bus.dat_r)
1445 sync += r1.forward_sel1.eq(~0) # all 1s
1446 sync += r1.forward_way1.eq(replace_way)
1447 sync += r1.forward_row1.eq(r1.store_row)
1448 sync += r1.forward_valid1.eq(0)
1449
1450 # One cycle pulses reset
1451 sync += r1.slow_valid.eq(0)
1452 sync += r1.write_bram.eq(0)
1453 sync += r1.inc_acks.eq(0)
1454 sync += r1.dec_acks.eq(0)
1455
1456 sync += r1.ls_valid.eq(0)
1457 # complete tlbies and TLB loads in the third cycle
1458 sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
1459
1460 with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
1461 with m.If(r0.mmu_req):
1462 sync += r1.mmu_done.eq(1)
1463 with m.Else():
1464 sync += r1.ls_valid.eq(1)
1465
1466 with m.If(r1.write_tag):
1467 # Store new tag in selected way
1468 replace_way_onehot = Signal(self.NUM_WAYS)
1469 comb += replace_way_onehot.eq(1<<replace_way)
1470 ct = Signal(self.TAG_RAM_WIDTH)
1471 comb += ct.eq(r1.reload_tag << (replace_way*self.TAG_WIDTH))
1472 comb += wr_tag.en.eq(replace_way_onehot)
1473 comb += wr_tag.addr.eq(r1.store_index)
1474 comb += wr_tag.data.eq(ct)
1475
1476 sync += r1.store_way.eq(replace_way)
1477 sync += r1.write_tag.eq(0)
1478
1479 # Take request from r1.req if there is one there,
1480 # else from req_op, ra, etc.
1481 with m.If(r1.full):
1482 comb += req.eq(r1.req)
1483 with m.Else():
1484 comb += req.op.eq(req_op)
1485 comb += req.valid.eq(req_go)
1486 comb += req.mmu_req.eq(r0.mmu_req)
1487 comb += req.dcbz.eq(r0.req.dcbz)
1488 comb += req.real_addr.eq(ra)
1489
1490 with m.If(r0.req.dcbz):
1491 # force data to 0 for dcbz
1492 comb += req.data.eq(0)
1493 with m.Elif(r0.d_valid):
1494 comb += req.data.eq(r0.req.data)
1495 with m.Else():
1496 comb += req.data.eq(d_in.data)
1497
1498 # Select all bytes for dcbz
1499 # and for cacheable loads
1500 with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
1501 comb += req.byte_sel.eq(~0) # all 1s
1502 with m.Else():
1503 comb += req.byte_sel.eq(r0.req.byte_sel)
1504 comb += req.hit_way.eq(req_hit_way)
1505 comb += req.same_tag.eq(req_same_tag)
1506
1507 # Store the incoming request from r0,
1508 # if it is a slow request
1509 # Note that r1.full = 1 implies req_op = OP_NONE
1510 with m.If((req_op == Op.OP_LOAD_MISS)
1511 | (req_op == Op.OP_LOAD_NC)
1512 | (req_op == Op.OP_STORE_MISS)
1513 | (req_op == Op.OP_STORE_HIT)):
1514 sync += r1.req.eq(req)
1515 sync += r1.full.eq(1)
1516 # do not let r1.state RELOAD_WAIT_ACK or STORE_WAIT_ACK
1517 # destroy r1.req by overwriting r1.full back to zero
1518 comb += r1_next_cycle.eq(1)
1519
1520 # Main state machine
1521 with m.Switch(r1.state):
1522
1523 with m.Case(State.IDLE):
1524 sync += r1.wb.adr.eq(req.real_addr[self.ROW_OFF_BITS:])
1525 sync += r1.wb.sel.eq(req.byte_sel)
1526 sync += r1.wb.dat.eq(req.data)
1527 sync += r1.dcbz.eq(req.dcbz)
1528
1529 # Keep track of our index and way
1530 # for subsequent stores.
1531 sync += r1.store_index.eq(req_idx)
1532 sync += r1.store_row.eq(req_row)
1533 sync += r1.end_row_ix.eq(self.get_row_of_line(req_row)-1)
1534 sync += r1.reload_tag.eq(req_tag)
1535 sync += r1.req.same_tag.eq(1)
1536
1537 with m.If(req.op == Op.OP_STORE_HIT):
1538 sync += r1.store_way.eq(req.hit_way)
1539
1540 #with m.If(r1.dec_acks):
1541 # sync += r1.acks_pending.eq(r1.acks_pending - 1)
1542
1543 # Reset per-row valid bits,
1544 # ready for handling OP_LOAD_MISS
1545 for i in range(self.ROW_PER_LINE):
1546 sync += r1.rows_valid[i].eq(0)
1547
1548 with m.If(req_op != Op.OP_NONE):
1549 sync += Display("cache op %d", req.op)
1550
1551 with m.Switch(req.op):
1552 with m.Case(Op.OP_LOAD_HIT):
1553 # stay in IDLE state
1554 pass
1555
1556 with m.Case(Op.OP_LOAD_MISS):
1557 sync += Display("cache miss real addr: %x " \
1558 "idx: %x tag: %x",
1559 req.real_addr, req_row, req_tag)
1560
1561 # Start the wishbone cycle
1562 sync += r1.wb.we.eq(0)
1563 sync += r1.wb.cyc.eq(1)
1564 sync += r1.wb.stb.eq(1)
1565
1566 # Track that we had one request sent
1567 sync += r1.state.eq(State.RELOAD_WAIT_ACK)
1568 sync += r1.write_tag.eq(1)
1569
1570 with m.Case(Op.OP_LOAD_NC):
1571 sync += r1.wb.cyc.eq(1)
1572 sync += r1.wb.stb.eq(1)
1573 sync += r1.wb.we.eq(0)
1574 sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)
1575
1576 with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
1577 with m.If(~req.dcbz):
1578 sync += r1.state.eq(State.STORE_WAIT_ACK)
1579 sync += r1.acks_pending.eq(1)
1580 sync += r1.full.eq(0)
1581 comb += r1_next_cycle.eq(0)
1582 sync += r1.slow_valid.eq(1)
1583
1584 with m.If(req.mmu_req):
1585 sync += r1.mmu_done.eq(1)
1586 with m.Else():
1587 sync += r1.ls_valid.eq(1)
1588
1589 with m.If(req.op == Op.OP_STORE_HIT):
1590 sync += r1.write_bram.eq(1)
1591 with m.Else():
1592 # dcbz is handled much like a load miss except
1593 # that we are writing to memory instead of reading
1594 sync += r1.state.eq(State.RELOAD_WAIT_ACK)
1595
1596 with m.If(req.op == Op.OP_STORE_MISS):
1597 sync += r1.write_tag.eq(1)
1598
1599 sync += r1.wb.we.eq(1)
1600 sync += r1.wb.cyc.eq(1)
1601 sync += r1.wb.stb.eq(1)
1602
1603 # OP_NONE and OP_BAD do nothing
1604 # OP_BAD & OP_STCX_FAIL were
1605 # handled above already
1606 with m.Case(Op.OP_NONE):
1607 pass
1608 with m.Case(Op.OP_BAD):
1609 pass
1610 with m.Case(Op.OP_STCX_FAIL):
1611 pass
1612
1613 with m.Case(State.RELOAD_WAIT_ACK):
1614
1615 # If we are still sending requests, was one accepted?
1616 with m.If((~bus.stall) & r1.wb.stb):
1617 # That was the last word? We are done sending. Clear stb
1618 # sigh - reconstruct wb adr with 3 extra 0s at front
1619 wb_adr = Cat(Const(0, self.ROW_OFF_BITS), r1.wb.adr)
1620 with m.If(self.is_last_row_addr(wb_adr, r1.end_row_ix)):
1621 sync += r1.wb.stb.eq(0)
1622
1623 # Calculate the next row address in the current cache line
1624 rlen = self.LINE_OFF_BITS-self.ROW_OFF_BITS
1625 row = Signal(rlen)
1626 comb += row.eq(r1.wb.adr)
1627 sync += r1.wb.adr[:rlen].eq(row+1)
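# only the low LINE_OFF_BITS-ROW_OFF_BITS bits (3 with the default
# 64-byte line and 64-bit wishbone) are incremented, so the address
# wraps within the cache line, mirroring next_row() above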
1628
1629 # Incoming acks processing
1630 sync += r1.forward_valid1.eq(bus.ack)
1631 with m.If(bus.ack):
1632 srow = Signal(self.ROW_LINE_BITS)
1633 comb += srow.eq(r1.store_row)
1634 sync += r1.rows_valid[srow].eq(1)
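# Each acked row is marked valid individually, so a hit on a row that
# has already arrived can be serviced while the rest of the line is
# still being refilled.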
1635
1636 # If this is the data we were looking for,
1637 # we can complete the request next cycle.
1638 # Compare the whole address in case the
1639 # request in r1.req is not the one that
1640 # started this refill.
1641 rowmatch = Signal()
1642 lastrow = Signal()
1643 comb += rowmatch.eq(r1.store_row ==
1644 self.get_row(r1.req.real_addr))
1645 comb += lastrow.eq(self.is_last_row(r1.store_row,
1646 r1.end_row_ix))
1647 with m.If(r1.full & r1.req.same_tag &
1648 ((r1.dcbz & req.dcbz) |
1649 (r1.req.op == Op.OP_LOAD_MISS)) & rowmatch):
1650 sync += r1.full.eq(r1_next_cycle)
1651 sync += r1.slow_valid.eq(1)
1652 with m.If(r1.mmu_req):
1653 sync += r1.mmu_done.eq(1)
1654 with m.Else():
1655 sync += r1.ls_valid.eq(1)
1656 sync += r1.forward_sel.eq(~0) # all 1s
1657 sync += r1.use_forward1.eq(1)
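# use_forward1 selects the data that is being written into the cache
# RAM this cycle, since the BRAM read port would not otherwise show
# the newly written row until the following cycle.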
1658
1659 # Check for completion
1660 with m.If(lastrow):
1661 # Complete wishbone cycle
1662 sync += r1.wb.cyc.eq(0)
1663
1664 # Cache line is now valid
1665 cv = Signal(self.NUM_WAYS) # one valid bit per way
1666 comb += cv.eq(cache_valids[r1.store_index])
1667 comb += cv.bit_select(r1.store_way, 1).eq(1)
1668 sync += cache_valids[r1.store_index].eq(cv)
1669
1670 sync += r1.state.eq(State.IDLE)
1671 sync += Display("cache valid set %x "
1672 "idx %d way %d",
1673 cv, r1.store_index, r1.store_way)
1674
1675 # Increment store row counter
1676 sync += r1.store_row.eq(self.next_row(r1.store_row))
1677
1678 with m.Case(State.STORE_WAIT_ACK):
1679 st_stbs_done = Signal()
1680 adjust_acks = Signal(3)
1681
1682 comb += st_stbs_done.eq(~r1.wb.stb)
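# st_stbs_done means the last stb has been dropped; it is forced low
# below whenever another store is chained onto the same wishbone
# cycle, and forced high once stb is finally cleared.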
1683
1684 with m.If(r1.inc_acks != r1.dec_acks):
1685 with m.If(r1.inc_acks):
1686 comb += adjust_acks.eq(r1.acks_pending + 1)
1687 with m.Else():
1688 comb += adjust_acks.eq(r1.acks_pending - 1)
1689 with m.Else():
1690 comb += adjust_acks.eq(r1.acks_pending)
1691
1692 sync += r1.acks_pending.eq(adjust_acks)
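# acks_pending counts wishbone requests sent but not yet acked:
# inc_acks (set when a chained stb goes out) and dec_acks (set when an
# ack arrives) are netted off here. adjust_acks is only 3 bits wide,
# hence the (adjust_acks < 7) guard below before chaining another store.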
1693
1694 # Clear stb when slave accepted request
1695 with m.If(~bus.stall):
1696 # See if there is another store waiting
1697 # to be done which is in the same real page.
1698 # (this is the case when req.same_tag is true)
1699 with m.If(req.valid):
1700 _ra = req.real_addr[self.ROW_OFF_BITS:
1701 self.SET_SIZE_BITS]
1702 alen = self.SET_SIZE_BITS-self.ROW_OFF_BITS
1703 sync += r1.wb.adr[0:alen].eq(_ra)
1704 sync += r1.wb.dat.eq(req.data)
1705 sync += r1.wb.sel.eq(req.byte_sel)
1706
1707 with m.If((adjust_acks < 7) & req.same_tag &
1708 ((req.op == Op.OP_STORE_MISS) |
1709 (req.op == Op.OP_STORE_HIT))):
1710 sync += r1.wb.stb.eq(1)
1711 comb += st_stbs_done.eq(0)
1712 sync += r1.store_way.eq(req.hit_way)
1713 sync += r1.store_row.eq(self.get_row(req.real_addr))
1714
1715 with m.If(req.op == Op.OP_STORE_HIT):
1716 sync += r1.write_bram.eq(1)
1717 sync += r1.full.eq(r1_next_cycle)
1718 sync += r1.slow_valid.eq(1)
1719
1720 # Store requests never come from the MMU
1721 sync += r1.ls_valid.eq(1)
1722 comb += st_stbs_done.eq(0)
1723 sync += r1.inc_acks.eq(1)
1724 with m.Else():
1725 sync += r1.wb.stb.eq(0)
1726 comb += st_stbs_done.eq(1)
1727
1728 # Got ack ? See if complete.
1729 sync += Display("got ack %d %d stbs %d adjust_acks %d",
1730 bus.ack, bus.ack, st_stbs_done, adjust_acks)
1731 with m.If(bus.ack):
1732 with m.If(st_stbs_done & (adjust_acks == 1)):
1733 sync += r1.state.eq(State.IDLE)
1734 sync += r1.wb.cyc.eq(0)
1735 sync += r1.wb.stb.eq(0)
1736 sync += r1.dec_acks.eq(1)
1737
1738 with m.Case(State.NC_LOAD_WAIT_ACK):
1739 # Clear stb when slave accepted request
1740 with m.If(~bus.stall):
1741 sync += r1.wb.stb.eq(0)
1742
1743 # Got ack ? complete.
1744 with m.If(bus.ack):
1745 sync += r1.state.eq(State.IDLE)
1746 sync += r1.full.eq(r1_next_cycle)
1747 sync += r1.slow_valid.eq(1)
1748
1749 with m.If(r1.mmu_req):
1750 sync += r1.mmu_done.eq(1)
1751 with m.Else():
1752 sync += r1.ls_valid.eq(1)
1753
1754 sync += r1.forward_sel.eq(~0) # all 1s
1755 sync += r1.use_forward1.eq(1)
1756 sync += r1.wb.cyc.eq(0)
1757 sync += r1.wb.stb.eq(0)
1758
1759 def dcache_log(self, m, r1, valid_ra, tlb_hit, stall_out, req_op):
1760
1761 sync = m.d.sync
1762 d_out, bus, log_out = self.d_out, self.bus, self.log_out
1763
1764 sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit.way[:3],
1765 stall_out, req_op[:3], d_out.valid, d_out.error,
1766 r1.wb.cyc, r1.wb.stb, bus.ack, bus.stall,
1767 r1.real_adr[3:6]))
1768
1769 def elaborate(self, platform):
1770
1771 m = Module()
1772 comb, sync = m.d.comb, m.d.sync
1773 m_in, d_in = self.m_in, self.d_in
1774
1775 # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
1776 cache_valids = self.CacheValidsArray()
1777 cache_tag_set = Signal(self.TAG_RAM_WIDTH)
1778
1779 self.tagmem = Memory(depth=self.NUM_LINES, width=self.TAG_RAM_WIDTH,
1780 attrs={'syn_ramstyle': "block_ram"})
1781
1782 """note: these are passed to nmigen.hdl.Memory as "attributes".
1783 don't know how, just that they are.
1784 """
1785 # TODO attribute ram_style of
1786 # dtlb_tags : signal is "distributed";
1787 # TODO attribute ram_style of
1788 # dtlb_ptes : signal is "distributed";
1789
1790 r0 = RegStage0("r0")
1791 r0_full = Signal()
1792
1793 r1 = RegStage1(self, "r1")
1794
1795 reservation = Reservation(self, "rsrv")
1796
1797 # Async signals on incoming request
1798 req_index = Signal(self.INDEX_BITS)
1799 req_row = Signal(self.ROW_BITS)
1800 req_hit_way = Signal(self.WAY_BITS)
1801 req_tag = Signal(self.TAG_BITS)
1802 req_op = Signal(Op)
1803 req_data = Signal(64)
1804 req_same_tag = Signal()
1805 req_go = Signal()
1806
1807 early_req_row = Signal(self.ROW_BITS)
1808
1809 cancel_store = Signal()
1810 set_rsrv = Signal()
1811 clear_rsrv = Signal()
1812
1813 r0_valid = Signal()
1814 r0_stall = Signal()
1815
1816 use_forward1_next = Signal()
1817 use_forward2_next = Signal()
1818
1819 cache_out_row = Signal(WB_DATA_BITS)
1820
1821 plru_victim = Signal(self.WAY_BITS)
1822 replace_way = Signal(self.WAY_BITS)
1823
1824 # Wishbone read/write/cache write formatting signals
1825 bus_sel = Signal(8)
1826
1827 # TLB signals
1828 tlb_way = self.TLBRecord("tlb_way")
1829 tlb_req_index = Signal(self.TLB_SET_BITS)
1830 tlb_hit = self.TLBHit("tlb_hit")
1831 pte = Signal(self.TLB_PTE_BITS)
1832 ra = Signal(self.REAL_ADDR_BITS)
1833 valid_ra = Signal()
1834 perm_attr = PermAttr("dc_perms")
1835 rc_ok = Signal()
1836 perm_ok = Signal()
1837 access_ok = Signal()
1838
1839 tlb_plru_victim = Signal(self.TLB_WAY_BITS)
1840
1841 # we don't yet handle collisions between loadstore1 requests
1842 # and MMU requests
1843 comb += self.m_out.stall.eq(0)
1844
1845 # Hold off the request in r0 when r1 has an uncompleted request
1846 comb += r0_stall.eq(r0_full & (r1.full | d_in.hold))
1847 comb += r0_valid.eq(r0_full & ~r1.full & ~d_in.hold)
1848 comb += self.stall_out.eq(r0_stall)
1849 # debugging: record whether a stall was ever requested (harmless),
1850 # and whether a request arrived while stall was asserted (that is bad).
1851 with m.If(r0_stall):
1852 sync += self.any_stall_out.eq(1)
1853 with m.If(d_in.valid):
1854 sync += self.dreq_when_stall.eq(1)
1855 with m.If(m_in.valid):
1856 sync += self.mreq_when_stall.eq(1)
1857
1858 # deal with litex not doing wishbone pipeline mode
1859 # XXX this is the wrong way to do it: FIFOs are needed in the SRAM
1860 # test so that stb/ack match up. the same workaround is in icache.py
1861 if not self.microwatt_compat or self.fabric_compat:
1862 comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
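# With stall tied to cyc & ~ack the core never has more than one
# wishbone request in flight, i.e. pipelined mode degenerates to
# classic single-transaction behaviour, which appears to be what the
# SRAM-based unit tests expect (see the comment above).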
1863
1864 # Wire up wishbone request latch out of stage 1
1865 comb += self.bus.we.eq(r1.wb.we)
1866 comb += self.bus.adr.eq(r1.wb.adr)
1867 comb += self.bus.sel.eq(r1.wb.sel)
1868 comb += self.bus.stb.eq(r1.wb.stb)
1869 comb += self.bus.dat_w.eq(r1.wb.dat)
1870 comb += self.bus.cyc.eq(r1.wb.cyc)
1871
1872 # create submodule TLBUpdate
1873 m.submodules.dtlb_update = self.dtlb_update = DTLBUpdate(self)
1874
1875 # call sub-functions putting everything together, using shared
1876 # signals established above
1877 self.stage_0(m, r0, r1, r0_full)
1878 self.tlb_read(m, r0_stall, tlb_way)
1879 self.tlb_search(m, tlb_req_index, r0, r0_valid,
1880 tlb_way,
1881 pte, tlb_hit, valid_ra, perm_attr, ra)
1882 self.tlb_update(m, r0_valid, r0, tlb_req_index,
1883 tlb_hit, tlb_plru_victim)
1884 self.maybe_plrus(m, r1, plru_victim)
1885 self.maybe_tlb_plrus(m, r1, tlb_plru_victim, tlb_req_index)
1886 self.cache_tag_read(m, r0_stall, req_index, cache_tag_set)
1887 self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
1888 r0_valid, r1, cache_valids, replace_way,
1889 use_forward1_next, use_forward2_next,
1890 req_hit_way, plru_victim, rc_ok, perm_attr,
1891 valid_ra, perm_ok, access_ok, req_op, req_go,
1892 tlb_hit, tlb_way, cache_tag_set,
1893 cancel_store, req_same_tag, r0_stall, early_req_row)
1894 self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
1895 r0_valid, r0, reservation)
1896 self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
1897 reservation, r0)
1898 self.writeback_control(m, r1, cache_out_row)
1899 self.rams(m, r1, early_req_row, cache_out_row, replace_way)
1900 self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
1901 req_hit_way, req_index, req_tag, access_ok,
1902 tlb_hit, tlb_req_index)
1903 self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
1904 r0, replace_way,
1905 req_hit_way, req_same_tag,
1906 r0_valid, req_op, cache_valids, req_go, ra)
1907 #self.dcache_log(m, r1, valid_ra, tlb_hit, self.stall_out, req_op)
1908
1909 return m
1910
1911
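# Running this file directly emits an RTLIL (ilang) netlist of a
# default-configuration DCache, e.g. for quick inspection with yosys
# (usage sketch only, assuming yosys is installed and on the PATH):
#
#     python3 dcache.py
#     yosys -p 'read_rtlil test_dcache.il; stat'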
1912 if __name__ == '__main__':
1913 dut = DCache()
1914 vl = rtlil.convert(dut, ports=[])
1915 with open("test_dcache.il", "w") as f:
1916 f.write(vl)