fix icache row store issue
[soc.git] / src / soc / experiment / dcache.py
index 5e797f419c1edd893ab697d924839ca9bba9ed48..a5548cfb6114859a35e13e0aaad81dbbb25c1a04 100644
@@ -2,22 +2,38 @@
 
 based on Anton Blanchard microwatt dcache.vhdl
 
+note that the microwatt dcache wishbone interface expects "stall".
+for simplicity, at the moment, this is hard-coded to cyc & ~ack.
+see WB4 spec, p84, section 5.2.1
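+
+in elaborate() below this is wired up as:
+
+    comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)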
+
+IMPORTANT: for store, the data is sampled the cycle AFTER the "valid"
+is raised.  sigh
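+
+rough timeline, using the signal names from this file:
+
+    cycle 0: d_in.valid=1, with addr and byte_sel presented
+    cycle 1: d_in.data is sampled (into r0.req.data, setting r0.d_valid)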
+
+Links:
+
+* https://libre-soc.org/3d_gpu/architecture/set_associative_cache.jpg
+* https://bugs.libre-soc.org/show_bug.cgi?id=469
+
 """
 
+import sys
+
+from nmutil.gtkw import write_gtkw
+
+sys.setrecursionlimit(1000000)
+
 from enum import Enum, unique
 
 from nmigen import Module, Signal, Elaboratable, Cat, Repl, Array, Const
-try:
-    from nmigen.hdl.ast import Display
-except ImportError:
-    def Display(*args):
-        return []
+from nmutil.util import Display
+
+from copy import deepcopy
+from random import randint, seed
 
-from random import randint
+from nmigen_soc.wishbone.bus import Interface
 
 from nmigen.cli import main
 from nmutil.iocontrol import RecordObject
-from nmutil.util import wrap
 from nmigen.utils import log2_int
 from soc.experiment.mem_types import (LoadStore1ToDCacheType,
                                      DCacheToLoadStore1Type,
@@ -31,16 +47,19 @@ from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                 WBIOMasterOut, WBIOSlaveOut)
 
 from soc.experiment.cache_ram import CacheRam
-from soc.experiment.plru import PLRU
+#from soc.experiment.plru import PLRU
+from nmutil.plru import PLRU
 
 # for test
-from nmigen_soc.wishbone.sram import SRAM
+from soc.bus.sram import SRAM
 from nmigen import Memory
 from nmigen.cli import rtlil
-if True:
-    from nmigen.back.pysim import Simulator, Delay, Settle
-else:
-    from nmigen.sim.cxxsim import Simulator, Delay, Settle
+
+# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
+# Also, check out the cxxsim nmigen branch, and latest yosys from git
+from nmutil.sim_tmp_alternative import Simulator
+
+from nmutil.util import wrap
 
 
 # TODO: make these parameters of DCache at some point
@@ -55,7 +74,7 @@ LOG_LENGTH = 0    # Non-zero to enable log data collection
 # BRAM organisation: We never access more than
 #     -- WB_DATA_BITS at a time so to save
 #     -- resources we make the array only that wide, and
-#     -- use consecutive indices for to make a cache "line"
+#     -- use consecutive indices to make a cache "line"
 #     --
 #     -- ROW_SIZE is the width in bytes of the BRAM
 #     -- (based on WB, so 64-bits)
@@ -69,6 +88,10 @@ ROW_PER_LINE = LINE_SIZE // ROW_SIZE
 # to represent the full dcache
 BRAM_ROWS = NUM_LINES * ROW_PER_LINE
 
+print ("ROW_SIZE", ROW_SIZE)
+print ("ROW_PER_LINE", ROW_PER_LINE)
+print ("BRAM_ROWS", BRAM_ROWS)
+print ("NUM_WAYS", NUM_WAYS)
 
 # Bit fields counts in the address
 
@@ -120,7 +143,7 @@ layout = """\
   .. --------|              | TAG_BITS      (45)
 """
 print (layout)
-print ("Dcache TAG %d IDX %d ROW %d ROFF %d LOFF %d RLB %d" % \
+print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
             (TAG_BITS, INDEX_BITS, ROW_BITS,
              ROW_OFF_BITS, LINE_OFF_BITS, ROW_LINE_BITS))
 print ("index @: %d-%d" % (LINE_OFF_BITS, SET_SIZE_BITS))
@@ -129,12 +152,14 @@ print ("tag @: %d-%d width %d" % (SET_SIZE_BITS, REAL_ADDR_BITS, TAG_WIDTH))
 
 TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS
 
+print ("TAG_RAM_WIDTH", TAG_RAM_WIDTH)
+
 def CacheTagArray():
     return Array(Signal(TAG_RAM_WIDTH, name="cachetag_%d" % x) \
                         for x in range(NUM_LINES))
 
 def CacheValidBitsArray():
-    return Array(Signal(INDEX_BITS, name="cachevalid_%d" % x) \
+    return Array(Signal(NUM_WAYS, name="cachevalid_%d" % x) \
                         for x in range(NUM_LINES))
 
 def RowPerLineValidArray():
@@ -149,10 +174,13 @@ TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
 TLB_PTE_BITS     = 64
 TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS;
 
+def ispow2(x):
+    return (1<<log2_int(x, False)) == x
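+# note: log2_int(x, False) returns ceil(log2(x)), so e.g. ispow2(64)
+# is True, while ispow2(48) compares (1<<6) == 64 against 48: False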
+
 assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not multiple of ROW_SIZE"
-assert (LINE_SIZE % 2) == 0, "LINE_SIZE not power of 2"
-assert (NUM_LINES % 2) == 0, "NUM_LINES not power of 2"
-assert (ROW_PER_LINE % 2) == 0, "ROW_PER_LINE not power of 2"
+assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
+assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
+assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
 assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), "geometry bits don't add up"
 assert (LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS), \
         "geometry bits don't add up"
@@ -165,16 +193,20 @@ assert SET_SIZE_BITS <= TLB_LG_PGSZ, "Set indexed by virtual address"
 
 
 def TLBValidBitsArray():
-    return Array(Signal(TLB_NUM_WAYS) for x in range(TLB_SET_SIZE))
+    return Array(Signal(TLB_NUM_WAYS, name="tlbvalid%d" % x) \
+                for x in range(TLB_SET_SIZE))
 
 def TLBTagEAArray():
-    return Array(Signal(TLB_EA_TAG_BITS) for x in range (TLB_NUM_WAYS))
+    return Array(Signal(TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
+                for x in range (TLB_NUM_WAYS))
 
 def TLBTagsArray():
-    return Array(Signal(TLB_TAG_WAY_BITS) for x in range (TLB_SET_SIZE))
+    return Array(Signal(TLB_TAG_WAY_BITS, name="tlbtags%d" % x) \
+                for x in range (TLB_SET_SIZE))
 
 def TLBPtesArray():
-    return Array(Signal(TLB_PTE_WAY_BITS) for x in range(TLB_SET_SIZE))
+    return Array(Signal(TLB_PTE_WAY_BITS, name="tlbptes%d" % x) \
+                for x in range(TLB_SET_SIZE))
 
 def HitWaySet():
     return Array(Signal(WAY_BITS, name="hitway_%d" % x) \
@@ -187,11 +219,13 @@ def CacheRamOut():
 
 # PLRU output interface
 def PLRUOut():
-    return Array(Signal(WAY_BITS) for x in range(NUM_LINES))
+    return Array(Signal(WAY_BITS, name="plru_out%d" % x) \
+                for x in range(NUM_LINES))
 
 # TLB PLRU output interface
 def TLBPLRUOut():
-    return Array(Signal(TLB_WAY_BITS) for x in range(TLB_SET_SIZE))
+    return Array(Signal(TLB_WAY_BITS, name="tlbplru_out%d" % x) \
+                for x in range(TLB_SET_SIZE))
 
 # Helper functions to decode incoming requests
 #
@@ -261,12 +295,6 @@ class PermAttr(RecordObject):
 
 def extract_perm_attr(pte):
     pa = PermAttr()
-    pa.reference = pte[8]
-    pa.changed   = pte[7]
-    pa.nocache   = pte[5]
-    pa.priv      = pte[3]
-    pa.rd_perm   = pte[2]
-    pa.wr_perm   = pte[1]
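+    # (permission bits are now extracted inline in DCache.tlb_search)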
     return pa;
 
 
@@ -313,10 +341,11 @@ class RegStage0(RecordObject):
     def __init__(self, name=None):
         super().__init__(name=name)
         self.req     = LoadStore1ToDCacheType(name="lsmem")
-        self.tlbie   = Signal()
-        self.doall   = Signal()
-        self.tlbld   = Signal()
+        self.tlbie   = Signal() # indicates a tlbie request (from MMU)
+        self.doall   = Signal() # with tlbie, indicates flush whole TLB
+        self.tlbld   = Signal() # indicates a TLB load request (from MMU)
         self.mmu_req = Signal() # indicates source of request
+        self.d_valid = Signal() # indicates req.data is valid now
 
 
 class MemAccessRequest(RecordObject):
@@ -417,11 +446,11 @@ class DTLBUpdate(Elaboratable):
         self.eatag           = Signal(TLB_EA_TAG_BITS)
         self.pte_data        = Signal(TLB_PTE_BITS)
 
-        self.dv = Signal(TLB_PTE_WAY_BITS)
+        self.dv = Signal(TLB_NUM_WAYS) # tlb_way_valids_t
 
-        self.tb_out = Signal(TLB_TAG_WAY_BITS)
-        self.pb_out = Signal(TLB_NUM_WAYS)
-        self.db_out = Signal(TLB_PTE_WAY_BITS)
+        self.tb_out = Signal(TLB_TAG_WAY_BITS) # tlb_way_tags_t
+        self.db_out = Signal(TLB_NUM_WAYS)     # tlb_way_valids_t
+        self.pb_out = Signal(TLB_PTE_WAY_BITS) # tlb_way_ptes_t
 
     def elaborate(self, platform):
         m = Module()
@@ -432,13 +461,13 @@ class DTLBUpdate(Elaboratable):
         pteset   = Signal(TLB_PTE_WAY_BITS)
 
         tb_out, pb_out, db_out = self.tb_out, self.pb_out, self.db_out
+        comb += db_out.eq(self.dv)
 
         with m.If(self.tlbie & self.doall):
             pass # clear all back in parent
         with m.Elif(self.tlbie):
             with m.If(self.tlb_hit):
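+                # single-entry tlbie: db_out defaults to self.dv (above),
+                # so just clear the valid bit of the hit way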
-                comb += db_out.eq(self.dv)
-                comb += db_out.bit_select(self.tlb_hit_way, 1).eq(1)
+                comb += db_out.bit_select(self.tlb_hit_way, 1).eq(0)
                 comb += self.v_updated.eq(1)
 
         with m.Elif(self.tlbwe):
@@ -458,21 +487,11 @@ class DTLBUpdate(Elaboratable):
 
         return m
 
-    def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
-                       r0_valid, r1, cache_valid_bits, replace_way,
-                       use_forward1_next, use_forward2_next,
-                       req_hit_way, plru_victim, rc_ok, perm_attr,
-                       valid_ra, perm_ok, access_ok, req_op, req_go,
-                       tlb_pte_way,
-                       tlb_hit, tlb_hit_way, tlb_valid_way, cache_tag_set,
-                       cancel_store, req_same_tag, r0_stall, early_req_row):
-        """Cache request parsing and hit detection
-        """
 
 class DCachePendingHit(Elaboratable):
 
     def __init__(self, tlb_pte_way, tlb_valid_way, tlb_hit_way,
-                      cache_valid_idx, cache_tag_set,
+                      cache_i_validdx, cache_tag_set,
                     req_addr,
                     hit_set):
 
@@ -488,7 +507,7 @@ class DCachePendingHit(Elaboratable):
         self.tlb_hit_way = tlb_hit_way
         self.tlb_pte_way = tlb_pte_way
         self.tlb_valid_way = tlb_valid_way
-        self.cache_valid_idx = cache_valid_idx
+        self.cache_i_validdx = cache_i_validdx
         self.cache_tag_set = cache_tag_set
         self.req_addr = req_addr
         self.hit_set = hit_set
@@ -503,7 +522,7 @@ class DCachePendingHit(Elaboratable):
         is_hit = self.is_hit
         tlb_pte_way = self.tlb_pte_way
         tlb_valid_way = self.tlb_valid_way
-        cache_valid_idx = self.cache_valid_idx
+        cache_i_validdx = self.cache_i_validdx
         cache_tag_set = self.cache_tag_set
         req_addr = self.req_addr
         tlb_hit_way = self.tlb_hit_way
@@ -525,7 +544,7 @@ class DCachePendingHit(Elaboratable):
         # the TLB, and then decide later which match to use.
 
         with m.If(virt_mode):
-            for j in range(TLB_NUM_WAYS):
+            for j in range(TLB_NUM_WAYS): # tlb_num_way_t
                 s_tag       = Signal(TAG_BITS, name="s_tag%d" % j)
                 s_hit       = Signal()
                 s_pte       = Signal(TLB_PTE_BITS)
@@ -535,9 +554,9 @@ class DCachePendingHit(Elaboratable):
                                     s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
                 comb += s_tag.eq(get_tag(s_ra))
 
-                for i in range(NUM_WAYS):
+                for i in range(NUM_WAYS): # way_t
                     is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
-                    comb += is_tag_hit.eq(go & cache_valid_idx[i] &
+                    comb += is_tag_hit.eq(go & cache_i_validdx[i] &
                                   (read_tag(i, cache_tag_set) == s_tag)
                                   & tlb_valid_way[j])
                     with m.If(is_tag_hit):
@@ -553,9 +572,9 @@ class DCachePendingHit(Elaboratable):
         with m.Else():
             s_tag       = Signal(TAG_BITS)
             comb += s_tag.eq(get_tag(req_addr))
-            for i in range(NUM_WAYS):
+            for i in range(NUM_WAYS): # way_t
                 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
-                comb += is_tag_hit.eq(go & cache_valid_idx[i] &
+                comb += is_tag_hit.eq(go & cache_i_validdx[i] &
                           (read_tag(i, cache_tag_set) == s_tag))
                 with m.If(is_tag_hit):
                     comb += hit_way.eq(i)
@@ -568,6 +587,7 @@ class DCachePendingHit(Elaboratable):
 
 class DCache(Elaboratable):
     """Set associative dcache write-through
+
     TODO (in no specific order):
     * See list in icache.vhdl
     * Complete load misses on the cycle when WB data comes instead of
@@ -583,8 +603,13 @@ class DCache(Elaboratable):
 
         self.stall_out = Signal()
 
-        self.wb_out    = WBMasterOut()
-        self.wb_in     = WBSlaveOut()
+        # standard naming (wired to non-standard for compatibility)
+        self.bus = Interface(addr_width=32,
+                            data_width=64,
+                            granularity=8,
+                            features={'stall'},
+                            alignment=0,
+                            name="dcache")
 
         self.log_out   = Signal(20)
 
@@ -602,29 +627,45 @@ class DCache(Elaboratable):
             sync += Display("request collision loadstore vs MMU")
 
         with m.If(m_in.valid):
-            sync += r.req.valid.eq(1)
-            sync += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))
-            sync += r.req.dcbz.eq(0)
-            sync += r.req.nc.eq(0)
-            sync += r.req.reserve.eq(0)
-            sync += r.req.virt_mode.eq(1)
-            sync += r.req.priv_mode.eq(1)
-            sync += r.req.addr.eq(m_in.addr)
-            sync += r.req.data.eq(m_in.pte)
-            sync += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
-            sync += r.tlbie.eq(m_in.tlbie)
-            sync += r.doall.eq(m_in.doall)
-            sync += r.tlbld.eq(m_in.tlbld)
-            sync += r.mmu_req.eq(1)
+            comb += r.req.valid.eq(1)
+            comb += r.req.load.eq(~(m_in.tlbie | m_in.tlbld)) # no invalidate
+            comb += r.req.dcbz.eq(0)
+            comb += r.req.nc.eq(0)
+            comb += r.req.reserve.eq(0)
+            comb += r.req.virt_mode.eq(0)
+            comb += r.req.priv_mode.eq(1)
+            comb += r.req.addr.eq(m_in.addr)
+            comb += r.req.data.eq(m_in.pte)
+            comb += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
+            comb += r.tlbie.eq(m_in.tlbie)
+            comb += r.doall.eq(m_in.doall)
+            comb += r.tlbld.eq(m_in.tlbld)
+            comb += r.mmu_req.eq(1)
+            m.d.sync += Display("    DCACHE req mmu addr %x pte %x ld %d",
+                                 m_in.addr, m_in.pte, r.req.load)
+
         with m.Else():
-            sync += r.req.eq(d_in)
-            sync += r.tlbie.eq(0)
-            sync += r.doall.eq(0)
-            sync += r.tlbld.eq(0)
-            sync += r.mmu_req.eq(0)
-            with m.If(~(r1.full & r0_full)):
-                sync += r0.eq(r)
-                sync += r0_full.eq(r.req.valid)
+            comb += r.req.eq(d_in)
+            comb += r.req.data.eq(0)
+            comb += r.tlbie.eq(0)
+            comb += r.doall.eq(0)
+            comb += r.tlbld.eq(0)
+            comb += r.mmu_req.eq(0)
+        with m.If((~r1.full & ~d_in.hold) | ~r0_full):
+            sync += r0.eq(r)
+            sync += r0_full.eq(r.req.valid)
+            # Sample data the cycle after a request comes in from loadstore1.
+            # If another request has come in already then the data will get
+            # put directly into req.data below.
+            with m.If(r0.req.valid & ~r.req.valid & ~r0.d_valid &
+                     ~r0.mmu_req):
+                sync += r0.req.data.eq(d_in.data)
+                sync += r0.d_valid.eq(1)
+        with m.If(d_in.valid):
+            m.d.sync += Display("    DCACHE req cache "
+                                "virt %d addr %x data %x ld %d",
+                                 r.req.virt_mode, r.req.addr,
+                                 r.req.data, r.req.load)
 
     def tlb_read(self, m, r0_stall, tlb_valid_way,
                  tlb_tag_way, tlb_pte_way, dtlb_valid_bits,
@@ -666,13 +707,13 @@ class DCache(Elaboratable):
             return
         for i in range(TLB_SET_SIZE):
             # TLB PLRU interface
-            tlb_plru        = PLRU(WAY_BITS)
+            tlb_plru        = PLRU(TLB_WAY_BITS)
             setattr(m.submodules, "maybe_plru_%d" % i, tlb_plru)
             tlb_plru_acc_en = Signal()
 
             comb += tlb_plru_acc_en.eq(r1.tlb_hit & (r1.tlb_hit_index == i))
             comb += tlb_plru.acc_en.eq(tlb_plru_acc_en)
-            comb += tlb_plru.acc.eq(r1.tlb_hit_way)
+            comb += tlb_plru.acc_i.eq(r1.tlb_hit_way)
             comb += tlb_plru_victim[i].eq(tlb_plru.lru_o)
 
     def tlb_search(self, m, tlb_req_index, r0, r0_valid,
@@ -680,7 +721,6 @@ class DCache(Elaboratable):
                    tlb_pte_way, pte, tlb_hit, valid_ra, perm_attr, ra):
 
         comb = m.d.comb
-        sync = m.d.sync
 
         hitway = Signal(TLB_WAY_BITS)
         hit    = Signal()
@@ -691,9 +731,10 @@ class DCache(Elaboratable):
         comb += eatag.eq(r0.req.addr[TLB_LG_END : 64 ])
 
         for i in range(TLB_NUM_WAYS):
-            is_tag_hit = Signal()
-            comb += is_tag_hit.eq(tlb_valid_way[i]
-                                  & read_tlb_tag(i, tlb_tag_way) == eatag)
+            is_tag_hit = Signal(name="is_tag_hit%d" % i)
+            tlb_tag = Signal(TLB_EA_TAG_BITS, name="tlb_tag%d" % i)
+            comb += tlb_tag.eq(read_tlb_tag(i, tlb_tag_way))
+            comb += is_tag_hit.eq(tlb_valid_way[i] & (tlb_tag == eatag))
             with m.If(is_tag_hit):
                 comb += hitway.eq(i)
                 comb += hit.eq(1)
@@ -703,18 +744,21 @@ class DCache(Elaboratable):
 
         with m.If(tlb_hit):
             comb += pte.eq(read_tlb_pte(hitway, tlb_pte_way))
-        with m.Else():
-            comb += pte.eq(0)
         comb += valid_ra.eq(tlb_hit | ~r0.req.virt_mode)
+
         with m.If(r0.req.virt_mode):
             comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                               r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
                               pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
-            comb += perm_attr.eq(extract_perm_attr(pte))
+            comb += perm_attr.reference.eq(pte[8])
+            comb += perm_attr.changed.eq(pte[7])
+            comb += perm_attr.nocache.eq(pte[5])
+            comb += perm_attr.priv.eq(pte[3])
+            comb += perm_attr.rd_perm.eq(pte[2])
+            comb += perm_attr.wr_perm.eq(pte[1])
         with m.Else():
             comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                               r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]))
-
             comb += perm_attr.reference.eq(1)
             comb += perm_attr.changed.eq(1)
             comb += perm_attr.nocache.eq(0)
@@ -722,10 +766,22 @@ class DCache(Elaboratable):
             comb += perm_attr.rd_perm.eq(1)
             comb += perm_attr.wr_perm.eq(1)
 
+        with m.If(valid_ra):
+            m.d.sync += Display("DCACHE virt mode %d hit %d ra %x pte %x",
+                                r0.req.virt_mode, tlb_hit, ra, pte)
+            m.d.sync += Display("       perm ref=%d", perm_attr.reference)
+            m.d.sync += Display("       perm chg=%d", perm_attr.changed)
+            m.d.sync += Display("       perm noc=%d", perm_attr.nocache)
+            m.d.sync += Display("       perm prv=%d", perm_attr.priv)
+            m.d.sync += Display("       perm rdp=%d", perm_attr.rd_perm)
+            m.d.sync += Display("       perm wrp=%d", perm_attr.wr_perm)
+
     def tlb_update(self, m, r0_valid, r0, dtlb_valid_bits, tlb_req_index,
                     tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
                     dtlb_tags, tlb_pte_way, dtlb_ptes):
 
+        dtlb_valids = TLBValidBitsArray()
+
         comb = m.d.comb
         sync = m.d.sync
 
@@ -781,7 +837,7 @@ class DCache(Elaboratable):
 
             comb += plru_acc_en.eq(r1.cache_hit & (r1.hit_index == i))
             comb += plru.acc_en.eq(plru_acc_en)
-            comb += plru.acc.eq(r1.hit_way)
+            comb += plru.acc_i.eq(r1.hit_way)
             comb += plru_victim[i].eq(plru.lru_o)
 
     def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set, cache_tags):
@@ -802,7 +858,7 @@ class DCache(Elaboratable):
         sync += cache_tag_set.eq(cache_tags[index])
 
     def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
-                       r0_valid, r1, cache_valid_bits, replace_way,
+                       r0_valid, r1, cache_valids, replace_way,
                        use_forward1_next, use_forward2_next,
                        req_hit_way, plru_victim, rc_ok, perm_attr,
                        valid_ra, perm_ok, access_ok, req_op, req_go,
@@ -813,7 +869,6 @@ class DCache(Elaboratable):
         """
 
         comb = m.d.comb
-        sync = m.d.sync
         m_in, d_in = self.m_in, self.d_in
 
         is_hit      = Signal()
@@ -824,19 +879,23 @@ class DCache(Elaboratable):
         nc          = Signal()
         hit_set     = Array(Signal(name="hit_set_%d" % i) \
                                   for i in range(TLB_NUM_WAYS))
-        cache_valid_idx = Signal(INDEX_BITS)
+        cache_i_validdx = Signal(NUM_WAYS)
 
         # Extract line, row and tag from request
         comb += req_index.eq(get_index(r0.req.addr))
         comb += req_row.eq(get_row(r0.req.addr))
         comb += req_tag.eq(get_tag(ra))
 
+        if False: # display on comb is a bit... busy.
+            comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
+                    r0.req.addr, ra, req_index, req_tag, req_row)
+
         comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
-        comb += cache_valid_idx.eq(cache_valid_bits[req_index])
+        comb += cache_i_validdx.eq(cache_valids[req_index])
 
         m.submodules.dcache_pend = dc = DCachePendingHit(tlb_pte_way,
                                 tlb_valid_way, tlb_hit_way,
-                                cache_valid_idx, cache_tag_set,
+                                cache_i_validdx, cache_tag_set,
                                 r0.req.addr,
                                 hit_set)
 
@@ -855,8 +914,10 @@ class DCache(Elaboratable):
             # For a store, consider this a hit even if the row isn't
             # valid since it will be by the time we perform the store.
             # For a load, check the appropriate row valid bit.
-            valid = r1.rows_valid[req_row % ROW_PER_LINE]
-            comb += is_hit.eq(~r0.req.load | valid)
+            rrow = Signal(ROW_LINE_BITS)
+            comb += rrow.eq(req_row)
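+            # rrow truncates req_row to ROW_LINE_BITS (the row within
+            # the line), equivalent to req_row % ROW_PER_LINE, keeping
+            # the rows_valid Array index in range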
+            valid = r1.rows_valid[rrow]
+            comb += is_hit.eq((~r0.req.load) | valid)
             comb += hit_way.eq(replace_way)
 
         # Whether to use forwarded data for a load or not
@@ -886,9 +947,8 @@ class DCache(Elaboratable):
         # work out whether we have permission for this access
         # NB we don't yet implement AMR, thus no KUAP
         comb += rc_ok.eq(perm_attr.reference
-                         & (r0.req.load | perm_attr.changed)
-                )
-        comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv) &
+                         & (r0.req.load | perm_attr.changed))
+        comb += perm_ok.eq((r0.req.priv_mode | (~perm_attr.priv)) &
                            (perm_attr.wr_perm |
                               (r0.req.load & perm_attr.rd_perm)))
         comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
@@ -898,10 +958,15 @@ class DCache(Elaboratable):
         comb += op.eq(Op.OP_NONE)
         with m.If(go):
             with m.If(~access_ok):
+                m.d.sync += Display("DCACHE access fail valid_ra=%d p=%d rc=%d",
+                                 valid_ra, perm_ok, rc_ok)
                 comb += op.eq(Op.OP_BAD)
             with m.Elif(cancel_store):
+                m.d.sync += Display("DCACHE cancel store")
                 comb += op.eq(Op.OP_STCX_FAIL)
             with m.Else():
+                m.d.sync += Display("DCACHE valid_ra=%d nc=%d ld=%d",
+                                 valid_ra, nc, r0.req.load)
                 comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
                 with m.Switch(opsel):
                     with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
@@ -932,16 +997,15 @@ class DCache(Elaboratable):
         """Handle load-with-reservation and store-conditional instructions
         """
         comb = m.d.comb
-        sync = m.d.sync
 
         with m.If(r0_valid & r0.req.reserve):
             # XXX generate alignment interrupt if address
             # is not aligned XXX or if r0.req.nc = '1'
             with m.If(r0.req.load):
-                comb += set_rsrv.eq(1) # load with reservation
+                comb += set_rsrv.eq(r0.req.atomic_last) # load with reservation
             with m.Else():
-                comb += clear_rsrv.eq(1) # store conditional
-                with m.If(~reservation.valid |
+                comb += clear_rsrv.eq(r0.req.atomic_last) # store conditional
+                with m.If((~reservation.valid) |
                          (r0.req.addr[LINE_OFF_BITS:64] != reservation.addr)):
                     comb += cancel_store.eq(1)
 
@@ -958,7 +1022,7 @@ class DCache(Elaboratable):
                 sync += reservation.valid.eq(1)
                 sync += reservation.addr.eq(r0.req.addr[LINE_OFF_BITS:64])
 
-    def writeback_control(self, m, r1, cache_out):
+    def writeback_control(self, m, r1, cache_out_row):
         """Return data for loads & completion control logic
         """
         comb = m.d.comb
@@ -977,7 +1041,7 @@ class DCache(Elaboratable):
         with m.Else():
             comb += data_fwd.eq(r1.forward_data2)
 
-        comb += data_out.eq(cache_out[r1.hit_way])
+        comb += data_out.eq(cache_out_row)
 
         for i in range(8):
             with m.If(r1.forward_sel[i]):
@@ -1022,12 +1086,15 @@ class DCache(Elaboratable):
 
             # error cases complete without stalling
             with m.If(r1.ls_error):
-                sync += Display("completing ld/st with error")
+                with m.If(r1.dcbz):
+                    sync += Display("completing dcbz with error")
+                with m.Else():
+                    sync += Display("completing ld/st with error")
 
             # Slow ops (load miss, NC, stores)
             with m.If(r1.slow_valid):
-                sync += Display("completing store or load miss data=%x",
-                                data_out)
+                sync += Display("completing store or load miss adr=%x data=%x",
+                                r1.req.real_addr, data_out)
 
         with m.Else():
             # Request came from MMU
@@ -1040,10 +1107,10 @@ class DCache(Elaboratable):
 
             # Slow ops (i.e. load miss)
             with m.If(r1.slow_valid):
-                sync += Display("completing MMU load miss, data=%x",
-                                m_out.data)
+                sync += Display("completing MMU load miss, adr=%x data=%x",
+                                r1.req.real_addr, m_out.data)
 
-    def rams(self, m, r1, early_req_row, cache_out, replace_way):
+    def rams(self, m, r1, early_req_row, cache_out_row, replace_way):
         """rams
         Generate a cache RAM for each way. This handles the normal
         reads, writes from reloads and the special store-hit update
@@ -1055,19 +1122,19 @@ class DCache(Elaboratable):
         account by using 1-cycle delayed signals for load hits.
         """
         comb = m.d.comb
-        wb_in = self.wb_in
+        bus = self.bus
 
         for i in range(NUM_WAYS):
             do_read  = Signal(name="do_rd%d" % i)
-            rd_addr  = Signal(ROW_BITS)
+            rd_addr  = Signal(ROW_BITS, name="rd_addr_%d" % i)
             do_write = Signal(name="do_wr%d" % i)
-            wr_addr  = Signal(ROW_BITS)
-            wr_data  = Signal(WB_DATA_BITS)
+            wr_addr  = Signal(ROW_BITS, name="wr_addr_%d" % i)
+            wr_data  = Signal(WB_DATA_BITS, name="din_%d" % i)
             wr_sel   = Signal(ROW_SIZE)
             wr_sel_m = Signal(ROW_SIZE)
-            _d_out   = Signal(WB_DATA_BITS, name="dout_%d" % i)
+            _d_out   = Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t
 
-            way = CacheRam(ROW_BITS, WB_DATA_BITS, True)
+            way = CacheRam(ROW_BITS, WB_DATA_BITS, ADD_BUF=True, ram_num=i)
             setattr(m.submodules, "cacheram_%d" % i, way)
 
             comb += way.rd_en.eq(do_read)
@@ -1079,8 +1146,9 @@ class DCache(Elaboratable):
 
             # Cache hit reads
             comb += do_read.eq(1)
-            comb += rd_addr.eq(early_req_row[:ROW_BITS])
-            comb += cache_out[i].eq(_d_out)
+            comb += rd_addr.eq(early_req_row)
+            with m.If(r1.hit_way == i):
+                comb += cache_out_row.eq(_d_out)
 
             # Write mux:
             #
@@ -1103,13 +1171,13 @@ class DCache(Elaboratable):
                 with m.If(r1.dcbz):
                     comb += wr_data.eq(0)
                 with m.Else():
-                    comb += wr_data.eq(wb_in.dat)
+                    comb += wr_data.eq(bus.dat_r)
                 comb += wr_addr.eq(r1.store_row)
                 comb += wr_sel.eq(~0) # all 1s
 
-            with m.If((r1.state == State.RELOAD_WAIT_ACK)
-                      & wb_in.ack & (replace_way == i)):
-                comb += do_write.eq(1)
+                with m.If((r1.state == State.RELOAD_WAIT_ACK)
+                          & bus.ack & (replace_way == i)):
+                    comb += do_write.eq(1)
 
             # Mask write selects with do_write since BRAM
             # doesn't have a global write-enable
@@ -1150,19 +1218,20 @@ class DCache(Elaboratable):
             sync += r1.cache_hit.eq(0)
 
         with m.If(req_op == Op.OP_BAD):
-            # Display(f"Signalling ld/st error valid_ra={valid_ra}"
-            #      f"rc_ok={rc_ok} perm_ok={perm_ok}"
+            sync += Display("Signalling ld/st error "
+                            "ls_error=%i mmu_error=%i cache_paradox=%i",
+                            ~r0.mmu_req, r0.mmu_req, access_ok)
             sync += r1.ls_error.eq(~r0.mmu_req)
             sync += r1.mmu_error.eq(r0.mmu_req)
             sync += r1.cache_paradox.eq(access_ok)
 
-            with m.Else():
-                sync += r1.ls_error.eq(0)
-                sync += r1.mmu_error.eq(0)
-                sync += r1.cache_paradox.eq(0)
+        with m.Else():
+            sync += r1.ls_error.eq(0)
+            sync += r1.mmu_error.eq(0)
+            sync += r1.cache_paradox.eq(0)
 
         with m.If(req_op == Op.OP_STCX_FAIL):
-            r1.stcx_fail.eq(1)
+            sync += r1.stcx_fail.eq(1)
         with m.Else():
             sync += r1.stcx_fail.eq(0)
 
@@ -1180,17 +1249,16 @@ class DCache(Elaboratable):
     # All wishbone requests generation is done here.
     # This machine operates at stage 1.
     def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
-                    cache_valid_bits, r0, replace_way,
+                    cache_valids, r0, replace_way,
                     req_hit_way, req_same_tag,
                     r0_valid, req_op, cache_tags, req_go, ra):
 
         comb = m.d.comb
         sync = m.d.sync
-        wb_in = self.wb_in
+        bus = self.bus
+        d_in = self.d_in
 
         req         = MemAccessRequest("mreq_ds")
-        acks        = Signal(3)
-        adjust_acks = Signal(3)
 
         req_row = Signal(ROW_BITS)
         req_idx = Signal(INDEX_BITS)
@@ -1218,7 +1286,7 @@ class DCache(Elaboratable):
             with m.If(r1.dcbz):
                 sync += r1.forward_data1.eq(0)
             with m.Else():
-                sync += r1.forward_data1.eq(wb_in.dat)
+                sync += r1.forward_data1.eq(bus.dat_r)
             sync += r1.forward_sel1.eq(~0) # all 1s
             sync += r1.forward_way1.eq(replace_way)
             sync += r1.forward_row1.eq(r1.store_row)
@@ -1234,8 +1302,7 @@ class DCache(Elaboratable):
         # complete tlbies and TLB loads in the third cycle
         sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
 
-        with m.If((req_op == Op.OP_LOAD_HIT)
-                  | (req_op == Op.OP_STCX_FAIL)):
+        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
             with m.If(~r0.mmu_req):
                 sync += r1.ls_valid.eq(1)
             with m.Else():
@@ -1247,6 +1314,11 @@ class DCache(Elaboratable):
                 with m.If(i == replace_way):
                     ct = Signal(TAG_RAM_WIDTH)
                     comb += ct.eq(cache_tags[r1.store_index])
+                    """
+TODO: check this
+cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
+                    (TAG_WIDTH - 1 downto TAG_BITS => '0') & r1.reload_tag;
+                    """
                     comb += ct.word_select(i, TAG_WIDTH).eq(r1.reload_tag)
                     sync += cache_tags[r1.store_index].eq(ct)
             sync += r1.store_way.eq(replace_way)
@@ -1263,10 +1335,13 @@ class DCache(Elaboratable):
             comb += req.dcbz.eq(r0.req.dcbz)
             comb += req.real_addr.eq(ra)
 
-            with m.If(~r0.req.dcbz):
+            with m.If(r0.req.dcbz):
+                # force data to 0 for dcbz
+                comb += req.data.eq(0)
+            with m.Elif(r0.d_valid):
                 comb += req.data.eq(r0.req.data)
             with m.Else():
-                comb += req.data.eq(0)
+                comb += req.data.eq(d_in.data)
 
             # Select all bytes for dcbz
             # and for cacheable loads
@@ -1291,7 +1366,7 @@ class DCache(Elaboratable):
         with m.Switch(r1.state):
 
             with m.Case(State.IDLE):
-                sync += r1.wb.adr.eq(req.real_addr)
+                sync += r1.wb.adr.eq(req.real_addr[ROW_LINE_BITS:])
                 sync += r1.wb.sel.eq(req.byte_sel)
                 sync += r1.wb.dat.eq(req.data)
                 sync += r1.dcbz.eq(req.dcbz)
@@ -1300,7 +1375,7 @@ class DCache(Elaboratable):
                 # for subsequent stores.
                 sync += r1.store_index.eq(req_idx)
                 sync += r1.store_row.eq(req_row)
-                sync += r1.end_row_ix.eq(get_row_of_line(req_row))
+                sync += r1.end_row_ix.eq(get_row_of_line(req_row)-1)
                 sync += r1.reload_tag.eq(req_tag)
                 sync += r1.req.same_tag.eq(1)
 
@@ -1381,35 +1456,38 @@ class DCache(Elaboratable):
                 # Requests are all sent if stb is 0
                 comb += ld_stbs_done.eq(~r1.wb.stb)
 
-                with m.If((~wb_in.stall) & r1.wb.stb):
-                    # That was the last word?
-                    # We are done sending.
-                    # Clear stb and set ld_stbs_done
-                    # so we can handle an eventual
-                    # last ack on the same cycle.
-                    with m.If(is_last_row_addr(r1.wb.adr, r1.end_row_ix)):
+                # If we are still sending requests, was one accepted?
+                with m.If((~bus.stall) & r1.wb.stb):
+                    # That was the last word?  We are done sending.
+                    # Clear stb and set ld_stbs_done so we can handle an
+                    # eventual last ack on the same cycle.
+                    # sigh - reconstruct wb adr with 3 extra 0s at front
+                    wb_adr = Cat(Const(0, ROW_OFF_BITS), r1.wb.adr)
+                    with m.If(is_last_row_addr(wb_adr, r1.end_row_ix)):
                         sync += r1.wb.stb.eq(0)
                         comb += ld_stbs_done.eq(1)
 
                     # Calculate the next row address in the current cache line
-                    rarange = Signal(LINE_OFF_BITS-ROW_OFF_BITS)
-                    comb += rarange.eq(r1.wb.adr[ROW_OFF_BITS:LINE_OFF_BITS]+1)
-                    sync += r1.wb.adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(rarange)
+                    row = Signal(LINE_OFF_BITS-ROW_OFF_BITS)
+                    comb += row.eq(r1.wb.adr)
+                    sync += r1.wb.adr[:LINE_OFF_BITS-ROW_OFF_BITS].eq(row+1)
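+                    # r1.wb.adr holds a row address (the ROW_OFF_BITS
+                    # byte-offset bits are stripped, as reconstructed
+                    # above), so +1 on its low bits steps one row,
+                    # wrapping within the cache line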
 
                 # Incoming acks processing
-                sync += r1.forward_valid1.eq(wb_in.ack)
-                with m.If(wb_in.ack):
-                    sync += r1.rows_valid[r1.store_row % ROW_PER_LINE].eq(1)
+                sync += r1.forward_valid1.eq(bus.ack)
+                with m.If(bus.ack):
+                    srow = Signal(ROW_LINE_BITS)
+                    comb += srow.eq(r1.store_row)
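+                    # truncate store_row to the row-within-line bits,
+                    # as done with rrow in dcache_request above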
+                    sync += r1.rows_valid[srow].eq(1)
 
                     # If this is the data we were looking for,
                     # we can complete the request next cycle.
                     # Compare the whole address in case the
                     # request in r1.req is not the one that
                     # started this refill.
-                    with m.If(r1.full & r1.req.same_tag &
+                    with m.If(req.valid & r1.req.same_tag &
                               ((r1.dcbz & r1.req.dcbz) |
                                (~r1.dcbz & (r1.req.op == Op.OP_LOAD_MISS))) &
-                                (r1.store_row == get_row(r1.req.real_addr))):
+                                (r1.store_row == get_row(req.real_addr))):
                         sync += r1.full.eq(0)
                         sync += r1.slow_valid.eq(1)
                         with m.If(~r1.mmu_req):
@@ -1427,16 +1505,23 @@ class DCache(Elaboratable):
 
                         # Cache line is now valid
                         cv = Signal(INDEX_BITS)
-                        comb += cv.eq(cache_valid_bits[r1.store_index])
+                        comb += cv.eq(cache_valids[r1.store_index])
                         comb += cv.bit_select(r1.store_way, 1).eq(1)
-                        sync += cache_valid_bits[r1.store_index].eq(cv)
+                        sync += cache_valids[r1.store_index].eq(cv)
+
                         sync += r1.state.eq(State.IDLE)
+                        sync += Display("cache valid set %x "
+                                        "idx %d way %d",
+                                         cv, r1.store_index, r1.store_way)
 
                     # Increment store row counter
                     sync += r1.store_row.eq(next_row(r1.store_row))
 
             with m.Case(State.STORE_WAIT_ACK):
                 st_stbs_done = Signal()
+                acks        = Signal(3)
+                adjust_acks = Signal(3)
+
                 comb += st_stbs_done.eq(~r1.wb.stb)
                 comb += acks.eq(r1.acks_pending)
 
@@ -1451,16 +1536,16 @@ class DCache(Elaboratable):
                 sync += r1.acks_pending.eq(adjust_acks)
 
                 # Clear stb when slave accepted request
-                with m.If(~wb_in.stall):
+                with m.If(~bus.stall):
                     # See if there is another store waiting
                     # to be done which is in the same real page.
                     with m.If(req.valid):
-                        ra = req.real_addr[0:SET_SIZE_BITS]
-                        sync += r1.wb.adr[0:SET_SIZE_BITS].eq(ra)
+                        _ra = req.real_addr[ROW_LINE_BITS:SET_SIZE_BITS]
+                        sync += r1.wb.adr[0:SET_SIZE_BITS].eq(_ra)
                         sync += r1.wb.dat.eq(req.data)
                         sync += r1.wb.sel.eq(req.byte_sel)
 
-                    with m.Elif((adjust_acks < 7) & req.same_tag &
+                    with m.If((adjust_acks < 7) & req.same_tag &
                                 ((req.op == Op.OP_STORE_MISS)
                                  | (req.op == Op.OP_STORE_HIT))):
                         sync += r1.wb.stb.eq(1)
@@ -1480,7 +1565,7 @@ class DCache(Elaboratable):
                         comb += st_stbs_done.eq(1)
 
                 # Got ack ? See if complete.
-                with m.If(wb_in.ack):
+                with m.If(bus.ack):
                     with m.If(st_stbs_done & (adjust_acks == 1)):
                         sync += r1.state.eq(State.IDLE)
                         sync += r1.wb.cyc.eq(0)
@@ -1489,11 +1574,11 @@ class DCache(Elaboratable):
 
             with m.Case(State.NC_LOAD_WAIT_ACK):
                 # Clear stb when slave accepted request
-                with m.If(~wb_in.stall):
+                with m.If(~bus.stall):
                     sync += r1.wb.stb.eq(0)
 
                 # Got ack ? complete.
-                with m.If(wb_in.ack):
+                with m.If(bus.ack):
                     sync += r1.state.eq(State.IDLE)
                     sync += r1.full.eq(0)
                     sync += r1.slow_valid.eq(1)
@@ -1511,22 +1596,23 @@ class DCache(Elaboratable):
     def dcache_log(self, m, r1, valid_ra, tlb_hit_way, stall_out):
 
         sync = m.d.sync
-        d_out, wb_in, log_out = self.d_out, self.wb_in, self.log_out
+        d_out, bus, log_out = self.d_out, self.bus, self.log_out
 
         sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit_way[:3],
                                stall_out, req_op[:3], d_out.valid, d_out.error,
-                               r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
-                               r1.wb.adr[3:6]))
+                               r1.wb.cyc, r1.wb.stb, bus.ack, bus.stall,
+                               r1.real_adr[3:6]))
 
     def elaborate(self, platform):
 
         m = Module()
         comb = m.d.comb
+        d_in = self.d_in
 
         # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
         cache_tags       = CacheTagArray()
         cache_tag_set    = Signal(TAG_RAM_WIDTH)
-        cache_valid_bits = CacheValidBitsArray()
+        cache_valids = CacheValidBitsArray()
 
         # TODO attribute ram_style : string;
         # TODO attribute ram_style of cache_tags : signal is "distributed";
@@ -1571,7 +1657,7 @@ class DCache(Elaboratable):
         use_forward1_next = Signal()
         use_forward2_next = Signal()
 
-        cache_out         = CacheRamOut()
+        cache_out_row     = Signal(WB_DATA_BITS)
 
         plru_victim       = PLRUOut()
         replace_way       = Signal(WAY_BITS)
@@ -1601,12 +1687,23 @@ class DCache(Elaboratable):
         comb += self.m_out.stall.eq(0)
 
         # Hold off the request in r0 when r1 has an uncompleted request
-        comb += r0_stall.eq(r0_full & r1.full)
-        comb += r0_valid.eq(r0_full & ~r1.full)
+        comb += r0_stall.eq(r0_full & (r1.full | d_in.hold))
+        comb += r0_valid.eq(r0_full & ~r1.full & ~d_in.hold)
         comb += self.stall_out.eq(r0_stall)
 
+
+        # deal with litex not doing wishbone pipeline mode
+        # XXX this is the wrong way to do it: FIFOs are needed in the SRAM
+        # test so that stb/ack match up. the same thing is done in icache.py
+        comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
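+        # (this is the cyc & ~ack hard-coding described in the module
+        #  docstring: WB4 spec, p84, section 5.2.1)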
+
         # Wire up wishbone request latch out of stage 1
-        comb += self.wb_out.eq(r1.wb)
+        comb += self.bus.we.eq(r1.wb.we)
+        comb += self.bus.adr.eq(r1.wb.adr)
+        comb += self.bus.sel.eq(r1.wb.sel)
+        comb += self.bus.stb.eq(r1.wb.stb)
+        comb += self.bus.dat_w.eq(r1.wb.dat)
+        comb += self.bus.cyc.eq(r1.wb.cyc)
 
         # call sub-functions putting everything together, using shared
         # signals established above
@@ -1624,7 +1721,7 @@ class DCache(Elaboratable):
         self.maybe_tlb_plrus(m, r1, tlb_plru_victim)
         self.cache_tag_read(m, r0_stall, req_index, cache_tag_set, cache_tags)
         self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
-                           r0_valid, r1, cache_valid_bits, replace_way,
+                           r0_valid, r1, cache_valids, replace_way,
                            use_forward1_next, use_forward2_next,
                            req_hit_way, plru_victim, rc_ok, perm_attr,
                            valid_ra, perm_ok, access_ok, req_op, req_go,
@@ -1635,206 +1732,22 @@ class DCache(Elaboratable):
                            r0_valid, r0, reservation)
         self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                            reservation, r0)
-        self.writeback_control(m, r1, cache_out)
-        self.rams(m, r1, early_req_row, cache_out, replace_way)
+        self.writeback_control(m, r1, cache_out_row)
+        self.rams(m, r1, early_req_row, cache_out_row, replace_way)
         self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
                         req_hit_way, req_index, req_tag, access_ok,
                         tlb_hit, tlb_hit_way, tlb_req_index)
         self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
-                    cache_valid_bits, r0, replace_way,
+                    cache_valids, r0, replace_way,
                     req_hit_way, req_same_tag,
                          r0_valid, req_op, cache_tags, req_go, ra)
         #self.dcache_log(m, r1, valid_ra, tlb_hit_way, stall_out)
 
         return m
 
-def dcache_load(dut, addr, nc=0):
-    yield dut.d_in.load.eq(1)
-    yield dut.d_in.nc.eq(nc)
-    yield dut.d_in.addr.eq(addr)
-    yield dut.d_in.byte_sel.eq(~0)
-    yield dut.d_in.valid.eq(1)
-    yield
-    yield dut.d_in.valid.eq(0)
-    yield dut.d_in.byte_sel.eq(0)
-    yield
-    while not (yield dut.d_out.valid):
-        yield
-    data = yield dut.d_out.data
-    return data
-
-
-def dcache_store(dut, addr, data, nc=0):
-    yield dut.d_in.load.eq(0)
-    yield dut.d_in.nc.eq(nc)
-    yield dut.d_in.data.eq(data)
-    yield dut.d_in.byte_sel.eq(~0)
-    yield dut.d_in.addr.eq(addr)
-    yield dut.d_in.valid.eq(1)
-    yield
-    yield dut.d_in.valid.eq(0)
-    yield dut.d_in.byte_sel.eq(0)
-    yield
-    while not (yield dut.d_out.valid):
-        yield
-
-
-def dcache_random_sim(dut):
-
-    # start with stack of zeros
-    sim_mem = [0] * 512
-
-    # clear stuff
-    yield dut.d_in.valid.eq(0)
-    yield dut.d_in.load.eq(0)
-    yield dut.d_in.priv_mode.eq(1)
-    yield dut.d_in.nc.eq(0)
-    yield dut.d_in.addr.eq(0)
-    yield dut.d_in.data.eq(0)
-    yield dut.m_in.valid.eq(0)
-    yield dut.m_in.addr.eq(0)
-    yield dut.m_in.pte.eq(0)
-    # wait 4 * clk_period
-    yield
-    yield
-    yield
-    yield
-
-    print ()
-
-    for i in range(256):
-        addr = randint(0, 255)
-        data = randint(0, (1<<64)-1)
-        sim_mem[addr] = data
-        addr *= 8
-
-        print ("testing %x data %x" % (addr, data))
-
-        yield from dcache_load(dut, addr)
-        yield from dcache_store(dut, addr, data)
-
-        addr = randint(0, 255)
-        sim_data = sim_mem[addr]
-        addr *= 8
-
-        data = yield from dcache_load(dut, addr)
-        assert data == sim_data, \
-            "check %x data %x != %x" % (addr, data, sim_data)
-
-    for addr in range(8):
-        data = yield from dcache_load(dut, addr*8)
-        assert data == sim_mem[addr], \
-            "final check %x data %x != %x" % (addr*8, data, sim_mem[addr])
-
-def dcache_sim(dut):
-    # clear stuff
-    yield dut.d_in.valid.eq(0)
-    yield dut.d_in.load.eq(0)
-    yield dut.d_in.priv_mode.eq(1)
-    yield dut.d_in.nc.eq(0)
-    yield dut.d_in.addr.eq(0)
-    yield dut.d_in.data.eq(0)
-    yield dut.m_in.valid.eq(0)
-    yield dut.m_in.addr.eq(0)
-    yield dut.m_in.pte.eq(0)
-    # wait 4 * clk_period
-    yield
-    yield
-    yield
-    yield
-
-    # Cacheable read of address 4
-    data = yield from dcache_load(dut, 0x4)
-    addr = yield dut.d_in.addr
-    assert data == 0x0000000100000000, \
-        f"data @%x=%x expected 0x0000000100000000" % (addr, data)
-
-    # Cacheable read of address 20
-    data = yield from dcache_load(dut, 0x20)
-    addr = yield dut.d_in.addr
-    assert data == 0x0000000100000000, \
-        f"data @%x=%x expected 0x0000000100000000" % (addr, data)
-
-    # Cacheable read of address 30
-    data = yield from dcache_load(dut, 0x530)
-    addr = yield dut.d_in.addr
-    assert data == 0x0000014D0000014C, \
-        f"data @%x=%x expected 0000014D0000014C" % (addr, data)
-
-    # 2nd Cacheable read of address 30
-    data = yield from dcache_load(dut, 0x530)
-    addr = yield dut.d_in.addr
-    assert data == 0x0000014D0000014C, \
-        f"data @%x=%x expected 0000014D0000014C" % (addr, data)
-
-    # Non-cacheable read of address 100
-    data = yield from dcache_load(dut, 0x100, nc=1)
-    addr = yield dut.d_in.addr
-    assert data == 0x0000004100000040, \
-        f"data @%x=%x expected 0000004100000040" % (addr, data)
-
-    # Store at address 530
-    yield from dcache_store(dut, 0x530, 0x121)
-
-    # Store at address 30
-    yield from dcache_store(dut, 0x530, 0x12345678)
-
-    # 3nd Cacheable read of address 530
-    data = yield from dcache_load(dut, 0x530)
-    addr = yield dut.d_in.addr
-    assert data == 0x12345678, \
-        f"data @%x=%x expected 0x12345678" % (addr, data)
-
-    # 4th Cacheable read of address 30
-    data = yield from dcache_load(dut, 0x20)
-    addr = yield dut.d_in.addr
-    assert data == 0x12345678, \
-        f"data @%x=%x expected 0x12345678" % (addr, data)
-
-    yield
-    yield
-    yield
-    yield
-
-
-def test_dcache(mem, test_fn, test_name):
-    dut = DCache()
-
-    memory = Memory(width=64, depth=16*64, init=mem)
-    sram = SRAM(memory=memory, granularity=8)
-
-    m = Module()
-    m.submodules.dcache = dut
-    m.submodules.sram = sram
-
-    m.d.comb += sram.bus.cyc.eq(dut.wb_out.cyc)
-    m.d.comb += sram.bus.stb.eq(dut.wb_out.stb)
-    m.d.comb += sram.bus.we.eq(dut.wb_out.we)
-    m.d.comb += sram.bus.sel.eq(dut.wb_out.sel)
-    m.d.comb += sram.bus.adr.eq(dut.wb_out.adr[3:])
-    m.d.comb += sram.bus.dat_w.eq(dut.wb_out.dat)
-
-    m.d.comb += dut.wb_in.ack.eq(sram.bus.ack)
-    m.d.comb += dut.wb_in.dat.eq(sram.bus.dat_r)
-
-    # nmigen Simulation
-    sim = Simulator(m)
-    sim.add_clock(1e-6)
-
-    sim.add_sync_process(wrap(test_fn(dut)))
-    with sim.write_vcd('test_dcache%s.vcd' % test_name):
-        sim.run()
 
 if __name__ == '__main__':
     dut = DCache()
     vl = rtlil.convert(dut, ports=[])
     with open("test_dcache.il", "w") as f:
         f.write(vl)
-
-    mem = []
-    for i in range(0,512):
-        mem.append((i*2)| ((i*2+1)<<32))
-
-    test_dcache(mem, dcache_sim, "")
-    #test_dcache(None, dcache_random_sim, "random")
-