reduce icache/dcache TLB sizes
diff --git a/src/soc/experiment/dcache.py b/src/soc/experiment/dcache.py
index 7223ef3f3168a4f062d24d53778bd00307c78453..910149e50a5dcc73955592d37dc97f8ec4a58690 100644
--- a/src/soc/experiment/dcache.py
+++ b/src/soc/experiment/dcache.py
@@ -1,3 +1,17 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2020,2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
+# Copyright (C) 2020 Cole Poirier
+# Copyright (C) 2020,2021 Cesar Strauss
+# Copyright (C) 2021 Tobias Platen
+#
+# Original dcache.vhdl Copyright of its authors and licensed
+# by IBM under CC-BY 4.0
+# https://github.com/antonblanchard/microwatt
+#
+# Conversion to nmigen funded by NLnet and NGI POINTER under EU Grants
+# 871528 and 957073, under the LGPL-v3+ License
+
 """DCache
 
 based on Anton Blanchard microwatt dcache.vhdl
@@ -13,6 +27,8 @@ Links:
 
 * https://libre-soc.org/3d_gpu/architecture/set_associative_cache.jpg
 * https://bugs.libre-soc.org/show_bug.cgi?id=469
+* https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
+  (discussion about brams for ECP5)
 
 """
 
@@ -25,8 +41,9 @@ sys.setrecursionlimit(1000000)
 from enum import Enum, unique
 
 from nmigen import (Module, Signal, Elaboratable, Cat, Repl, Array, Const,
-                    Record)
+                    Record, Memory)
 from nmutil.util import Display
+from nmigen.lib.coding import Decoder
 
 from copy import deepcopy
 from random import randint, seed
@@ -48,8 +65,8 @@ from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                 WBIOMasterOut, WBIOSlaveOut)
 
 from soc.experiment.cache_ram import CacheRam
-#from soc.experiment.plru import PLRU
-from nmutil.plru import PLRU
+from soc.experiment.plru import PLRU, PLRUs
+#from nmutil.plru import PLRU, PLRUs
 
 # for test
 from soc.bus.sram import SRAM
@@ -62,223 +79,248 @@ from nmutil.sim_tmp_alternative import Simulator
 
 from nmutil.util import wrap
 
-
-# TODO: make these parameters of DCache at some point
-LINE_SIZE = 64    # Line size in bytes
-NUM_LINES = 16    # Number of lines in a set
-NUM_WAYS = 4      # Number of ways
-TLB_SET_SIZE = 64 # L1 DTLB entries per set
-TLB_NUM_WAYS = 2  # L1 DTLB number of sets
-TLB_LG_PGSZ = 12  # L1 DTLB log_2(page_size)
 LOG_LENGTH = 0    # Non-zero to enable log data collection
 
-# BRAM organisation: We never access more than
-#     -- WB_DATA_BITS at a time so to save
-#     -- resources we make the array only that wide, and
-#     -- use consecutive indices to make a cache "line"
-#     --
-#     -- ROW_SIZE is the width in bytes of the BRAM
-#     -- (based on WB, so 64-bits)
-ROW_SIZE = WB_DATA_BITS // 8;
-
-# ROW_PER_LINE is the number of row (wishbone
-# transactions) in a line
-ROW_PER_LINE = LINE_SIZE // ROW_SIZE
-
-# BRAM_ROWS is the number of rows in BRAM needed
-# to represent the full dcache
-BRAM_ROWS = NUM_LINES * ROW_PER_LINE
-
-print ("ROW_SIZE", ROW_SIZE)
-print ("ROW_PER_LINE", ROW_PER_LINE)
-print ("BRAM_ROWS", BRAM_ROWS)
-print ("NUM_WAYS", NUM_WAYS)
-
-# Bit fields counts in the address
-
-# REAL_ADDR_BITS is the number of real address
-# bits that we store
-REAL_ADDR_BITS = 56
-
-# ROW_BITS is the number of bits to select a row
-ROW_BITS = log2_int(BRAM_ROWS)
-
-# ROW_LINE_BITS is the number of bits to select
-# a row within a line
-ROW_LINE_BITS = log2_int(ROW_PER_LINE)
-
-# LINE_OFF_BITS is the number of bits for
-# the offset in a cache line
-LINE_OFF_BITS = log2_int(LINE_SIZE)
-
-# ROW_OFF_BITS is the number of bits for
-# the offset in a row
-ROW_OFF_BITS = log2_int(ROW_SIZE)
-
-# INDEX_BITS is the number if bits to
-# select a cache line
-INDEX_BITS = log2_int(NUM_LINES)
-
-# SET_SIZE_BITS is the log base 2 of the set size
-SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
-
-# TAG_BITS is the number of bits of
-# the tag part of the address
-TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
-
-# TAG_WIDTH is the width in bits of each way of the tag RAM
-TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
-
-# WAY_BITS is the number of bits to select a way
-WAY_BITS = log2_int(NUM_WAYS)
-
-# Example of layout for 32 lines of 64 bytes:
-layout = """\
-  ..  tag    |index|  line  |
-  ..         |   row   |    |
-  ..         |     |---|    | ROW_LINE_BITS  (3)
-  ..         |     |--- - --| LINE_OFF_BITS (6)
-  ..         |         |- --| ROW_OFF_BITS  (3)
-  ..         |----- ---|    | ROW_BITS      (8)
-  ..         |-----|        | INDEX_BITS    (5)
-  .. --------|              | TAG_BITS      (45)
-"""
-print (layout)
-print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
-            (TAG_BITS, INDEX_BITS, ROW_BITS,
-             ROW_OFF_BITS, LINE_OFF_BITS, ROW_LINE_BITS))
-print ("index @: %d-%d" % (LINE_OFF_BITS, SET_SIZE_BITS))
-print ("row @: %d-%d" % (LINE_OFF_BITS, ROW_OFF_BITS))
-print ("tag @: %d-%d width %d" % (SET_SIZE_BITS, REAL_ADDR_BITS, TAG_WIDTH))
-
-TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS
-
-print ("TAG_RAM_WIDTH", TAG_RAM_WIDTH)
-
-def CacheTagArray():
-    tag_layout = [('valid', 1),
-                  ('tag', TAG_RAM_WIDTH),
-                 ]
-    return Array(Record(tag_layout, name="tag%d" % x) for x in range(NUM_LINES))
-
-def RowPerLineValidArray():
-    return Array(Signal(name="rows_valid%d" % x) \
-                        for x in range(ROW_PER_LINE))
-
-# L1 TLB
-TLB_SET_BITS     = log2_int(TLB_SET_SIZE)
-TLB_WAY_BITS     = log2_int(TLB_NUM_WAYS)
-TLB_EA_TAG_BITS  = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
-TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
-TLB_PTE_BITS     = 64
-TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS;
-
 def ispow2(x):
     return (1<<log2_int(x, False)) == x
 
-assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not multiple of ROW_SIZE"
-assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
-assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
-assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
-assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), "geometry bits don't add up"
-assert (LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS), \
-        "geometry bits don't add up"
-assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS), \
-        "geometry bits don't add up"
-assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS), \
-         "geometry bits don't add up"
-assert 64 == WB_DATA_BITS, "Can't yet handle wb width that isn't 64-bits"
-assert SET_SIZE_BITS <= TLB_LG_PGSZ, "Set indexed by virtual address"
-
-def TLBHit(name):
-    return Record([('valid', 1),
-                   ('way', TLB_WAY_BITS)], name=name)
-
-def TLBTagEAArray():
-    return Array(Signal(TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
-                for x in range (TLB_NUM_WAYS))
-
-def TLBRecord(name):
-    tlb_layout = [('valid', TLB_NUM_WAYS),
-                  ('tag', TLB_TAG_WAY_BITS),
-                  ('pte', TLB_PTE_WAY_BITS)
-                 ]
-    return Record(tlb_layout, name=name)
-
-def TLBArray():
-    return Array(TLBRecord(name="tlb%d" % x) for x in range(TLB_SET_SIZE))
-
-def HitWaySet():
-    return Array(Signal(WAY_BITS, name="hitway_%d" % x) \
-                        for x in range(TLB_NUM_WAYS))
-
-# Cache RAM interface
-def CacheRamOut():
-    return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
-                 for x in range(NUM_WAYS))
-
-# PLRU output interface
-def PLRUOut():
-    return Array(Signal(WAY_BITS, name="plru_out%d" % x) \
-                for x in range(NUM_LINES))
-
-# TLB PLRU output interface
-def TLBPLRUOut():
-    return Array(Signal(TLB_WAY_BITS, name="tlbplru_out%d" % x) \
-                for x in range(TLB_SET_SIZE))
-
-# Helper functions to decode incoming requests
-#
-# Return the cache line index (tag index) for an address
-def get_index(addr):
-    return addr[LINE_OFF_BITS:SET_SIZE_BITS]
 
-# Return the cache row index (data memory) for an address
-def get_row(addr):
-    return addr[ROW_OFF_BITS:SET_SIZE_BITS]
+class DCacheConfig:
+    def __init__(self, LINE_SIZE = 64,    # Line size in bytes
+                       NUM_LINES = 64,    # Number of lines in a set
+                       NUM_WAYS = 2,      # Number of ways
+                       TLB_SET_SIZE = 64, # L1 DTLB entries per set
+                       TLB_NUM_WAYS = 2,  # L1 DTLB number of sets
+                       TLB_LG_PGSZ = 12): # L1 DTLB log_2(page_size)
+        self.LINE_SIZE = LINE_SIZE
+        self.NUM_LINES = NUM_LINES
+        self.NUM_WAYS = NUM_WAYS
+        self.TLB_SET_SIZE = TLB_SET_SIZE
+        self.TLB_NUM_WAYS = TLB_NUM_WAYS
+        self.TLB_LG_PGSZ = TLB_LG_PGSZ
+
+        # BRAM organisation: We never access more than
+        #     -- WB_DATA_BITS at a time so to save
+        #     -- resources we make the array only that wide, and
+        #     -- use consecutive indices to make a cache "line"
+        #     --
+        #     -- ROW_SIZE is the width in bytes of the BRAM
+        #     -- (based on WB, so 64-bits)
+        self.ROW_SIZE = WB_DATA_BITS // 8
+
+        # ROW_PER_LINE is the number of rows (wishbone
+        # transactions) in a line
+        self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
+
+        # BRAM_ROWS is the number of rows in BRAM needed
+        # to represent the full dcache
+        self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
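+
+        # worked example with the defaults above (WB_DATA_BITS is 64,
+        # asserted below): ROW_SIZE = 64//8 = 8 bytes,
+        # ROW_PER_LINE = 64//8 = 8 rows, BRAM_ROWS = 64*8 = 512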
+
+        print ("ROW_SIZE", self.ROW_SIZE)
+        print ("ROW_PER_LINE", self.ROW_PER_LINE)
+        print ("BRAM_ROWS", self.BRAM_ROWS)
+        print ("NUM_WAYS", self.NUM_WAYS)
+
+        # Bit fields counts in the address
+
+        # REAL_ADDR_BITS is the number of real address
+        # bits that we store
+        self.REAL_ADDR_BITS = 56
+
+        # ROW_BITS is the number of bits to select a row
+        self.ROW_BITS = log2_int(self.BRAM_ROWS)
+
+        # ROW_LINE_BITS is the number of bits to select
+        # a row within a line
+        self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
+
+        # LINE_OFF_BITS is the number of bits for
+        # the offset in a cache line
+        self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
+
+        # ROW_OFF_BITS is the number of bits for
+        # the offset in a row
+        self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
+
+        # INDEX_BITS is the number of bits to
+        # select a cache line
+        self.INDEX_BITS = log2_int(self.NUM_LINES)
+
+        # SET_SIZE_BITS is the log base 2 of the set size
+        self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
+
+        # TAG_BITS is the number of bits of
+        # the tag part of the address
+        self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
+
+        # TAG_WIDTH is the width in bits of each way of the tag RAM
+        # (TAG_BITS rounded up to a multiple of 8)
+        self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)
+
+        # WAY_BITS is the number of bits to select a way
+        self.WAY_BITS = log2_int(self.NUM_WAYS)
+
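+        # with the defaults (NUM_LINES=64, LINE_SIZE=64, NUM_WAYS=2) these
+        # work out as: LINE_OFF_BITS=6, INDEX_BITS=6, SET_SIZE_BITS=12,
+        # TAG_BITS=56-12=44, TAG_WIDTH=48, WAY_BITS=1
+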
+        # Example of layout, computed from the geometry above:
+        layout = f"""\
+          DCache Layout:
+         |.. -----------------------| REAL_ADDR_BITS ({self.REAL_ADDR_BITS})
+          ..         |--------------| SET_SIZE_BITS ({self.SET_SIZE_BITS})
+          ..  tag    |index|  line  |
+          ..         |   row   |    |
+          ..         |     |---|    | ROW_LINE_BITS ({self.ROW_LINE_BITS})
+          ..         |     |--- - --| LINE_OFF_BITS ({self.LINE_OFF_BITS})
+          ..         |         |- --| ROW_OFF_BITS  ({self.ROW_OFF_BITS})
+          ..         |----- ---|    | ROW_BITS      ({self.ROW_BITS})
+          ..         |-----|        | INDEX_BITS    ({self.INDEX_BITS})
+          .. --------|              | TAG_BITS      ({self.TAG_BITS})
+        """
+        print (layout)
+        print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
+                    (self.TAG_BITS, self.INDEX_BITS, self.ROW_BITS,
+                     self.ROW_OFF_BITS, self.LINE_OFF_BITS, self.ROW_LINE_BITS))
+        print ("index @: %d-%d" % (self.LINE_OFF_BITS, self.SET_SIZE_BITS))
+        print ("row @: %d-%d" % (self.LINE_OFF_BITS, self.ROW_OFF_BITS))
+        print ("tag @: %d-%d width %d" % (self.SET_SIZE_BITS,
+                                          self.REAL_ADDR_BITS, self.TAG_WIDTH))
+
+        self.TAG_RAM_WIDTH = self.TAG_WIDTH * self.NUM_WAYS
+
+        print ("TAG_RAM_WIDTH", self.TAG_RAM_WIDTH)
+        print ("    TAG_WIDTH", self.TAG_WIDTH)
+        print ("     NUM_WAYS", self.NUM_WAYS)
+        print ("    NUM_LINES", self.NUM_LINES)
+
+        # L1 TLB
+        self.TLB_SET_BITS     = log2_int(self.TLB_SET_SIZE)
+        self.TLB_WAY_BITS     = log2_int(self.TLB_NUM_WAYS)
+        self.TLB_EA_TAG_BITS  = 64 - (self.TLB_LG_PGSZ + self.TLB_SET_BITS)
+        self.TLB_TAG_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_EA_TAG_BITS
+        self.TLB_PTE_BITS     = 64
+        self.TLB_PTE_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_PTE_BITS
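+
+        # with the defaults (TLB_SET_SIZE=64, TLB_NUM_WAYS=2, TLB_LG_PGSZ=12):
+        # TLB_SET_BITS=6, TLB_WAY_BITS=1, TLB_EA_TAG_BITS=64-(12+6)=46,
+        # TLB_TAG_WAY_BITS=2*46=92, TLB_PTE_WAY_BITS=2*64=128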
+
+        assert (self.LINE_SIZE % self.ROW_SIZE) == 0, \
+                "LINE_SIZE not multiple of ROW_SIZE"
+        assert ispow2(self.LINE_SIZE), "LINE_SIZE not power of 2"
+        assert ispow2(self.NUM_LINES), "NUM_LINES not power of 2"
+        assert ispow2(self.ROW_PER_LINE), "ROW_PER_LINE not power of 2"
+        assert self.ROW_BITS == \
+                (self.INDEX_BITS + self.ROW_LINE_BITS), \
+                "geometry bits don't add up"
+        assert (self.LINE_OFF_BITS == \
+                self.ROW_OFF_BITS + self.ROW_LINE_BITS), \
+                "geometry bits don't add up"
+        assert self.REAL_ADDR_BITS == \
+                (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS), \
+                "geometry bits don't add up"
+        assert self.REAL_ADDR_BITS == \
+                (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS), \
+                 "geometry bits don't add up"
+        assert 64 == WB_DATA_BITS, \
+                "Can't yet handle wb width that isn't 64-bits"
+        assert self.SET_SIZE_BITS <= self.TLB_LG_PGSZ, \
+                "Set indexed by virtual address"
+
+    def CacheTagArray(self):
+        return Array(Signal(self.TAG_RAM_WIDTH, name="tag%d" % x) \
+                       for x in range(self.NUM_LINES))
+
+    def CacheValidsArray(self):
+        return Array(Signal(self.NUM_WAYS, name="tag_valids%d" % x)
+                     for x in range(self.NUM_LINES))
+
+    def RowPerLineValidArray(self):
+        return Array(Signal(name="rows_valid%d" % x) \
+                            for x in range(self.ROW_PER_LINE))
+
+    def TLBHit(self, name):
+        return Record([('valid', 1),
+                       ('way', self.TLB_WAY_BITS)], name=name)
+
+    def TLBTagEAArray(self):
+        return Array(Signal(self.TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
+                    for x in range (self.TLB_NUM_WAYS))
+
+    def TLBRecord(self, name):
+        tlb_layout = [('valid', self.TLB_NUM_WAYS),
+                      ('tag', self.TLB_TAG_WAY_BITS),
+                      ('pte', self.TLB_PTE_WAY_BITS)
+                     ]
+        return Record(tlb_layout, name=name)
+
+    def TLBValidArray(self):
+        return Array(Signal(self.TLB_NUM_WAYS, name="tlb_valid%d" % x)
+                            for x in range(self.TLB_SET_SIZE))
+
+    def HitWaySet(self):
+        return Array(Signal(self.WAY_BITS, name="hitway_%d" % x) \
+                            for x in range(self.TLB_NUM_WAYS))
+
+    # Cache RAM interface
+    def CacheRamOut(self):
+        return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
+                     for x in range(self.NUM_WAYS))
+
+    # PLRU output interface
+    def PLRUOut(self):
+        return Array(Signal(self.WAY_BITS, name="plru_out%d" % x) \
+                    for x in range(self.NUM_LINES))
+
+    # TLB PLRU output interface
+    def TLBPLRUOut(self):
+        return Array(Signal(self.TLB_WAY_BITS, name="tlbplru_out%d" % x) \
+                    for x in range(self.TLB_SET_SIZE))
+
+    # Helper functions to decode incoming requests
+    #
+    # Return the cache line index (tag index) for an address
+    def get_index(self, addr):
+        return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]
+
+    # Return the cache row index (data memory) for an address
+    def get_row(self, addr):
+        return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]
 
-# Return the index of a row within a line
-def get_row_of_line(row):
-    return row[:ROW_BITS][:ROW_LINE_BITS]
+    # Return the index of a row within a line
+    def get_row_of_line(self, row):
+        return row[:self.ROW_BITS][:self.ROW_LINE_BITS]
 
-# Returns whether this is the last row of a line
-def is_last_row_addr(addr, last):
-    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last
+    # Returns whether this is the last row of a line
+    def is_last_row_addr(self, addr, last):
+        return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last
 
-# Returns whether this is the last row of a line
-def is_last_row(row, last):
-    return get_row_of_line(row) == last
+    # Returns whether this is the last row of a line
+    def is_last_row(self, row, last):
+        return self.get_row_of_line(row) == last
 
-# Return the next row in the current cache line. We use a
-# dedicated function in order to limit the size of the
-# generated adder to be only the bits within a cache line
-# (3 bits with default settings)
-def next_row(row):
-    row_v = row[0:ROW_LINE_BITS] + 1
-    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
+    # Return the next row in the current cache line. We use a
+    # dedicated function in order to limit the size of the
+    # generated adder to be only the bits within a cache line
+    # (3 bits with default settings)
+    def next_row(self, row):
+        row_v = row[0:self.ROW_LINE_BITS] + 1
+        return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
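+
+    # e.g. with ROW_LINE_BITS=3: next_row(0b101_111) == 0b101_000 (the low
+    # 3 bits wrap within the line; the upper bits are untouched)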
 
-# Get the tag value from the address
-def get_tag(addr):
-    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
+    # Get the tag value from the address
+    def get_tag(self, addr):
+        return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]
 
-# Read a tag from a tag memory row
-def read_tag(way, tagset):
-    return tagset.word_select(way, TAG_WIDTH)[:TAG_BITS]
+    # Read a tag from a tag memory row
+    def read_tag(self, way, tagset):
+        return tagset.word_select(way, self.TAG_WIDTH)[:self.TAG_BITS]
 
-# Read a TLB tag from a TLB tag memory row
-def read_tlb_tag(way, tags):
-    return tags.word_select(way, TLB_EA_TAG_BITS)
+    # Read a TLB tag from a TLB tag memory row
+    def read_tlb_tag(self, way, tags):
+        return tags.word_select(way, self.TLB_EA_TAG_BITS)
 
-# Write a TLB tag to a TLB tag memory row
-def write_tlb_tag(way, tags, tag):
-    return read_tlb_tag(way, tags).eq(tag)
+    # Write a TLB tag to a TLB tag memory row
+    def write_tlb_tag(self, way, tags, tag):
+        return self.read_tlb_tag(way, tags).eq(tag)
 
-# Read a PTE from a TLB PTE memory row
-def read_tlb_pte(way, ptes):
-    return ptes.word_select(way, TLB_PTE_BITS)
+    # Read a PTE from a TLB PTE memory row
+    def read_tlb_pte(self, way, ptes):
+        return ptes.word_select(way, self.TLB_PTE_BITS)
 
-def write_tlb_pte(way, ptes, newpte):
-    return read_tlb_pte(way, ptes).eq(newpte)
+    def write_tlb_pte(self, way, ptes, newpte):
+        return self.read_tlb_pte(way, ptes).eq(newpte)
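+
+    # e.g. with TLB_NUM_WAYS=2 and TLB_PTE_BITS=64, way 1's PTE occupies
+    # ptes[64:128]: read_tlb_pte(1, ptes) selects that slice, and
+    # write_tlb_pte(1, ptes, newpte) assigns to the same slice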
 
 
 # Record for storing permission, attribute, etc. bits from a PTE
@@ -349,15 +391,15 @@ class RegStage0(RecordObject):
 
 
 class MemAccessRequest(RecordObject):
-    def __init__(self, name=None):
+    def __init__(self, cfg, name=None):
         super().__init__(name=name)
         self.op        = Signal(Op)
         self.valid     = Signal()
         self.dcbz      = Signal()
-        self.real_addr = Signal(REAL_ADDR_BITS)
+        self.real_addr = Signal(cfg.REAL_ADDR_BITS)
         self.data      = Signal(64)
         self.byte_sel  = Signal(8)
-        self.hit_way   = Signal(WAY_BITS)
+        self.hit_way   = Signal(cfg.WAY_BITS)
         self.same_tag  = Signal()
         self.mmu_req   = Signal()
 
@@ -365,30 +407,30 @@ class MemAccessRequest(RecordObject):
 # First stage register, contains state for stage 1 of load hits
 # and for the state machine used by all other operations
 class RegStage1(RecordObject):
-    def __init__(self, name=None):
+    def __init__(self, cfg, name=None):
         super().__init__(name=name)
         # Info about the request
         self.full             = Signal() # have uncompleted request
         self.mmu_req          = Signal() # request is from MMU
-        self.req              = MemAccessRequest(name="reqmem")
+        self.req              = MemAccessRequest(cfg, name="reqmem")
 
         # Cache hit state
-        self.hit_way          = Signal(WAY_BITS)
+        self.hit_way          = Signal(cfg.WAY_BITS)
         self.hit_load_valid   = Signal()
-        self.hit_index        = Signal(INDEX_BITS)
+        self.hit_index        = Signal(cfg.INDEX_BITS)
         self.cache_hit        = Signal()
 
         # TLB hit state
-        self.tlb_hit          = TLBHit("tlb_hit")
-        self.tlb_hit_index    = Signal(TLB_SET_BITS)
+        self.tlb_hit          = cfg.TLBHit("tlb_hit")
+        self.tlb_hit_index    = Signal(cfg.TLB_SET_BITS)
 
         # 2-stage data buffer for data forwarded from writes to reads
         self.forward_data1    = Signal(64)
         self.forward_data2    = Signal(64)
         self.forward_sel1     = Signal(8)
         self.forward_valid1   = Signal()
-        self.forward_way1     = Signal(WAY_BITS)
-        self.forward_row1     = Signal(ROW_BITS)
+        self.forward_way1     = Signal(cfg.WAY_BITS)
+        self.forward_row1     = Signal(cfg.ROW_BITS)
         self.use_forward1     = Signal()
         self.forward_sel      = Signal(8)
 
@@ -399,12 +441,12 @@ class RegStage1(RecordObject):
         self.write_tag        = Signal()
         self.slow_valid       = Signal()
         self.wb               = WBMasterOut("wb")
-        self.reload_tag       = Signal(TAG_BITS)
-        self.store_way        = Signal(WAY_BITS)
-        self.store_row        = Signal(ROW_BITS)
-        self.store_index      = Signal(INDEX_BITS)
-        self.end_row_ix       = Signal(ROW_LINE_BITS)
-        self.rows_valid       = RowPerLineValidArray()
+        self.reload_tag       = Signal(cfg.TAG_BITS)
+        self.store_way        = Signal(cfg.WAY_BITS)
+        self.store_row        = Signal(cfg.ROW_BITS)
+        self.store_index      = Signal(cfg.INDEX_BITS)
+        self.end_row_ix       = Signal(cfg.ROW_LINE_BITS)
+        self.rows_valid       = cfg.RowPerLineValidArray()
         self.acks_pending     = Signal(3)
         self.inc_acks         = Signal()
         self.dec_acks         = Signal()
@@ -422,91 +464,178 @@ class RegStage1(RecordObject):
 
 # Reservation information
 class Reservation(RecordObject):
-    def __init__(self):
-        super().__init__()
+    def __init__(self, cfg, name=None):
+        super().__init__(name=name)
         self.valid = Signal()
-        self.addr  = Signal(64-LINE_OFF_BITS)
+        self.addr  = Signal(64-cfg.LINE_OFF_BITS)
 
 
 class DTLBUpdate(Elaboratable):
-    def __init__(self):
+    def __init__(self, cfg):
+        self.cfg = cfg
         self.tlbie    = Signal()
         self.tlbwe    = Signal()
         self.doall    = Signal()
-        self.updated  = Signal()
-        self.v_updated  = Signal()
-        self.tlb_hit     = TLBHit("tlb_hit")
-        self.tlb_req_index = Signal(TLB_SET_BITS)
-
-        self.tlb_tag_way     = Signal(TLB_TAG_WAY_BITS)
-        self.tlb_pte_way     = Signal(TLB_PTE_WAY_BITS)
-        self.repl_way        = Signal(TLB_WAY_BITS)
-        self.eatag           = Signal(TLB_EA_TAG_BITS)
-        self.pte_data        = Signal(TLB_PTE_BITS)
+        self.tlb_hit     = cfg.TLBHit("tlb_hit")
+        self.tlb_req_index = Signal(cfg.TLB_SET_BITS)
 
-        self.dv = Signal(TLB_NUM_WAYS) # tlb_way_valids_t
+        self.repl_way        = Signal(cfg.TLB_WAY_BITS)
+        self.eatag           = Signal(cfg.TLB_EA_TAG_BITS)
+        self.pte_data        = Signal(cfg.TLB_PTE_BITS)
 
-        self.tb_out = Signal(TLB_TAG_WAY_BITS) # tlb_way_tags_t
-        self.db_out = Signal(TLB_NUM_WAYS)     # tlb_way_valids_t
-        self.pb_out = Signal(TLB_PTE_WAY_BITS) # tlb_way_ptes_t
+        # read from dtlb array
+        self.tlb_read       = Signal()
+        self.tlb_read_index = Signal(cfg.TLB_SET_BITS)
+        self.tlb_way        = cfg.TLBRecord("o_tlb_way")
 
     def elaborate(self, platform):
         m = Module()
         comb = m.d.comb
         sync = m.d.sync
-
-        tagset   = Signal(TLB_TAG_WAY_BITS)
-        pteset   = Signal(TLB_PTE_WAY_BITS)
-
-        tb_out, pb_out, db_out = self.tb_out, self.pb_out, self.db_out
-        comb += db_out.eq(self.dv)
+        cfg = self.cfg
+
+        # there are 3 parts to this:
+        # QTY TLB_NUM_WAYs TAGs - of width (say) 46 bits of Effective Address
+        # QTY TLB_NUM_WAYs PTEs - of width (say) 64 bits
+        # "Valid" bits, one per "way", of QTY TLB_NUM_WAYs.  these cannot
+        # be a Memory because they can all be cleared (tlbie, doall), i mean,
+        # we _could_, in theory, by overriding the Reset Signal of the Memory,
+        # hmmm....
+
+        dtlb_valid = cfg.TLBValidArray()
+        tlb_req_index = self.tlb_req_index
+
+        print ("TLB_TAG_WAY_BITS", cfg.TLB_TAG_WAY_BITS)
+        print ("     TLB_EA_TAG_BITS", cfg.TLB_EA_TAG_BITS)
+        print ("        TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
+        print ("TLB_PTE_WAY_BITS", cfg.TLB_PTE_WAY_BITS)
+        print ("    TLB_PTE_BITS", cfg.TLB_PTE_BITS)
+        print ("    TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
+
+        # TAG and PTE Memory SRAMs. transparent, write-enables are TLB_NUM_WAYS
+        tagway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_TAG_WAY_BITS,
+                             attrs={'syn_ramstyle': "block_ram"})
+        m.submodules.rd_tagway = rd_tagway = tagway.read_port()
+        m.submodules.wr_tagway = wr_tagway = tagway.write_port(
+                                    granularity=cfg.TLB_EA_TAG_BITS)
+
+        pteway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_PTE_WAY_BITS,
+                             attrs={'syn_ramstyle': "block_ram"})
+        m.submodules.rd_pteway = rd_pteway = pteway.read_port()
+        m.submodules.wr_pteway = wr_pteway = pteway.write_port(
+                                    granularity=cfg.TLB_PTE_BITS)
+
+        # commented out for now, can be put in if Memory.reset can be
+        # used for tlbie&doall to reset the entire Memory to zero in 1 cycle
+        #validm = Memory(depth=TLB_SET_SIZE, width=TLB_NUM_WAYS)
+        #m.submodules.rd_valid = rd_valid = validm.read_port()
+        #m.submodules.wr_valid = wr_valid = validm.write_port(
+                                    #granularity=1)
+
+        # connect up read and write addresses to Valid/PTE/TAG SRAMs
+        m.d.comb += rd_pteway.addr.eq(self.tlb_read_index)
+        m.d.comb += rd_tagway.addr.eq(self.tlb_read_index)
+        #m.d.comb += rd_valid.addr.eq(self.tlb_read_index)
+        m.d.comb += wr_tagway.addr.eq(tlb_req_index)
+        m.d.comb += wr_pteway.addr.eq(tlb_req_index)
+        #m.d.comb += wr_valid.addr.eq(tlb_req_index)
+
+        updated  = Signal()
+        v_updated  = Signal()
+        tb_out = Signal(cfg.TLB_TAG_WAY_BITS) # tlb_way_tags_t
+        db_out = Signal(cfg.TLB_NUM_WAYS)     # tlb_way_valids_t
+        pb_out = Signal(cfg.TLB_PTE_WAY_BITS) # tlb_way_ptes_t
+        dv = Signal(cfg.TLB_NUM_WAYS) # tlb_way_valids_t
+
+        comb += dv.eq(dtlb_valid[tlb_req_index])
+        comb += db_out.eq(dv)
 
         with m.If(self.tlbie & self.doall):
-            pass # clear all back in parent
+            # clear all valid bits at once
+            # XXX hmmm, validm _could_ use Memory reset here...
+            for i in range(cfg.TLB_SET_SIZE):
+                sync += dtlb_valid[i].eq(0)
         with m.Elif(self.tlbie):
+            # invalidate just the hit_way
             with m.If(self.tlb_hit.valid):
                 comb += db_out.bit_select(self.tlb_hit.way, 1).eq(0)
-                comb += self.v_updated.eq(1)
-
+                comb += v_updated.eq(1)
         with m.Elif(self.tlbwe):
-
-            comb += tagset.eq(self.tlb_tag_way)
-            comb += write_tlb_tag(self.repl_way, tagset, self.eatag)
-            comb += tb_out.eq(tagset)
-
-            comb += pteset.eq(self.tlb_pte_way)
-            comb += write_tlb_pte(self.repl_way, pteset, self.pte_data)
-            comb += pb_out.eq(pteset)
-
+            # write to the requested tag and PTE
+            comb += cfg.write_tlb_tag(self.repl_way, tb_out, self.eatag)
+            comb += cfg.write_tlb_pte(self.repl_way, pb_out, self.pte_data)
+            # set valid bit
             comb += db_out.bit_select(self.repl_way, 1).eq(1)
 
-            comb += self.updated.eq(1)
-            comb += self.v_updated.eq(1)
+            comb += updated.eq(1)
+            comb += v_updated.eq(1)
+
+        # above, sometimes valid is requested to be updated but not the data,
+        # therefore the two are split out, here.  note that the write-port
+        # granularity matches the shift-up of eatag/pte_data into the correct
+        # TLB way: thus it is not necessary to write the entire row, just the
+        # portion being altered, so writing the *old* copy of the row is not
+        # needed
+        with m.If(updated): # PTE and TAG to be written
+            comb += wr_pteway.data.eq(pb_out)
+            comb += wr_pteway.en.eq(1<<self.repl_way)
+            comb += wr_tagway.data.eq(tb_out)
+            comb += wr_tagway.en.eq(1<<self.repl_way)
+        with m.If(v_updated): # Valid to be written
+            sync += dtlb_valid[tlb_req_index].eq(db_out)
+            #comb += wr_valid.data.eq(db_out)
+            #comb += wr_valid.en.eq(1<<self.repl_way)
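+        # note: write_port(granularity=TLB_PTE_BITS) gives one en bit per
+        # PTE-sized slice, so with TLB_NUM_WAYS=2 an en of 0b10 writes only
+        # bits [64:128] of the row, i.e. just way 1's PTE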
+
+        # select one TLB way, use a register here
+        r_delay = Signal()
+        sync += r_delay.eq(self.tlb_read)
+        # first deal with the valids, which are not in a Memory.
+        # tlb way valid is output on a 1 clock delay with sync,
+        # but have to explicitly deal with "forwarding" here
+        with m.If(self.tlb_read):
+            with m.If(v_updated): # write *and* read in same cycle: forward
+                sync += self.tlb_way.valid.eq(db_out)
+            with m.Else():
+                sync += self.tlb_way.valid.eq(dtlb_valid[self.tlb_read_index])
+        # now deal with the Memory-read case. the output must remain
+        # valid (stable) even when a read-request is not made, but stable
+        # on a one-clock delay, hence the register
+        r_tlb_way        = cfg.TLBRecord("r_tlb_way")
+        with m.If(r_delay):
+            # on one clock delay, capture the contents of the read port(s)
+            comb += self.tlb_way.tag.eq(rd_tagway.data)
+            comb += self.tlb_way.pte.eq(rd_pteway.data)
+            sync += r_tlb_way.tag.eq(rd_tagway.data)
+            sync += r_tlb_way.pte.eq(rd_pteway.data)
+        with m.Else():
+            # ... so that the register can output it when no read is requested
+            # it's rather overkill but better to be safe than sorry
+            comb += self.tlb_way.tag.eq(r_tlb_way.tag)
+            comb += self.tlb_way.pte.eq(r_tlb_way.pte)
+            #comb += self.tlb_way.eq(r_tlb_way)
 
         return m
 
 
 class DCachePendingHit(Elaboratable):
 
-    def __init__(self, tlb_way,
+    def __init__(self, cfg, tlb_way,
                       cache_i_validdx, cache_tag_set,
-                    req_addr,
-                    hit_set):
+                    req_addr):
 
         self.go          = Signal()
         self.virt_mode   = Signal()
         self.is_hit      = Signal()
-        self.tlb_hit      = TLBHit("tlb_hit")
-        self.hit_way     = Signal(WAY_BITS)
+        self.tlb_hit     = cfg.TLBHit("tlb_hit")
+        self.hit_way     = Signal(cfg.WAY_BITS)
         self.rel_match   = Signal()
-        self.req_index   = Signal(INDEX_BITS)
-        self.reload_tag  = Signal(TAG_BITS)
+        self.req_index   = Signal(cfg.INDEX_BITS)
+        self.reload_tag  = Signal(cfg.TAG_BITS)
 
         self.tlb_way = tlb_way
         self.cache_i_validdx = cache_i_validdx
         self.cache_tag_set = cache_tag_set
         self.req_addr = req_addr
-        self.hit_set = hit_set
+        self.cfg = cfg
 
     def elaborate(self, platform):
         m = Module()
@@ -521,15 +650,17 @@ class DCachePendingHit(Elaboratable):
         cache_tag_set = self.cache_tag_set
         req_addr = self.req_addr
         tlb_hit = self.tlb_hit
-        hit_set = self.hit_set
         hit_way = self.hit_way
         rel_match = self.rel_match
         req_index = self.req_index
         reload_tag = self.reload_tag
+        cfg = self.cfg
 
+        hit_set     = Array(Signal(name="hit_set_%d" % i) \
+                                  for i in range(cfg.TLB_NUM_WAYS))
         rel_matches = Array(Signal(name="rel_matches_%d" % i) \
-                                    for i in range(TLB_NUM_WAYS))
-        hit_way_set = HitWaySet()
+                                    for i in range(cfg.TLB_NUM_WAYS))
+        hit_way_set = cfg.HitWaySet()
 
         # Test if pending request is a hit on any way
         # In order to make timing in virtual mode,
@@ -538,38 +669,38 @@ class DCachePendingHit(Elaboratable):
         # the TLB, and then decide later which match to use.
 
         with m.If(virt_mode):
-            for j in range(TLB_NUM_WAYS): # tlb_num_way_t
-                s_tag       = Signal(TAG_BITS, name="s_tag%d" % j)
-                s_hit       = Signal()
-                s_pte       = Signal(TLB_PTE_BITS)
-                s_ra        = Signal(REAL_ADDR_BITS)
-                comb += s_pte.eq(read_tlb_pte(j, tlb_way.pte))
-                comb += s_ra.eq(Cat(req_addr[0:TLB_LG_PGSZ],
-                                    s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
-                comb += s_tag.eq(get_tag(s_ra))
-
-                for i in range(NUM_WAYS): # way_t
+            for j in range(cfg.TLB_NUM_WAYS): # tlb_num_way_t
+                s_tag       = Signal(cfg.TAG_BITS, name="s_tag%d" % j)
+                s_hit       = Signal(name="s_hit%d" % j)
+                s_pte       = Signal(cfg.TLB_PTE_BITS, name="s_pte%d" % j)
+                s_ra        = Signal(cfg.REAL_ADDR_BITS, name="s_ra%d" % j)
+                # read the PTE, calculate the Real Address, get the tag
+                comb += s_pte.eq(cfg.read_tlb_pte(j, tlb_way.pte))
+                comb += s_ra.eq(Cat(req_addr[0:cfg.TLB_LG_PGSZ],
+                                    s_pte[cfg.TLB_LG_PGSZ:cfg.REAL_ADDR_BITS]))
+                comb += s_tag.eq(cfg.get_tag(s_ra))
+                # for each way, check the tag against the cache tag set
+                for i in range(cfg.NUM_WAYS): # way_t
                     is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
                     comb += is_tag_hit.eq(go & cache_i_validdx[i] &
-                                  (read_tag(i, cache_tag_set) == s_tag)
+                                  (cfg.read_tag(i, cache_tag_set) == s_tag)
                                   & (tlb_way.valid[j]))
                     with m.If(is_tag_hit):
                         comb += hit_way_set[j].eq(i)
                         comb += s_hit.eq(1)
                 comb += hit_set[j].eq(s_hit)
-                with m.If(s_tag == reload_tag):
-                    comb += rel_matches[j].eq(1)
-            with m.If(tlb_hit.way):
+                comb += rel_matches[j].eq(s_tag == reload_tag)
+            with m.If(tlb_hit.valid):
                 comb += is_hit.eq(hit_set[tlb_hit.way])
                 comb += hit_way.eq(hit_way_set[tlb_hit.way])
                 comb += rel_match.eq(rel_matches[tlb_hit.way])
         with m.Else():
-            s_tag       = Signal(TAG_BITS)
-            comb += s_tag.eq(get_tag(req_addr))
-            for i in range(NUM_WAYS): # way_t
+            s_tag       = Signal(cfg.TAG_BITS)
+            comb += s_tag.eq(cfg.get_tag(req_addr))
+            for i in range(cfg.NUM_WAYS): # way_t
                 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
                 comb += is_tag_hit.eq(go & cache_i_validdx[i] &
-                          (read_tag(i, cache_tag_set) == s_tag))
+                          (cfg.read_tag(i, cache_tag_set) == s_tag))
                 with m.If(is_tag_hit):
                     comb += hit_way.eq(i)
                     comb += is_hit.eq(1)
@@ -579,7 +710,7 @@ class DCachePendingHit(Elaboratable):
         return m
 
 
-class DCache(Elaboratable):
+class DCache(Elaboratable, DCacheConfig):
     """Set associative dcache write-through
 
     TODO (in no specific order):
@@ -588,7 +719,7 @@ class DCache(Elaboratable):
       at the end of line (this requires dealing with requests coming in
       while not idle...)
     """
-    def __init__(self):
+    def __init__(self, pspec=None):
         self.d_in      = LoadStore1ToDCacheType("d_in")
         self.d_out     = DCacheToLoadStore1Type("d_out")
 
@@ -596,17 +727,51 @@ class DCache(Elaboratable):
         self.m_out     = DCacheToMMUType("m_out")
 
         self.stall_out = Signal()
+        self.any_stall_out = Signal()
+        self.dreq_when_stall = Signal()
+        self.mreq_when_stall = Signal()
 
         # standard naming (wired to non-standard for compatibility)
         self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
-                            alignment=0,
+                            #alignment=0,
                             name="dcache")
 
         self.log_out   = Signal(20)
 
+        # test if the small cache option is to be enabled
+        self.small_cache = (hasattr(pspec, "small_cache") and
+                                 (pspec.small_cache == True))
+        # test if microwatt compatibility is to be enabled
+        self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
+                                 (pspec.microwatt_compat == True))
+
+        XLEN = getattr(pspec, "XLEN", 64) # not yet used, see TODO below
+        TLB_SET_SIZE = 8
+        TLB_NUM_WAYS = 2
+        NUM_LINES = 8
+        NUM_WAYS = 2
+
+        if self.small_cache:
+            # reduce way sizes and num lines to ridiculously small
+            TLB_SET_SIZE = 2
+            TLB_NUM_WAYS = 1
+            NUM_LINES = 2
+            NUM_WAYS = 1
+        if self.microwatt_compat:
+            # reduce way sizes
+            NUM_WAYS = 1
+            TLB_NUM_WAYS = 1
+
+        super().__init__(TLB_SET_SIZE=TLB_SET_SIZE,
+                         # XLEN=XLEN, # TODO
+                         TLB_NUM_WAYS = TLB_NUM_WAYS,
+                         NUM_LINES = NUM_LINES,
+                         NUM_WAYS = NUM_WAYS
+                        )
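+
+        # rough usage sketch: pspec is any object carrying the attributes
+        # tested above (the PSpec class here is purely illustrative):
+        #
+        #   class PSpec:
+        #       XLEN = 64
+        #       small_cache = True    # 2 lines, 1 way, 2-entry 1-way TLB
+        #   dcache = DCache(PSpec())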
+
     def stage_0(self, m, r0, r1, r0_full):
         """Latch the request in r0.req as long as we're not stalling
         """
@@ -635,6 +800,7 @@ class DCache(Elaboratable):
             comb += r.doall.eq(m_in.doall)
             comb += r.tlbld.eq(m_in.tlbld)
             comb += r.mmu_req.eq(1)
+            comb += r.d_valid.eq(1)
             m.d.sync += Display("    DCACHE req mmu addr %x pte %x ld %d",
                                  m_in.addr, m_in.pte, r.req.load)
 
@@ -645,23 +811,25 @@ class DCache(Elaboratable):
             comb += r.doall.eq(0)
             comb += r.tlbld.eq(0)
             comb += r.mmu_req.eq(0)
+            comb += r.d_valid.eq(0)
+
+        sync += r0_full.eq(0)
         with m.If((~r1.full & ~d_in.hold) | ~r0_full):
             sync += r0.eq(r)
             sync += r0_full.eq(r.req.valid)
+        with m.Elif(~r0.d_valid):
             # Sample data the cycle after a request comes in from loadstore1.
             # If another request has come in already then the data will get
             # put directly into req.data below.
-            with m.If(r0.req.valid & ~r.req.valid & ~r0.d_valid &
-                     ~r0.mmu_req):
-                sync += r0.req.data.eq(d_in.data)
-                sync += r0.d_valid.eq(1)
+            sync += r0.req.data.eq(d_in.data)
+            sync += r0.d_valid.eq(1)
         with m.If(d_in.valid):
             m.d.sync += Display("    DCACHE req cache "
                                 "virt %d addr %x data %x ld %d",
                                  r.req.virt_mode, r.req.addr,
                                  r.req.data, r.req.load)
 
-    def tlb_read(self, m, r0_stall, tlb_way, dtlb):
+    def tlb_read(self, m, r0_stall, tlb_way):
         """TLB
         Operates in the second cycle on the request latched in r0.req.
         TLB updates write the entry at the end of the second cycle.
@@ -670,42 +838,40 @@ class DCache(Elaboratable):
         sync = m.d.sync
         m_in, d_in = self.m_in, self.d_in
 
-        index    = Signal(TLB_SET_BITS)
-        addrbits = Signal(TLB_SET_BITS)
+        addrbits = Signal(self.TLB_SET_BITS)
 
-        amin = TLB_LG_PGSZ
-        amax = TLB_LG_PGSZ + TLB_SET_BITS
+        amin = self.TLB_LG_PGSZ
+        amax = self.TLB_LG_PGSZ + self.TLB_SET_BITS
 
         with m.If(m_in.valid):
             comb += addrbits.eq(m_in.addr[amin : amax])
         with m.Else():
             comb += addrbits.eq(d_in.addr[amin : amax])
-        comb += index.eq(addrbits)
 
         # If we have any op and the previous op isn't finished,
         # then keep the same output for next cycle.
-        with m.If(~r0_stall):
-            sync += tlb_way.eq(dtlb[index])
+        d = self.dtlb_update
+        comb += d.tlb_read_index.eq(addrbits)
+        comb += d.tlb_read.eq(~r0_stall)
+        comb += tlb_way.eq(d.tlb_way)
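+        # note: the Memory read-port inside DTLBUpdate is registered, so
+        # tlb_way appears one cycle after the request, matching the old
+        # behaviour of the sync read from the dtlb Array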
 
-    def maybe_tlb_plrus(self, m, r1, tlb_plru_victim):
+    def maybe_tlb_plrus(self, m, r1, tlb_plru_victim, tlb_req_index):
         """Generate TLB PLRUs
         """
         comb = m.d.comb
         sync = m.d.sync
 
-        if TLB_NUM_WAYS == 0:
+        if self.TLB_NUM_WAYS == 0:
             return
-        for i in range(TLB_SET_SIZE):
-            # TLB PLRU interface
-            tlb_plru        = PLRU(TLB_WAY_BITS)
-            setattr(m.submodules, "maybe_plru_%d" % i, tlb_plru)
-            tlb_plru_acc_en = Signal()
-
-            comb += tlb_plru_acc_en.eq(r1.tlb_hit.valid &
-                                       (r1.tlb_hit_index == i))
-            comb += tlb_plru.acc_en.eq(tlb_plru_acc_en)
-            comb += tlb_plru.acc_i.eq(r1.tlb_hit.way)
-            comb += tlb_plru_victim[i].eq(tlb_plru.lru_o)
+
+        # suite of PLRUs with a selection and output mechanism
+        tlb_plrus = PLRUs("d_tlb", self.TLB_SET_SIZE, self.TLB_WAY_BITS)
+        m.submodules.tlb_plrus = tlb_plrus
+        comb += tlb_plrus.way.eq(r1.tlb_hit.way)
+        comb += tlb_plrus.valid.eq(r1.tlb_hit.valid)
+        comb += tlb_plrus.index.eq(r1.tlb_hit_index)
+        comb += tlb_plrus.isel.eq(tlb_req_index) # select victim
+        comb += tlb_plru_victim.eq(tlb_plrus.o_index) # selected victim
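+        # (index picks which PLRU records the access, isel picks which
+        # PLRU's LRU output is presented on o_index)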
 
     def tlb_search(self, m, tlb_req_index, r0, r0_valid,
                    tlb_way,
@@ -713,18 +879,19 @@ class DCache(Elaboratable):
 
         comb = m.d.comb
 
-        hitway = Signal(TLB_WAY_BITS)
+        hitway = Signal(self.TLB_WAY_BITS)
         hit    = Signal()
-        eatag  = Signal(TLB_EA_TAG_BITS)
+        eatag  = Signal(self.TLB_EA_TAG_BITS)
 
-        TLB_LG_END = TLB_LG_PGSZ + TLB_SET_BITS
-        comb += tlb_req_index.eq(r0.req.addr[TLB_LG_PGSZ : TLB_LG_END])
-        comb += eatag.eq(r0.req.addr[TLB_LG_END : 64 ])
+        self.TLB_LG_END = self.TLB_LG_PGSZ + self.TLB_SET_BITS
+        r0_req_addr = r0.req.addr[self.TLB_LG_PGSZ : self.TLB_LG_END]
+        comb += tlb_req_index.eq(r0_req_addr)
+        comb += eatag.eq(r0.req.addr[self.TLB_LG_END : 64 ])
 
-        for i in range(TLB_NUM_WAYS):
+        for i in range(self.TLB_NUM_WAYS):
             is_tag_hit = Signal(name="is_tag_hit%d" % i)
-            tlb_tag = Signal(TLB_EA_TAG_BITS, name="tlb_tag%d" % i)
-            comb += tlb_tag.eq(read_tlb_tag(i, tlb_way.tag))
+            tlb_tag = Signal(self.TLB_EA_TAG_BITS, name="tlb_tag%d" % i)
+            comb += tlb_tag.eq(self.read_tlb_tag(i, tlb_way.tag))
             comb += is_tag_hit.eq((tlb_way.valid[i]) & (tlb_tag == eatag))
             with m.If(is_tag_hit):
                 comb += hitway.eq(i)
@@ -734,13 +901,13 @@ class DCache(Elaboratable):
         comb += tlb_hit.way.eq(hitway)
 
         with m.If(tlb_hit.valid):
-            comb += pte.eq(read_tlb_pte(hitway, tlb_way.pte))
+            comb += pte.eq(self.read_tlb_pte(hitway, tlb_way.pte))
         comb += valid_ra.eq(tlb_hit.valid | ~r0.req.virt_mode)
 
         with m.If(r0.req.virt_mode):
-            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
-                              r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
-                              pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
+            comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
+                              r0.req.addr[self.ROW_OFF_BITS:self.TLB_LG_PGSZ],
+                              pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))
             comb += perm_attr.reference.eq(pte[8])
             comb += perm_attr.changed.eq(pte[7])
             comb += perm_attr.nocache.eq(pte[5])
@@ -748,8 +915,8 @@ class DCache(Elaboratable):
             comb += perm_attr.rd_perm.eq(pte[2])
             comb += perm_attr.wr_perm.eq(pte[1])
         with m.Else():
-            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
-                              r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]))
+            comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
+                          r0.req.addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS]))
             comb += perm_attr.reference.eq(1)
             comb += perm_attr.changed.eq(1)
             comb += perm_attr.nocache.eq(0)
@@ -767,8 +934,8 @@ class DCache(Elaboratable):
             m.d.sync += Display("       perm rdp=%d", perm_attr.rd_perm)
             m.d.sync += Display("       perm wrp=%d", perm_attr.wr_perm)
 
-    def tlb_update(self, m, r0_valid, r0, dtlb, tlb_req_index,
-                    tlb_hit, tlb_plru_victim, tlb_way):
+    def tlb_update(self, m, r0_valid, r0, tlb_req_index,
+                    tlb_hit, tlb_plru_victim):
 
         comb = m.d.comb
         sync = m.d.sync
@@ -779,32 +946,19 @@ class DCache(Elaboratable):
         comb += tlbie.eq(r0_valid & r0.tlbie)
         comb += tlbwe.eq(r0_valid & r0.tlbld)
 
-        m.submodules.tlb_update = d = DTLBUpdate()
-        with m.If(tlbie & r0.doall):
-            # clear all valid bits at once
-            for i in range(TLB_SET_SIZE):
-                sync += dtlb[i].valid.eq(0)
-        with m.If(d.updated):
-            sync += dtlb[tlb_req_index].tag.eq(d.tb_out)
-            sync += dtlb[tlb_req_index].pte.eq(d.pb_out)
-        with m.If(d.v_updated):
-            sync += dtlb[tlb_req_index].valid.eq(d.db_out)
-
-        comb += d.dv.eq(dtlb[tlb_req_index].valid)
+        d = self.dtlb_update
 
         comb += d.tlbie.eq(tlbie)
         comb += d.tlbwe.eq(tlbwe)
         comb += d.doall.eq(r0.doall)
         comb += d.tlb_hit.eq(tlb_hit)
-        comb += d.tlb_tag_way.eq(tlb_way.tag)
-        comb += d.tlb_pte_way.eq(tlb_way.pte)
         comb += d.tlb_req_index.eq(tlb_req_index)
 
         with m.If(tlb_hit.valid):
             comb += d.repl_way.eq(tlb_hit.way)
         with m.Else():
-            comb += d.repl_way.eq(tlb_plru_victim[tlb_req_index])
-        comb += d.eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
+            comb += d.repl_way.eq(tlb_plru_victim)
+        comb += d.eatag.eq(r0.req.addr[self.TLB_LG_PGSZ + self.TLB_SET_BITS:64])
         comb += d.pte_data.eq(r0.req.data)
 
     def maybe_plrus(self, m, r1, plru_victim):
@@ -813,39 +967,43 @@ class DCache(Elaboratable):
         comb = m.d.comb
         sync = m.d.sync
 
-        if TLB_NUM_WAYS == 0:
+        if self.NUM_WAYS == 0:
             return
 
-        for i in range(NUM_LINES):
-            # PLRU interface
-            plru        = PLRU(WAY_BITS)
-            setattr(m.submodules, "plru%d" % i, plru)
-            plru_acc_en = Signal()
-
-            comb += plru_acc_en.eq(r1.cache_hit & (r1.hit_index == i))
-            comb += plru.acc_en.eq(plru_acc_en)
-            comb += plru.acc_i.eq(r1.hit_way)
-            comb += plru_victim[i].eq(plru.lru_o)
+        # suite of PLRUs with a selection and output mechanism
+        m.submodules.plrus = plrus = PLRUs("dtag", self.NUM_LINES,
+                                                   self.WAY_BITS)
+        comb += plrus.way.eq(r1.hit_way)
+        comb += plrus.valid.eq(r1.cache_hit)
+        comb += plrus.index.eq(r1.hit_index)
+        comb += plrus.isel.eq(r1.store_index) # select victim
+        comb += plru_victim.eq(plrus.o_index) # selected victim
 
-    def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set, cache_tags):
+    def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set):
         """Cache tag RAM read port
         """
         comb = m.d.comb
         sync = m.d.sync
+
         m_in, d_in = self.m_in, self.d_in
 
-        index = Signal(INDEX_BITS)
+        # synchronous tag read-port: NOT TRANSPARENT (cannot pass through
+        # write-to-a-read at the same time), seems to pass tests ok
+        m.submodules.rd_tag = rd_tag = self.tagmem.read_port(transparent=False)
+
+        index = Signal(self.INDEX_BITS)
 
         with m.If(r0_stall):
             comb += index.eq(req_index)
         with m.Elif(m_in.valid):
-            comb += index.eq(get_index(m_in.addr))
+            comb += index.eq(self.get_index(m_in.addr))
         with m.Else():
-            comb += index.eq(get_index(d_in.addr))
-        sync += cache_tag_set.eq(cache_tags[index].tag)
+            comb += index.eq(self.get_index(d_in.addr))
+        comb += rd_tag.addr.eq(index)
+        comb += cache_tag_set.eq(rd_tag.data) # read-port is a 1-clock delay
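+        # cache_tag_set packs all ways of one line: e.g. with NUM_WAYS=2 and
+        # TAG_WIDTH=48, each rd_tag.data row is 96 bits wide, way i occupying
+        # bits [i*48:(i+1)*48], from which read_tag() extracts the low
+        # TAG_BITS of that way's slice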
 
     def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
-                       r0_valid, r1, cache_tags, replace_way,
+                       r0_valid, r1, cache_valids, replace_way,
                        use_forward1_next, use_forward2_next,
                        req_hit_way, plru_victim, rc_ok, perm_attr,
                        valid_ra, perm_ok, access_ok, req_op, req_go,
@@ -858,31 +1016,28 @@ class DCache(Elaboratable):
         m_in, d_in = self.m_in, self.d_in
 
         is_hit      = Signal()
-        hit_way     = Signal(WAY_BITS)
+        hit_way     = Signal(self.WAY_BITS)
         op          = Signal(Op)
         opsel       = Signal(3)
         go          = Signal()
         nc          = Signal()
-        hit_set     = Array(Signal(name="hit_set_%d" % i) \
-                                  for i in range(TLB_NUM_WAYS))
-        cache_i_validdx = Signal(NUM_WAYS)
+        cache_i_validdx = Signal(self.NUM_WAYS)
 
         # Extract line, row and tag from request
-        comb += req_index.eq(get_index(r0.req.addr))
-        comb += req_row.eq(get_row(r0.req.addr))
-        comb += req_tag.eq(get_tag(ra))
+        comb += req_index.eq(self.get_index(r0.req.addr))
+        comb += req_row.eq(self.get_row(r0.req.addr))
+        comb += req_tag.eq(self.get_tag(ra))
 
         if False: # display on comb is a bit... busy.
             comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
                     r0.req.addr, ra, req_index, req_tag, req_row)
 
         comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
-        comb += cache_i_validdx.eq(cache_tags[req_index].valid)
+        comb += cache_i_validdx.eq(cache_valids[req_index])
 
-        m.submodules.dcache_pend = dc = DCachePendingHit(tlb_way,
+        m.submodules.dcache_pend = dc = DCachePendingHit(self, tlb_way,
                                             cache_i_validdx, cache_tag_set,
-                                            r0.req.addr,
-                                            hit_set)
+                                            r0.req.addr)
         comb += dc.tlb_hit.eq(tlb_hit)
         comb += dc.reload_tag.eq(r1.reload_tag)
         comb += dc.virt_mode.eq(r0.req.virt_mode)
@@ -899,14 +1054,14 @@ class DCache(Elaboratable):
             # For a store, consider this a hit even if the row isn't
             # valid since it will be by the time we perform the store.
             # For a load, check the appropriate row valid bit.
-            rrow = Signal(ROW_LINE_BITS)
+            rrow = Signal(self.ROW_LINE_BITS)
             comb += rrow.eq(req_row)
             valid = r1.rows_valid[rrow]
             comb += is_hit.eq((~r0.req.load) | valid)
             comb += hit_way.eq(replace_way)
 
         # Whether to use forwarded data for a load or not
-        with m.If((get_row(r1.req.real_addr) == req_row) &
+        with m.If((self.get_row(r1.req.real_addr) == req_row) &
                   (r1.req.hit_way == hit_way)):
             # Only need to consider r1.write_bram here, since if we
             # are writing refill data here, then we don't have a
@@ -925,7 +1080,7 @@ class DCache(Elaboratable):
 
         # The way to replace on a miss
         with m.If(r1.write_tag):
-            comb += replace_way.eq(plru_victim[r1.store_index])
+            comb += replace_way.eq(plru_victim)
         with m.Else():
             comb += replace_way.eq(r1.store_way)
 
@@ -937,6 +1092,7 @@ class DCache(Elaboratable):
                            (perm_attr.wr_perm |
                               (r0.req.load & perm_attr.rd_perm)))
         comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
+
         # Combine the request and cache hit status to decide what
         # operation needs to be done
         comb += nc.eq(r0.req.nc | perm_attr.nocache)
@@ -971,9 +1127,9 @@ class DCache(Elaboratable):
         # row requested.
         with m.If(~r0_stall):
             with m.If(m_in.valid):
-                comb += early_req_row.eq(get_row(m_in.addr))
+                comb += early_req_row.eq(self.get_row(m_in.addr))
             with m.Else():
-                comb += early_req_row.eq(get_row(d_in.addr))
+                comb += early_req_row.eq(self.get_row(d_in.addr))
         with m.Else():
             comb += early_req_row.eq(req_row)
 
@@ -991,12 +1147,12 @@ class DCache(Elaboratable):
             with m.Else():
                 comb += clear_rsrv.eq(r0.req.atomic_last) # store conditional
                 with m.If((~reservation.valid) |
-                         (r0.req.addr[LINE_OFF_BITS:64] != reservation.addr)):
+                         (r0.req.addr[self.LINE_OFF_BITS:64] !=
+                          reservation.addr)):
                     comb += cancel_store.eq(1)
 
     def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                         reservation, r0):
-
         comb = m.d.comb
         sync = m.d.sync
 
@@ -1005,7 +1161,7 @@ class DCache(Elaboratable):
                 sync += reservation.valid.eq(0)
             with m.Elif(set_rsrv):
                 sync += reservation.valid.eq(1)
-                sync += reservation.addr.eq(r0.req.addr[LINE_OFF_BITS:64])
+                sync += reservation.addr.eq(r0.req.addr[self.LINE_OFF_BITS:64])
 
     def writeback_control(self, m, r1, cache_out_row):
         """Return data for loads & completion control logic
@@ -1033,6 +1189,7 @@ class DCache(Elaboratable):
                 dsel = data_fwd.word_select(i, 8)
                 comb += data_out.word_select(i, 8).eq(dsel)
 
+        # DCache output to LoadStore
         comb += d_out.valid.eq(r1.ls_valid)
         comb += d_out.data.eq(data_out)
         comb += d_out.store_done.eq(~r1.stcx_fail)
@@ -1109,60 +1266,78 @@ class DCache(Elaboratable):
         comb = m.d.comb
         bus = self.bus
 
-        for i in range(NUM_WAYS):
-            do_read  = Signal(name="do_rd%d" % i)
-            rd_addr  = Signal(ROW_BITS, name="rd_addr_%d" % i)
+        # Binary-to-Unary one-hot decoders here.  the replace-way one-hot is
+        # gated (enabled) by bus.ack, not-write-bram, and state RELOAD_WAIT_ACK
+        m.submodules.rams_replace_way_e = rwe = Decoder(self.NUM_WAYS)
+        comb += rwe.n.eq(~((r1.state == State.RELOAD_WAIT_ACK) & bus.ack &
+                   ~r1.write_bram))
+        comb += rwe.i.eq(replace_way)
+
+        m.submodules.rams_hit_way_e = hwe = Decoder(self.NUM_WAYS)
+        comb += hwe.i.eq(r1.hit_way)
+
+        # this one is gated with write_bram, and replace_way_e can never be
+        # set at the same time.  that means that do_write can OR the outputs
+        m.submodules.rams_hit_req_way_e = hre = Decoder(self.NUM_WAYS)
+        comb += hre.n.eq(~r1.write_bram) # Decoder.n is inverted
+        comb += hre.i.eq(r1.req.hit_way)
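+
+        # Decoder: o == (1 << i) when n is low, otherwise all-zeros.  e.g.
+        # with NUM_WAYS=2, i=1, n=0 gives o=0b10; n=1 forces o=0b00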
+
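+        # for reference: nmigen's Decoder drives o with the one-hot
+        # expansion of i unless n is asserted, in which case o is all
+        # zeros.  e.g. with NUM_WAYS=4: i=2, n=0 gives o=0b0100, and
+        # n=1 gives o=0b0000.
+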
+        # common Signals
+        do_read  = Signal()
+        wr_addr  = Signal(self.ROW_BITS)
+        wr_data  = Signal(WB_DATA_BITS)
+        wr_sel   = Signal(self.ROW_SIZE)
+        rd_addr  = Signal(self.ROW_BITS)
+
+        comb += do_read.eq(1) # always enable
+        comb += rd_addr.eq(early_req_row)
+
+        # Write mux:
+        #
+        # Defaults to wishbone read responses (cache refill)
+        #
+        # For timing, the mux on wr_data/sel/addr is not
+        # dependent on anything other than the current state.
+
+        with m.If(r1.write_bram):
+            # Write store data to BRAM.  This happens one
+            # cycle after the store is in r0.
+            comb += wr_data.eq(r1.req.data)
+            comb += wr_sel.eq(r1.req.byte_sel)
+            comb += wr_addr.eq(self.get_row(r1.req.real_addr))
+
+        with m.Else():
+            # Otherwise, we might be doing a reload or a DCBZ
+            with m.If(r1.dcbz):
+                comb += wr_data.eq(0)
+            with m.Else():
+                comb += wr_data.eq(bus.dat_r)
+            comb += wr_addr.eq(r1.store_row)
+            comb += wr_sel.eq(~0) # all 1s
+
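+        # worked example: a dcbz reload writes zeros to every byte lane
+        # of the row at r1.store_row (wr_sel all-ones); a normal reload
+        # fills the same row from bus.dat_r; only a store (write_bram)
+        # narrows wr_sel down to r1.req.byte_sel.
+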
+        # set up Cache Rams
+        for i in range(self.NUM_WAYS):
             do_write = Signal(name="do_wr%d" % i)
-            wr_addr  = Signal(ROW_BITS, name="wr_addr_%d" % i)
-            wr_data  = Signal(WB_DATA_BITS, name="din_%d" % i)
-            wr_sel   = Signal(ROW_SIZE)
-            wr_sel_m = Signal(ROW_SIZE)
-            _d_out   = Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t
+            wr_sel_m = Signal(self.ROW_SIZE, name="wr_sel_m_%d" % i)
+            d_out = Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t
 
-            way = CacheRam(ROW_BITS, WB_DATA_BITS, ADD_BUF=True, ram_num=i)
-            setattr(m.submodules, "cacheram_%d" % i, way)
+            way = CacheRam(self.ROW_BITS, WB_DATA_BITS, ADD_BUF=True, ram_num=i)
+            m.submodules["cacheram_%d" % i] = way
 
             comb += way.rd_en.eq(do_read)
             comb += way.rd_addr.eq(rd_addr)
-            comb += _d_out.eq(way.rd_data_o)
+            comb += d_out.eq(way.rd_data_o)
             comb += way.wr_sel.eq(wr_sel_m)
             comb += way.wr_addr.eq(wr_addr)
             comb += way.wr_data.eq(wr_data)
 
             # Cache hit reads
-            comb += do_read.eq(1)
-            comb += rd_addr.eq(early_req_row)
-            with m.If(r1.hit_way == i):
-                comb += cache_out_row.eq(_d_out)
-
-            # Write mux:
-            #
-            # Defaults to wishbone read responses (cache refill)
-            #
-            # For timing, the mux on wr_data/sel/addr is not
-            # dependent on anything other than the current state.
-
-            with m.If(r1.write_bram):
-                # Write store data to BRAM.  This happens one
-                # cycle after the store is in r0.
-                comb += wr_data.eq(r1.req.data)
-                comb += wr_sel.eq(r1.req.byte_sel)
-                comb += wr_addr.eq(get_row(r1.req.real_addr))
-
-                with m.If(i == r1.req.hit_way):
-                    comb += do_write.eq(1)
-            with m.Else():
-                # Otherwise, we might be doing a reload or a DCBZ
-                with m.If(r1.dcbz):
-                    comb += wr_data.eq(0)
-                with m.Else():
-                    comb += wr_data.eq(bus.dat_r)
-                comb += wr_addr.eq(r1.store_row)
-                comb += wr_sel.eq(~0) # all 1s
+            with m.If(hwe.o[i]):
+                comb += cache_out_row.eq(d_out)
 
-                with m.If((r1.state == State.RELOAD_WAIT_ACK)
-                          & bus.ack & (replace_way == i)):
-                    comb += do_write.eq(1)
+            # these are mutually-exclusive via their Decoder-enablers
+            # (note: Decoder-enable is inverted)
+            comb += do_write.eq(hre.o[i] | rwe.o[i])
 
             # Mask write selects with do_write since BRAM
             # doesn't have a global write-enable
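             # a minimal sketch of that masking (assumption: the actual
             # assignment sits just past this hunk):
             #     comb += wr_sel_m.eq(wr_sel & Repl(do_write, self.ROW_SIZE))
             # every per-byte enable is ANDed with do_write before
             # reaching way.wr_sel.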
@@ -1175,7 +1350,6 @@ class DCache(Elaboratable):
     def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
                         req_hit_way, req_index, req_tag, access_ok,
                         tlb_hit, tlb_req_index):
-
         comb = m.d.comb
         sync = m.d.sync
 
@@ -1192,15 +1366,9 @@ class DCache(Elaboratable):
         sync += r1.hit_way.eq(req_hit_way)
         sync += r1.hit_index.eq(req_index)
 
-        with m.If(req_op == Op.OP_LOAD_HIT):
-            sync += r1.hit_load_valid.eq(1)
-        with m.Else():
-            sync += r1.hit_load_valid.eq(0)
-
-        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STORE_HIT)):
-            sync += r1.cache_hit.eq(1)
-        with m.Else():
-            sync += r1.cache_hit.eq(0)
+        sync += r1.hit_load_valid.eq(req_op == Op.OP_LOAD_HIT)
+        sync += r1.cache_hit.eq((req_op == Op.OP_LOAD_HIT) |
+                                (req_op == Op.OP_STORE_HIT))
 
         with m.If(req_op == Op.OP_BAD):
             sync += Display("Signalling ld/st error "
@@ -1209,16 +1377,12 @@ class DCache(Elaboratable):
             sync += r1.ls_error.eq(~r0.mmu_req)
             sync += r1.mmu_error.eq(r0.mmu_req)
             sync += r1.cache_paradox.eq(access_ok)
-
         with m.Else():
             sync += r1.ls_error.eq(0)
             sync += r1.mmu_error.eq(0)
             sync += r1.cache_paradox.eq(0)
 
-        with m.If(req_op == Op.OP_STCX_FAIL):
-            sync += r1.stcx_fail.eq(1)
-        with m.Else():
-            sync += r1.stcx_fail.eq(0)
+        sync += r1.stcx_fail.eq(req_op == Op.OP_STCX_FAIL)
 
         # Record TLB hit information for updating TLB PLRU
         sync += r1.tlb_hit.eq(tlb_hit)
@@ -1235,21 +1399,25 @@ class DCache(Elaboratable):
     def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
                     r0, replace_way,
                     req_hit_way, req_same_tag,
-                    r0_valid, req_op, cache_tags, req_go, ra):
+                    r0_valid, req_op, cache_valids, req_go, ra):
 
         comb = m.d.comb
         sync = m.d.sync
         bus = self.bus
         d_in = self.d_in
 
-        req         = MemAccessRequest("mreq_ds")
+        m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
+                                                    granularity=self.TAG_WIDTH)
 
-        req_row = Signal(ROW_BITS)
-        req_idx = Signal(INDEX_BITS)
-        req_tag = Signal(TAG_BITS)
-        comb += req_idx.eq(get_index(req.real_addr))
-        comb += req_row.eq(get_row(req.real_addr))
-        comb += req_tag.eq(get_tag(req.real_addr))
+        req         = MemAccessRequest(self, "mreq_ds")
+
+        r1_next_cycle = Signal()
+        req_row = Signal(self.ROW_BITS)
+        req_idx = Signal(self.INDEX_BITS)
+        req_tag = Signal(self.TAG_BITS)
+        comb += req_idx.eq(self.get_index(req.real_addr))
+        comb += req_row.eq(self.get_row(req.real_addr))
+        comb += req_tag.eq(self.get_tag(req.real_addr))
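+        # worked example (sizes assumed for illustration): with 8-byte
+        # rows and 64-byte lines, get_row selects real_addr[3:3+ROW_BITS],
+        # get_index selects real_addr[6:6+INDEX_BITS], and get_tag the
+        # bits above the index, up to REAL_ADDR_BITS.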
 
         sync += r1.use_forward1.eq(use_forward1_next)
         sync += r1.forward_sel.eq(0)
@@ -1264,7 +1432,7 @@ class DCache(Elaboratable):
             sync += r1.forward_data1.eq(r1.req.data)
             sync += r1.forward_sel1.eq(r1.req.byte_sel)
             sync += r1.forward_way1.eq(r1.req.hit_way)
-            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
+            sync += r1.forward_row1.eq(self.get_row(r1.req.real_addr))
             sync += r1.forward_valid1.eq(1)
         with m.Else():
             with m.If(r1.dcbz):
@@ -1287,24 +1455,21 @@ class DCache(Elaboratable):
         sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
 
         with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
-            with m.If(~r0.mmu_req):
-                sync += r1.ls_valid.eq(1)
-            with m.Else():
+            with m.If(r0.mmu_req):
                 sync += r1.mmu_done.eq(1)
+            with m.Else():
+                sync += r1.ls_valid.eq(1)
 
         with m.If(r1.write_tag):
             # Store new tag in selected way
-            for i in range(NUM_WAYS):
-                with m.If(i == replace_way):
-                    ct = Signal(TAG_RAM_WIDTH)
-                    comb += ct.eq(cache_tags[r1.store_index].tag)
-                    """
-TODO: check this
-cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
-                    (TAG_WIDTH - 1 downto TAG_BITS => '0') & r1.reload_tag;
-                    """
-                    comb += ct.word_select(i, TAG_WIDTH).eq(r1.reload_tag)
-                    sync += cache_tags[r1.store_index].tag.eq(ct)
+            replace_way_onehot = Signal(self.NUM_WAYS)
+            comb += replace_way_onehot.eq(1 << replace_way)
+            ct = Signal(self.TAG_RAM_WIDTH)
+            comb += ct.eq(r1.reload_tag << (replace_way * self.TAG_WIDTH))
+            comb += wr_tag.en.eq(replace_way_onehot)
+            comb += wr_tag.addr.eq(r1.store_index)
+            comb += wr_tag.data.eq(ct)
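+            # worked example (way count assumed for illustration): with
+            # NUM_WAYS=4 and replace_way=2, replace_way_onehot is 0b0100,
+            # so the granularity=TAG_WIDTH port write-enables only the
+            # third TAG_WIDTH-wide slice of the tag row; ct has
+            # r1.reload_tag pre-shifted into exactly that slice.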
+
             sync += r1.store_way.eq(replace_way)
             sync += r1.write_tag.eq(0)
 
@@ -1345,12 +1510,15 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                       | (req_op == Op.OP_STORE_HIT)):
                 sync += r1.req.eq(req)
                 sync += r1.full.eq(1)
+                # do not let the RELOAD_WAIT_ACK or STORE_WAIT_ACK states
+                # destroy r1.req by overwriting r1.full back to zero
+                comb += r1_next_cycle.eq(1)
 
         # Main state machine
         with m.Switch(r1.state):
 
             with m.Case(State.IDLE):
-                sync += r1.wb.adr.eq(req.real_addr[ROW_LINE_BITS:])
+                sync += r1.wb.adr.eq(req.real_addr[self.ROW_OFF_BITS:])
                 sync += r1.wb.sel.eq(req.byte_sel)
                 sync += r1.wb.dat.eq(req.data)
                 sync += r1.dcbz.eq(req.dcbz)
@@ -1359,16 +1527,19 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                 # for subsequent stores.
                 sync += r1.store_index.eq(req_idx)
                 sync += r1.store_row.eq(req_row)
-                sync += r1.end_row_ix.eq(get_row_of_line(req_row)-1)
+                sync += r1.end_row_ix.eq(self.get_row_of_line(req_row)-1)
                 sync += r1.reload_tag.eq(req_tag)
                 sync += r1.req.same_tag.eq(1)
 
                 with m.If(req.op == Op.OP_STORE_HIT):
                     sync += r1.store_way.eq(req.hit_way)
 
+                #with m.If(r1.dec_acks):
+                #    sync += r1.acks_pending.eq(r1.acks_pending - 1)
+
                 # Reset per-row valid bits,
                 # ready for handling OP_LOAD_MISS
-                for i in range(ROW_PER_LINE):
+                for i in range(self.ROW_PER_LINE):
                     sync += r1.rows_valid[i].eq(0)
 
                 with m.If(req_op != Op.OP_NONE):
@@ -1404,12 +1575,13 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                             sync += r1.state.eq(State.STORE_WAIT_ACK)
                             sync += r1.acks_pending.eq(1)
                             sync += r1.full.eq(0)
+                            comb += r1_next_cycle.eq(0)
                             sync += r1.slow_valid.eq(1)
 
-                            with m.If(~req.mmu_req):
-                                sync += r1.ls_valid.eq(1)
-                            with m.Else():
+                            with m.If(req.mmu_req):
                                 sync += r1.mmu_done.eq(1)
+                            with m.Else():
+                                sync += r1.ls_valid.eq(1)
 
                             with m.If(req.op == Op.OP_STORE_HIT):
                                 sync += r1.write_bram.eq(1)
@@ -1436,30 +1608,25 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                         pass
 
             with m.Case(State.RELOAD_WAIT_ACK):
-                ld_stbs_done = Signal()
-                # Requests are all sent if stb is 0
-                comb += ld_stbs_done.eq(~r1.wb.stb)
 
                 # If we are still sending requests, was one accepted?
                 with m.If((~bus.stall) & r1.wb.stb):
-                    # That was the last word?  We are done sending.
-                    # Clear stb and set ld_stbs_done so we can handle an
-                    # eventual last ack on the same cycle.
+                    # That was the last word?  We are done sending: clear stb.
                     # sigh - reconstruct wb adr with 3 extra 0s at front
-                    wb_adr = Cat(Const(0, ROW_OFF_BITS), r1.wb.adr)
-                    with m.If(is_last_row_addr(wb_adr, r1.end_row_ix)):
+                    wb_adr = Cat(Const(0, self.ROW_OFF_BITS), r1.wb.adr)
+                    with m.If(self.is_last_row_addr(wb_adr, r1.end_row_ix)):
                         sync += r1.wb.stb.eq(0)
-                        comb += ld_stbs_done.eq(1)
 
                     # Calculate the next row address in the current cache line
-                    row = Signal(LINE_OFF_BITS-ROW_OFF_BITS)
+                    rlen = self.LINE_OFF_BITS-self.ROW_OFF_BITS
+                    row = Signal(rlen)
                     comb += row.eq(r1.wb.adr)
-                    sync += r1.wb.adr[:LINE_OFF_BITS-ROW_OFF_BITS].eq(row+1)
+                    sync += r1.wb.adr[:rlen].eq(row+1)
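+                    # worked example: with LINE_OFF_BITS=6 and
+                    # ROW_OFF_BITS=3, rlen is 3, so the low 3 bits of
+                    # r1.wb.adr count rows 0..7 and row+1 wraps naturally
+                    # within the cache line.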
 
                 # Incoming acks processing
                 sync += r1.forward_valid1.eq(bus.ack)
                 with m.If(bus.ack):
-                    srow = Signal(ROW_LINE_BITS)
+                    srow = Signal(self.ROW_LINE_BITS)
                     comb += srow.eq(r1.store_row)
                     sync += r1.rows_valid[srow].eq(1)
 
@@ -1468,30 +1635,34 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                     # Compare the whole address in case the
                     # request in r1.req is not the one that
                     # started this refill.
-                    with m.If(req.valid & r1.req.same_tag &
-                              ((r1.dcbz & r1.req.dcbz) |
-                               (~r1.dcbz & (r1.req.op == Op.OP_LOAD_MISS))) &
-                                (r1.store_row == get_row(req.real_addr))):
-                        sync += r1.full.eq(0)
+                    rowmatch = Signal()
+                    lastrow = Signal()
+                    comb += rowmatch.eq(r1.store_row ==
+                                        self.get_row(r1.req.real_addr))
+                    comb += lastrow.eq(self.is_last_row(r1.store_row,
+                                                      r1.end_row_ix))
+                    with m.If(r1.full & r1.req.same_tag &
+                              ((r1.dcbz & req.dcbz) |
+                               (r1.req.op == Op.OP_LOAD_MISS)) & rowmatch):
+                        sync += r1.full.eq(r1_next_cycle)
                         sync += r1.slow_valid.eq(1)
-                        with m.If(~r1.mmu_req):
-                            sync += r1.ls_valid.eq(1)
-                        with m.Else():
+                        with m.If(r1.mmu_req):
                             sync += r1.mmu_done.eq(1)
+                        with m.Else():
+                            sync += r1.ls_valid.eq(1)
                         sync += r1.forward_sel.eq(~0) # all 1s
                         sync += r1.use_forward1.eq(1)
 
                     # Check for completion
-                    with m.If(ld_stbs_done & is_last_row(r1.store_row,
-                                                      r1.end_row_ix)):
+                    with m.If(lastrow):
                         # Complete wishbone cycle
                         sync += r1.wb.cyc.eq(0)
 
                         # Cache line is now valid
-                        cv = Signal(INDEX_BITS)
-                        comb += cv.eq(cache_tags[r1.store_index].valid)
+                        cv = Signal(self.NUM_WAYS) # one valid bit per way
+                        comb += cv.eq(cache_valids[r1.store_index])
                         comb += cv.bit_select(r1.store_way, 1).eq(1)
-                        sync += cache_tags[r1.store_index].valid.eq(cv)
+                        sync += cache_valids[r1.store_index].eq(cv)
 
                         sync += r1.state.eq(State.IDLE)
                         sync += Display("cache valid set %x "
@@ -1499,23 +1670,21 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                                          cv, r1.store_index, r1.store_way)
 
                     # Increment store row counter
-                    sync += r1.store_row.eq(next_row(r1.store_row))
+                    sync += r1.store_row.eq(self.next_row(r1.store_row))
 
             with m.Case(State.STORE_WAIT_ACK):
                 st_stbs_done = Signal()
-                acks        = Signal(3)
                 adjust_acks = Signal(3)
 
                 comb += st_stbs_done.eq(~r1.wb.stb)
-                comb += acks.eq(r1.acks_pending)
 
                 with m.If(r1.inc_acks != r1.dec_acks):
                     with m.If(r1.inc_acks):
-                        comb += adjust_acks.eq(acks + 1)
+                        comb += adjust_acks.eq(r1.acks_pending + 1)
                     with m.Else():
-                        comb += adjust_acks.eq(acks - 1)
+                        comb += adjust_acks.eq(r1.acks_pending - 1)
                 with m.Else():
-                    comb += adjust_acks.eq(acks)
+                    comb += adjust_acks.eq(r1.acks_pending)
 
                 sync += r1.acks_pending.eq(adjust_acks)
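                 # worked example: acks_pending=3 with inc_acks and not
                 # dec_acks becomes 4; dec_acks alone gives 2; when both
                 # or neither fire, the count holds unchanged.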
 
@@ -1523,21 +1692,26 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                 with m.If(~bus.stall):
                     # See if there is another store waiting
                     # to be done which is in the same real page.
+                    # (this is when same_tag is true)
                     with m.If(req.valid):
-                        _ra = req.real_addr[ROW_LINE_BITS:SET_SIZE_BITS]
-                        sync += r1.wb.adr[0:SET_SIZE_BITS].eq(_ra)
+                        _ra = req.real_addr[self.ROW_OFF_BITS:
+                                            self.SET_SIZE_BITS]
+                        alen = self.SET_SIZE_BITS-self.ROW_OFF_BITS
+                        sync += r1.wb.adr[0:alen].eq(_ra)
                         sync += r1.wb.dat.eq(req.data)
                         sync += r1.wb.sel.eq(req.byte_sel)
 
                     with m.If((adjust_acks < 7) & req.same_tag &
-                                ((req.op == Op.OP_STORE_MISS)
-                                 (req.op == Op.OP_STORE_HIT))):
+                                ((req.op == Op.OP_STORE_MISS) |
+                                 (req.op == Op.OP_STORE_HIT))):
                         sync += r1.wb.stb.eq(1)
                         comb += st_stbs_done.eq(0)
+                        sync += r1.store_way.eq(req.hit_way)
+                        sync += r1.store_row.eq(self.get_row(req.real_addr))
 
                         with m.If(req.op == Op.OP_STORE_HIT):
                             sync += r1.write_bram.eq(1)
-                        sync += r1.full.eq(0)
+                        sync += r1.full.eq(r1_next_cycle)
                         sync += r1.slow_valid.eq(1)
 
                         # Store requests never come from the MMU
@@ -1549,6 +1723,8 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                         comb += st_stbs_done.eq(1)
 
                 # Got ack ? See if complete.
+                sync += Display("got ack %d %d stbs %d adjust_acks %d",
+                                bus.ack, bus.ack, st_stbs_done, adjust_acks)
                 with m.If(bus.ack):
                     with m.If(st_stbs_done & (adjust_acks == 1)):
                         sync += r1.state.eq(State.IDLE)
@@ -1564,13 +1740,13 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
                 # Got ack ? complete.
                 with m.If(bus.ack):
                     sync += r1.state.eq(State.IDLE)
-                    sync += r1.full.eq(0)
+                    sync += r1.full.eq(r1_next_cycle)
                     sync += r1.slow_valid.eq(1)
 
-                    with m.If(~r1.mmu_req):
-                        sync += r1.ls_valid.eq(1)
-                    with m.Else():
+                    with m.If(r1.mmu_req):
                         sync += r1.mmu_done.eq(1)
+                    with m.Else():
+                        sync += r1.ls_valid.eq(1)
 
                     sync += r1.forward_sel.eq(~0) # all 1s
                     sync += r1.use_forward1.eq(1)
@@ -1590,20 +1766,19 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
     def elaborate(self, platform):
 
         m = Module()
-        comb = m.d.comb
-        d_in = self.d_in
+        comb, sync = m.d.comb, m.d.sync
+        m_in, d_in = self.m_in, self.d_in
 
         # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
-        cache_tags       = CacheTagArray()
-        cache_tag_set    = Signal(TAG_RAM_WIDTH)
+        cache_valids     = self.CacheValidsArray()
+        cache_tag_set    = Signal(self.TAG_RAM_WIDTH)
 
-        # TODO attribute ram_style : string;
-        # TODO attribute ram_style of cache_tags : signal is "distributed";
+        self.tagmem = Memory(depth=self.NUM_LINES, width=self.TAG_RAM_WIDTH,
+                             attrs={'syn_ramstyle': "block_ram"})
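+        # ports into tagmem are created where they are used: the
+        # granularity=TAG_WIDTH write port in dcache_slow, and (an
+        # assumption here, outside this hunk) a read port in
+        # cache_tag_read, e.g.:
+        #     m.submodules.rd_tag = rd_tag = self.tagmem.read_port()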
 
         """note: these are passed to nmigen.hdl.Memory as "attributes".
            don't know how, just that they are.
         """
-        dtlb            = TLBArray()
         # TODO attribute ram_style of
         #  dtlb_tags : signal is "distributed";
         # TODO attribute ram_style of
@@ -1612,21 +1787,21 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
         r0      = RegStage0("r0")
         r0_full = Signal()
 
-        r1 = RegStage1("r1")
+        r1 = RegStage1(self, "r1")
 
-        reservation = Reservation()
+        reservation = Reservation(self, "rsrv")
 
         # Async signals on incoming request
-        req_index    = Signal(INDEX_BITS)
-        req_row      = Signal(ROW_BITS)
-        req_hit_way  = Signal(WAY_BITS)
-        req_tag      = Signal(TAG_BITS)
+        req_index    = Signal(self.INDEX_BITS)
+        req_row      = Signal(self.ROW_BITS)
+        req_hit_way  = Signal(self.WAY_BITS)
+        req_tag      = Signal(self.TAG_BITS)
         req_op       = Signal(Op)
         req_data     = Signal(64)
         req_same_tag = Signal()
         req_go       = Signal()
 
-        early_req_row     = Signal(ROW_BITS)
+        early_req_row     = Signal(self.ROW_BITS)
 
         cancel_store      = Signal()
         set_rsrv          = Signal()
@@ -1640,25 +1815,25 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
 
         cache_out_row     = Signal(WB_DATA_BITS)
 
-        plru_victim       = PLRUOut()
-        replace_way       = Signal(WAY_BITS)
+        plru_victim       = Signal(self.WAY_BITS)
+        replace_way       = Signal(self.WAY_BITS)
 
         # Wishbone read/write/cache write formatting signals
         bus_sel           = Signal(8)
 
         # TLB signals
-        tlb_way       = TLBRecord("tlb_way")
-        tlb_req_index = Signal(TLB_SET_BITS)
-        tlb_hit       = TLBHit("tlb_hit")
-        pte           = Signal(TLB_PTE_BITS)
-        ra            = Signal(REAL_ADDR_BITS)
+        tlb_way       = self.TLBRecord("tlb_way")
+        tlb_req_index = Signal(self.TLB_SET_BITS)
+        tlb_hit       = self.TLBHit("tlb_hit")
+        pte           = Signal(self.TLB_PTE_BITS)
+        ra            = Signal(self.REAL_ADDR_BITS)
         valid_ra      = Signal()
         perm_attr     = PermAttr("dc_perms")
         rc_ok         = Signal()
         perm_ok       = Signal()
         access_ok     = Signal()
 
-        tlb_plru_victim = TLBPLRUOut()
+        tlb_plru_victim = Signal(self.TLB_WAY_BITS)
 
         # we don't yet handle collisions between loadstore1 requests
         # and MMU requests
@@ -1668,11 +1843,20 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
         comb += r0_stall.eq(r0_full & (r1.full | d_in.hold))
         comb += r0_valid.eq(r0_full & ~r1.full & ~d_in.hold)
         comb += self.stall_out.eq(r0_stall)
+        # debugging: note if any stall was ever requested (which is fine),
+        # and if a request arrives while a stall is asserted (which is bad).
+        with m.If(r0_stall):
+            sync += self.any_stall_out.eq(1)
+            with m.If(d_in.valid):
+                sync += self.dreq_when_stall.eq(1)
+            with m.If(m_in.valid):
+                sync += self.mreq_when_stall.eq(1)
 
         # deal with litex not doing wishbone pipeline mode
         # XXX in wrong way.  FIFOs are needed in the SRAM test
         # so that stb/ack match up. same thing done in icache.py
-        comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
+        if not self.microwatt_compat:
+            comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
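+        # deriving stall as cyc & ~ack limits the bus to one outstanding
+        # request at a time, emulating classic (non-pipelined) wishbone
+        # handshaking.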
 
         # Wire up wishbone request latch out of stage 1
         comb += self.bus.we.eq(r1.wb.we)
@@ -1682,21 +1866,23 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
         comb += self.bus.dat_w.eq(r1.wb.dat)
         comb += self.bus.cyc.eq(r1.wb.cyc)
 
+        # create submodule TLBUpdate
+        m.submodules.dtlb_update = self.dtlb_update = DTLBUpdate(self)
+
         # call sub-functions putting everything together, using shared
         # signals established above
         self.stage_0(m, r0, r1, r0_full)
-        self.tlb_read(m, r0_stall, tlb_way, dtlb)
+        self.tlb_read(m, r0_stall, tlb_way)
         self.tlb_search(m, tlb_req_index, r0, r0_valid,
                         tlb_way,
                         pte, tlb_hit, valid_ra, perm_attr, ra)
-        self.tlb_update(m, r0_valid, r0, dtlb, tlb_req_index,
-                        tlb_hit, tlb_plru_victim,
-                        tlb_way)
+        self.tlb_update(m, r0_valid, r0, tlb_req_index,
+                        tlb_hit, tlb_plru_victim)
         self.maybe_plrus(m, r1, plru_victim)
-        self.maybe_tlb_plrus(m, r1, tlb_plru_victim)
-        self.cache_tag_read(m, r0_stall, req_index, cache_tag_set, cache_tags)
+        self.maybe_tlb_plrus(m, r1, tlb_plru_victim, tlb_req_index)
+        self.cache_tag_read(m, r0_stall, req_index, cache_tag_set)
         self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
-                           r0_valid, r1, cache_tags, replace_way,
+                           r0_valid, r1, cache_valids, replace_way,
                            use_forward1_next, use_forward2_next,
                            req_hit_way, plru_victim, rc_ok, perm_attr,
                            valid_ra, perm_ok, access_ok, req_op, req_go,
@@ -1714,7 +1900,7 @@ cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
         self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
                     r0, replace_way,
                     req_hit_way, req_same_tag,
-                         r0_valid, req_op, cache_tags, req_go, ra)
+                         r0_valid, req_op, cache_valids, req_go, ra)
         #self.dcache_log(m, r1, valid_ra, tlb_hit, stall_out)
 
         return m