dcache.py move MemAccessRequest RecordObject to top of file, small
[soc.git] / src / soc / experiment / dcache.py
index e11ba0d63b0be71f1ad0ef7776ff67644e9626c8..a5264dedb89ac3c3252073a48a6d3de4e3674b84 100644 (file)
@@ -19,80 +19,115 @@ from experiment.mem_types import LoadStore1ToDcacheType,
 
 from experiment.wb_types import WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                 WBAddrType, WBDataType, WBSelType,
-                                WbMasterOut, WBSlaveOut, WBMasterOutVector,
-                                WBSlaveOutVector, WBIOMasterOut,
-                                WBIOSlaveOut
-
-# --
-# -- Set associative dcache write-through
-# --
-# -- TODO (in no specific order):
-# --
-# -- * See list in icache.vhdl
-# -- * Complete load misses on the cycle when WB data comes instead of
-# --   at the end of line (this requires dealing with requests coming in
-# --   while not idle...)
-# --
-# library ieee;
-# use ieee.std_logic_1164.all;
-# use ieee.numeric_std.all;
-#
-# library work;
-# use work.utils.all;
-# use work.common.all;
-# use work.helpers.all;
-# use work.wishbone_types.all;
-#
-# entity dcache is
-class Dcache(Elaboratable):
-#     generic (
-#         -- Line size in bytes
-#         LINE_SIZE : positive := 64;
-#         -- Number of lines in a set
-#         NUM_LINES : positive := 32;
-#         -- Number of ways
-#         NUM_WAYS  : positive := 4;
-#         -- L1 DTLB entries per set
-#         TLB_SET_SIZE : positive := 64;
-#         -- L1 DTLB number of sets
-#         TLB_NUM_WAYS : positive := 2;
-#         -- L1 DTLB log_2(page_size)
-#         TLB_LG_PGSZ : positive := 12;
-#         -- Non-zero to enable log data collection
-#         LOG_LENGTH : natural := 0
-#         );
+                                WbMasterOut, WBSlaveOut,
+                                WBMasterOutVector, WBSlaveOutVector,
+                                WBIOMasterOut, WBIOSlaveOut
+
+
+# Record for storing permission, attribute, etc. bits from a PTE
+class PermAttr(RecordObject):
     def __init__(self):
-        # Line size in bytes
-        self.LINE_SIZE = 64
-        # Number of lines in a set
-        self.NUM_LINES = 32
-        # Number of ways
-        self.NUM_WAYS = 4
-        # L1 DTLB entries per set
-        self.TLB_SET_SIZE = 64
-        # L1 DTLB number of sets
-        self.TLB_NUM_WAYS = 2
-        # L1 DTLB log_2(page_size)
-        self.TLB_LG_PGSZ = 12
-        # Non-zero to enable log data collection
-        self.LOG_LENGTH = 0
-#     port (
-#         clk          : in std_ulogic;
-#         rst          : in std_ulogic;
+        super().__init__()
+        self.reference = Signal()
+        self.changed   = Signal()
+        self.nocache   = Signal()
+        self.priv      = Signal()
+        self.rd_perm   = Signal()
+        self.wr_perm   = Signal()
+
+
+def extract_perm_attr(pte):
+    pa = PermAttr()
+    pa.reference = pte[8]
+    pa.changed   = pte[7]
+    pa.nocache   = pte[5]
+    pa.priv      = pte[3]
+    pa.rd_perm   = pte[2]
+    pa.wr_perm   = pte[1]
+    return pa
+
+
+# Type of operation on a "valid" input
+@unique
+class Op(Enum):
+    OP_NONE       = 0
+    OP_BAD        = 1 # NC cache hit, TLB miss, prot/RC failure
+    OP_STCX_FAIL  = 2 # conditional store w/o reservation
+    OP_LOAD_HIT   = 3 # Cache hit on load
+    OP_LOAD_MISS  = 4 # Load missing cache
+    OP_LOAD_NC    = 5 # Non-cachable load
+    OP_STORE_HIT  = 6 # Store hitting cache
+    OP_STORE_MISS = 7 # Store missing cache
+
+
+# Cache state machine
+@unique
+class State(Enum):
+    IDLE             = 0 # Normal load hit processing
+    RELOAD_WAIT_ACK  = 1 # Cache reload wait ack
+    STORE_WAIT_ACK   = 2 # Store wait ack
+    NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
+
+
+# Dcache operations:
 #
-#         d_in         : in Loadstore1ToDcacheType;
-#         d_out        : out DcacheToLoadstore1Type;
+# In order to make timing, we use the BRAMs with
+# an output buffer, which means that the BRAM
+# output is delayed by an extra cycle.
 #
-#         m_in         : in MmuToDcacheType;
-#         m_out        : out DcacheToMmuType;
+# Thus, the dcache has a 2-stage internal pipeline
+# for cache hits with no stalls.
 #
-#      stall_out    : out std_ulogic;
+# All other operations are handled via stalling
+# in the first stage.
 #
-#         wishbone_out : out wishbone_master_out;
-#         wishbone_in  : in wishbone_slave_out;
+# The second stage can thus complete a hit at the same
+# time as the first stage emits a stall for a complex op.
 #
-#         log_out      : out std_ulogic_vector(19 downto 0)
-#         );
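+# (a minimal nmigen sketch of the "BRAM with an output buffer" timing,
+#  names hypothetical -- the Memory read port is synchronous, so data
+#  arrives one cycle after the address, and the extra output register
+#  adds the second cycle of the 2-stage pipeline:
+#
+#      mem    = Memory(width=WB_DATA_BITS, depth=BRAM_ROWS)
+#      rdport = mem.read_port()              # +1 cycle: synchronous read
+#      m.submodules.rdport = rdport
+#      m.d.comb += rdport.addr.eq(req_row)
+#      m.d.sync += r1_data.eq(rdport.data)   # +1 cycle: output buffer
+# )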
+# Stage 0 register, basically contains just the latched request
+class RegStage0(RecordObject):
+    def __init__(self):
+        super().__init__()
+        self.req     = LoadStore1ToDcacheType()
+        self.tlbie   = Signal()
+        self.doall   = Signal()
+        self.tlbld   = Signal()
+        self.mmu_req = Signal() # indicates source of request
+
+
+class MemAccessRequest(RecordObject):
+    def __init__(self):
+        super().__init__()
+        self.op        = Signal(Op)
+        self.valid     = Signal()
+        self.dcbz      = Signal()
+        self.real_addr = Signal(REAL_ADDR_BITS)
+        self.data      = Signal(64)
+        self.byte_sel  = Signal(8)
+        self.hit_way   = Signal(WAY_BITS)
+        self.same_tag  = Signal()
+        self.mmu_req   = Signal()
+
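+# (one of these is latched into the stage-1 register set further down --
+#  see the "self.req = MemAccessRequest()" line in the first-stage
+#  register class -- which is why it now lives at module level)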
+
+# Set associative dcache write-through
+#
+# TODO (in no specific order):
+#
+# * See list in icache.vhdl
+# * Complete load misses on the cycle when WB data comes instead of
+#   at the end of line (this requires dealing with requests coming in
+#   while not idle...)
+class Dcache(Elaboratable):
+    def __init__(self):
+        # TODO: make these parameters of Dcache at some point
+        self.LINE_SIZE = 64    # Line size in bytes
+        self.NUM_LINES = 32    # Number of lines in a set
+        self.NUM_WAYS = 4      # Number of ways
+        self.TLB_SET_SIZE = 64 # L1 DTLB entries per set
+        self.TLB_NUM_WAYS = 2  # L1 DTLB number of sets
+        self.TLB_LG_PGSZ = 12  # L1 DTLB log_2(page_size)
+        self.LOG_LENGTH = 0    # Non-zero to enable log data collection
+
         self.d_in      = LoadStore1ToDcacheType()
         self.d_out     = DcacheToLoadStore1Type()
 
@@ -105,9 +140,7 @@ class Dcache(Elaboratable):
         self.wb_in     = WBSlaveOut()
 
         self.log_out   = Signal(20)
-# end entity dcache;
 
-# architecture rtl of dcache is
     def elaborate(self, platform):
         LINE_SIZE    = self.LINE_SIZE
         NUM_LINES    = self.NUM_LINES
@@ -117,14 +150,6 @@ class Dcache(Elaboratable):
         TLB_LG_PGSZ  = self.TLB_LG_PGSZ
         LOG_LENGTH   = self.LOG_LENGTH
 
-#     -- BRAM organisation: We never access more than
-#     -- wishbone_data_bits at a time so to save
-#     -- resources we make the array only that wide, and
-#     -- use consecutive indices for to make a cache "line"
-#     --
-#     -- ROW_SIZE is the width in bytes of the BRAM
-#     -- (based on WB, so 64-bits)
-#     constant ROW_SIZE : natural := wishbone_data_bits / 8;
         # BRAM organisation: We never access more than
         #     -- wishbone_data_bits at a time so to save
         #     -- resources we make the array only that wide, and
@@ -134,88 +159,53 @@ class Dcache(Elaboratable):
         #     -- (based on WB, so 64-bits)
         ROW_SIZE = WB_DATA_BITS // 8
 
-#     -- ROW_PER_LINE is the number of row (wishbone
-#     -- transactions) in a line
-#     constant ROW_PER_LINE  : natural := LINE_SIZE / ROW_SIZE;
-#     -- BRAM_ROWS is the number of rows in BRAM needed
-#     -- to represent the full dcache
-#     constant BRAM_ROWS : natural := NUM_LINES * ROW_PER_LINE;
         # ROW_PER_LINE is the number of row (wishbone
         # transactions) in a line
-        ROW_PER_LINE = LINE_SIZE / ROW_SIZE
+        ROW_PER_LINE = LINE_SIZE // ROW_SIZE
+
         # BRAM_ROWS is the number of rows in BRAM needed
         # to represent the full dcache
         BRAM_ROWS = NUM_LINES * ROW_PER_LINE
 
-#     -- Bit fields counts in the address
-#
-#     -- REAL_ADDR_BITS is the number of real address
-#     -- bits that we store
-#     constant REAL_ADDR_BITS : positive := 56;
-#     -- ROW_BITS is the number of bits to select a row
-#     constant ROW_BITS      : natural := log2(BRAM_ROWS);
-#     -- ROW_LINEBITS is the number of bits to select
-#     -- a row within a line
-#     constant ROW_LINEBITS  : natural := log2(ROW_PER_LINE);
-#     -- LINE_OFF_BITS is the number of bits for
-#     -- the offset in a cache line
-#     constant LINE_OFF_BITS : natural := log2(LINE_SIZE);
-#     -- ROW_OFF_BITS is the number of bits for
-#     -- the offset in a row
-#     constant ROW_OFF_BITS : natural := log2(ROW_SIZE);
-#     -- INDEX_BITS is the number if bits to
-#     -- select a cache line
-#     constant INDEX_BITS : natural := log2(NUM_LINES);
-#     -- SET_SIZE_BITS is the log base 2 of the set size
-#     constant SET_SIZE_BITS : natural := LINE_OFF_BITS
-#                                         + INDEX_BITS;
-#     -- TAG_BITS is the number of bits of
-#     -- the tag part of the address
-#     constant TAG_BITS : natural := REAL_ADDR_BITS - SET_SIZE_BITS;
-#     -- TAG_WIDTH is the width in bits of each way of the tag RAM
-#     constant TAG_WIDTH : natural := TAG_BITS + 7
-#                                     - ((TAG_BITS + 7) mod 8);
-#     -- WAY_BITS is the number of bits to select a way
-#     constant WAY_BITS : natural := log2(NUM_WAYS);
+
         # Bit fields counts in the address
 
         # REAL_ADDR_BITS is the number of real address
         # bits that we store
         REAL_ADDR_BITS = 56
+
         # ROW_BITS is the number of bits to select a row
         ROW_BITS = log2_int(BRAM_ROWS)
+
         # ROW_LINE_BITS is the number of bits to select
         # a row within a line
         ROW_LINE_BITS = log2_int(ROW_PER_LINE)
+
         # LINE_OFF_BITS is the number of bits for
         # the offset in a cache line
         LINE_OFF_BITS = log2_int(LINE_SIZE)
+
         # ROW_OFF_BITS is the number of bits for
         # the offset in a row
         ROW_OFF_BITS = log2_int(ROW_SIZE)
+
         # INDEX_BITS is the number if bits to
         # select a cache line
         INDEX_BITS = log2_int(NUM_LINES)
+
         # SET_SIZE_BITS is the log base 2 of the set size
         SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
+
         # TAG_BITS is the number of bits of
         # the tag part of the address
         TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
+
         # TAG_WIDTH is the width in bits of each way of the tag RAM
         TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
+
         # WAY_BITS is the number of bits to select a way
         WAY_BITS = log2_int(NUM_WAYS)
 
-#     -- Example of layout for 32 lines of 64 bytes:
-#     --
-#     -- ..  tag    |index|  line  |
-#     -- ..         |   row   |    |
-#     -- ..         |     |---|    | ROW_LINEBITS  (3)
-#     -- ..         |     |--- - --| LINE_OFF_BITS (6)
-#     -- ..         |         |- --| ROW_OFF_BITS  (3)
-#     -- ..         |----- ---|    | ROW_BITS      (8)
-#     -- ..         |-----|        | INDEX_BITS    (5)
-#     -- .. --------|              | TAG_BITS      (45)
         # Example of layout for 32 lines of 64 bytes:
         #
         # ..  tag    |index|  line  |
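         # (worked example, assuming the defaults above: LINE_SIZE=64,
         #  NUM_LINES=32, NUM_WAYS=4, WB_DATA_BITS=64 gives ROW_SIZE=8,
         #  ROW_PER_LINE=8, BRAM_ROWS=256, ROW_BITS=8, ROW_LINE_BITS=3,
         #  LINE_OFF_BITS=6, ROW_OFF_BITS=3, INDEX_BITS=5, SET_SIZE_BITS=11,
         #  TAG_BITS=45, TAG_WIDTH=48, WAY_BITS=2 -- matching the ROW_BITS (8),
         #  INDEX_BITS (5) and TAG_BITS (45) annotations in the layout sketch)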
@@ -230,12 +220,17 @@ class Dcache(Elaboratable):
 
 #     subtype row_t is integer range 0 to BRAM_ROWS-1;
 #     subtype index_t is integer range 0 to NUM_LINES-1;
+"""wherever way_t is used to make a Signal it must be substituted with
+   log2_int(NUM_WAYS) i.e. WAY_BITS.  this because whilst the *range*
+   of the number is 0..NUM_WAYS it requires log2_int(NUM_WAYS) i.e.
+   WAY_BITS of space to store it
+"""
 #     subtype way_t is integer range 0 to NUM_WAYS-1;
 #     subtype row_in_line_t is unsigned(ROW_LINE_BITS-1 downto 0);
-        ROW         = BRAM_ROWS
-        INDEX       = NUM_LINES
-        WAY         = NUM_WAYS
-        ROW_IN_LINE = ROW_LINE_BITS
+        ROW         = BRAM_ROWS   # yyyeah not really necessary, delete
+        INDEX       = NUM_LINES   # yyyeah not really necessary, delete
+        WAY         = NUM_WAYS   # yyyeah not really necessary, delete
+        ROW_IN_LINE = ROW_LINE_BITS   # yyyeah not really necessary, delete
 
 #     -- The cache data BRAM organized as described above for each way
 #     subtype cache_row_t is
@@ -367,7 +362,7 @@ class Dcache(Elaboratable):
             )
 
         def HitWaySet():
-            return Array(Signal(WAY) for x in range(TLB_NUM_WAYS))
+            return Array(Signal(WAY_BITS) for x in range(TLB_NUM_WAYS))
 
 #     signal dtlb_valids : tlb_valids_t;
 #     signal dtlb_tags : tlb_tags_t;
@@ -384,175 +379,11 @@ class Dcache(Elaboratable):
         # TODO attribute ram_style of dtlb_tags : signal is "distributed";
         # TODO attribute ram_style of dtlb_ptes : signal is "distributed";
 
-
-#     -- Record for storing permission, attribute, etc. bits from a PTE
-#     type perm_attr_t is record
-#         reference : std_ulogic;
-#         changed   : std_ulogic;
-#         nocache   : std_ulogic;
-#         priv      : std_ulogic;
-#         rd_perm   : std_ulogic;
-#         wr_perm   : std_ulogic;
-#     end record;
-        # Record for storing permission, attribute, etc. bits from a PTE
-        class PermAttr(RecordObject):
-            def __init__(self):
-                super().__init__()
-                self.reference = Signal()
-                self.changed   = Signal()
-                self.nocache   = Signal()
-                self.priv      = Signal()
-                self.rd_perm   = Signal()
-                self.wr_perm   = Signal()
-
-#     function extract_perm_attr(
-#      pte : std_ulogic_vector(TLB_PTE_BITS - 1 downto 0))
-#      return perm_attr_t is
-#         variable pa : perm_attr_t;
-#     begin
-#         pa.reference := pte(8);
-#         pa.changed := pte(7);
-#         pa.nocache := pte(5);
-#         pa.priv := pte(3);
-#         pa.rd_perm := pte(2);
-#         pa.wr_perm := pte(1);
-#         return pa;
-#     end;
-        def extract_perm_attr(pte=Signal(TLB_PTE_BITS)):
-            pa = PermAttr()
-            pa.reference = pte[8]
-            pa.changed   = pte[7]
-            pa.nocache   = pte[5]
-            pa.priv      = pte[3]
-            pa.rd_perm   = pte[2]
-            pa.wr_perm   = pte[1]
-            return pa;
-
-#     constant real_mode_perm_attr : perm_attr_t :=
-#      (nocache => '0', others => '1');
-        REAL_MODE_PERM_ATTR = PermAttr()
-        REAL_MODE_PERM_ATTR.reference = 1
-        REAL_MODE_PERM_ATTR.changed   = 1
-        REAL_MODE_PERM_ATTR.priv      = 1
-        REAL_MODE_PERM_ATTR.rd_perm   = 1
-        REAL_MODE_PERM_ATTR.wr_perm   = 1
-
-#     -- Type of operation on a "valid" input
-#     type op_t is
-#      (
-#       OP_NONE,
-#      OP_BAD,        -- NC cache hit, TLB miss, prot/RC failure
-#       OP_STCX_FAIL,  -- conditional store w/o reservation
-#      OP_LOAD_HIT,   -- Cache hit on load
-#      OP_LOAD_MISS,  -- Load missing cache
-#      OP_LOAD_NC,    -- Non-cachable load
-#      OP_STORE_HIT,  -- Store hitting cache
-#      OP_STORE_MISS  -- Store missing cache
-#      );
-        # Type of operation on a "valid" input
-        @unique
-        class OP(Enum):
-          OP_NONE       = 0
-          OP_BAD        = 1 # NC cache hit, TLB miss, prot/RC failure
-          OP_STCX_FAIL  = 2 # conditional store w/o reservation
-          OP_LOAD_HIT   = 3 # Cache hit on load
-          OP_LOAD_MISS  = 4 # Load missing cache
-          OP_LOAD_NC    = 5 # Non-cachable load
-          OP_STORE_HIT  = 6 # Store hitting cache
-          OP_STORE_MISS = 7 # Store missing cache
-
-#     -- Cache state machine
-#     type state_t is
-#      (
-#       IDLE,            -- Normal load hit processing
-#       RELOAD_WAIT_ACK, -- Cache reload wait ack
-#       STORE_WAIT_ACK,  -- Store wait ack
-#       NC_LOAD_WAIT_ACK -- Non-cachable load wait ack
-#      );
-        # Cache state machine
-        @unique
-        class State(Enum):
-            IDLE             = 0 # Normal load hit processing
-            RELOAD_WAIT_ACK  = 1 # Cache reload wait ack
-            STORE_WAIT_ACK   = 2 # Store wait ack
-            NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
-
-#     -- Dcache operations:
-#     --
-#     -- In order to make timing, we use the BRAMs with
-#     -- an output buffer, which means that the BRAM
-#     -- output is delayed by an extra cycle.
-#     --
-#     -- Thus, the dcache has a 2-stage internal pipeline
-#     -- for cache hits with no stalls.
-#     --
-#     -- All other operations are handled via stalling
-#     -- in the first stage.
-#     --
-#     -- The second stage can thus complete a hit at the same
-#     -- time as the first stage emits a stall for a complex op.
-#
-#     -- Stage 0 register, basically contains just the latched request
-#     type reg_stage_0_t is record
-#         req   : Loadstore1ToDcacheType;
-#         tlbie : std_ulogic;
-#         doall : std_ulogic;
-#         tlbld : std_ulogic;
-#         mmu_req : std_ulogic;   -- indicates source of request
-#     end record;
-# Dcache operations:
-#
-# In order to make timing, we use the BRAMs with
-# an output buffer, which means that the BRAM
-# output is delayed by an extra cycle.
-#
-# Thus, the dcache has a 2-stage internal pipeline
-# for cache hits with no stalls.
-#
-# All other operations are handled via stalling
-# in the first stage.
-#
-# The second stage can thus complete a hit at the same
-# time as the first stage emits a stall for a complex op.
-#
-        # Stage 0 register, basically contains just the latched request
-        class RegStage0(RecordObject):
-            def __init__(self):
-                super().__init__()
-                self.req     = LoadStore1ToDcacheType()
-                self.tlbie   = Signal()
-                self.doall   = Signal()
-                self.tlbld   = Signal()
-                self.mmu_req = Signal() # indicates source of request
-
 #     signal r0 : reg_stage_0_t;
 #     signal r0_full : std_ulogic;
         r0      = RegStage0()
         r0_full = Signal()
 
-#     type mem_access_request_t is record
-#         op        : op_t;
-#         valid     : std_ulogic;
-#         dcbz      : std_ulogic;
-#         real_addr : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
-#         data      : std_ulogic_vector(63 downto 0);
-#         byte_sel  : std_ulogic_vector(7 downto 0);
-#         hit_way   : way_t;
-#         same_tag  : std_ulogic;
-#         mmu_req   : std_ulogic;
-#     end record;
-        class MemAccessRequest(RecordObject):
-            def __init__(self):
-                super().__init__()
-                self.op        = Op()
-                self.valid     = Signal()
-                self.dcbz      = Signal()
-                self.real_addr = Signal(REAL_ADDR_BITS)
-                self.data      = Signal(64)
-                self.byte_sel  = Signal(8)
-                self.hit_way   = Signal(WAY)
-                self.same_tag  = Signal()
-                self.mmu_req   = Signal()
 
 #     -- First stage register, contains state for stage 1 of load hits
 #     -- and for the state machine used by all other operations
@@ -621,7 +452,7 @@ class Dcache(Elaboratable):
                 self.req              = MemAccessRequest()
 
                 # Cache hit state
-                self.hit_way          = Signal(WAY)
+                self.hit_way          = Signal(WAY_BITS)
                 self.hit_load_valid   = Signal()
                 self.hit_index        = Signal(INDEX)
                 self.cache_hit        = Signal()
@@ -636,7 +467,7 @@ class Dcache(Elaboratable):
                 self.forward_data2    = Signal(64)
                 self.forward_sel1     = Signal(8)
                 self.forward_valid1   = Signal()
-                self.forward_way1     = Signal(WAY)
+                self.forward_way1     = Signal(WAY_BITS)
                 self.forward_row1     = Signal(ROW)
                 self.use_forward1     = Signal()
                 self.forward_sel      = Signal(8)
@@ -649,7 +480,7 @@ class Dcache(Elaboratable):
                 self.slow_valid       = Signal()
                 self.wb               = WishboneMasterOut()
                 self.reload_tag       = Signal(CACHE_TAG)
-                self.store_way        = Signal(WAY)
+                self.store_way        = Signal(WAY_BITS)
                 self.store_row        = Signal(ROW)
                 self.store_index      = Signal(INDEX)
                 self.end_row_ix       = Signal(ROW_IN_LINE)
@@ -701,7 +532,7 @@ class Dcache(Elaboratable):
         # Async signals on incoming request
         req_index    = Signal(INDEX)
         req_row      = Signal(ROW)
-        req_hit_way  = Signal(WAY)
+        req_hit_way  = Signal(WAY_BITS)
         req_tag      = Signal(CACHE_TAG)
         req_op       = Signal(Op)
         req_data     = Signal(64)
@@ -750,7 +581,7 @@ class Dcache(Elaboratable):
             return Array(Signal(WAY_BITS) for x in range(Index()))
 
         plru_victim = PLRUOut()
-        replace_way = Signal(WAY)
+        replace_way = Signal(WAY_BITS)
 
 #     -- Wishbone read/write/cache write formatting signals
 #     signal bus_sel     : std_ulogic_vector(7 downto 0);
@@ -1017,35 +848,32 @@ class Dcache(Elaboratable):
 #     assert SET_SIZE_BITS <= TLB_LG_PGSZ
 #      report "Set indexed by virtual address" severity FAILURE;
         assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not " \
-                          "multiple of ROW_SIZE -!- severity FAILURE"
+         "multiple of ROW_SIZE"
 
-        assert (LINE_SIZE % 2) == 0 "LINE_SIZE not power of" \
-                        "2 -!- severity FAILURE"
+        assert (LINE_SIZE & (LINE_SIZE - 1)) == 0, "LINE_SIZE not power of 2"
 
-        assert (NUM_LINES % 2) == 0 "NUM_LINES not power of
-         2 -!- severity FAILURE"
+        assert (NUM_LINES & (NUM_LINES - 1)) == 0, "NUM_LINES not power of 2"
 
-        assert (ROW_PER_LINE % 2) == 0 "ROW_PER_LINE not
-         power of 2 -!- severity FAILURE"
+        assert (ROW_PER_LINE & (ROW_PER_LINE - 1)) == 0, \
+         "ROW_PER_LINE not power of 2"
 
-        assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS)
-         "geometry bits don't add up -!- severity FAILURE"
+        assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), \
+         "geometry bits don't add up"
 
-        assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS)
-         "geometry bits don't add up -!- severity FAILURE"
+        assert LINE_OFF_BITS == (ROW_OFF_BITS + ROW_LINE_BITS), \
+         "geometry bits don't add up"
 
-        assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS
-         + LINE_OFF_BITS) "geometry bits don't add up -!-
-         severity FAILURE"
+        assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS
+                                  + LINE_OFF_BITS), "geometry bits don't add up"
 
-        assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS)
-         "geometry bits don't add up -!- severity FAILURE"
+        assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS), \
+         "geometry bits don't add up"
 
-        assert 64 == wishbone_data_bits "Can't yet handle a
-         wishbone width that isn't 64-bits -!- severity FAILURE"
+        assert 64 == WB_DATA_BITS, "Can't yet handle a " \
+         "wishbone width that isn't 64-bits"
 
-        assert SET_SIZE_BITS <= TLB_LG_PGSZ "Set indexed by
-         virtual address -!- severity FAILURE"
+        assert SET_SIZE_BITS <= TLB_LG_PGSZ, "Set indexed by " \
+         "virtual address"
 
 #     -- Latch the request in r0.req as long as we're not stalling
 #     stage_0 : process(clk)
@@ -1338,7 +1166,12 @@ class TLBSearch(Elaboratable):
                     )
 
 #             perm_attr <= real_mode_perm_attr;
-            comb += perm_attr.eq(real_mode_perm_attr)
+            comb += perm_attr.reference.eq(1)
+            comb += perm_attr.changed.eq(1)
+            comb += perm_attr.priv.eq(1)
+            comb += perm_attr.nocache.eq(0)
+            comb += perm_attr.rd_perm.eq(1)
+            comb += perm_attr.wr_perm.eq(1)
 #         end if;
 #     end process;
 
@@ -1469,36 +1302,73 @@ class MaybePLRUs(Elaboratable):
 #          begin
 #              -- PLRU interface
 #              if r1.hit_index = i then
+        # PLRU interface
+        with m.If(r1.hit_index == i):
 #                  plru_acc_en <= r1.cache_hit;
+            comb += plru_acc_en.eq(r1.cache_hit)
 #              else
+        with m.Else():
 #                  plru_acc_en <= '0';
+            comb += plru_acc_en.eq(0)
 #              end if;
 #              plru_acc <= std_ulogic_vector(to_unsigned(
 #                            r1.hit_way, WAY_BITS
 #                           ));
 #              plru_victim(i) <= plru_out;
+        comb += plru_acc.eq(r1.hit_way)
+        comb += plru_victim[i].eq(plru_out)
 #          end process;
 #      end generate;
 #     end generate;
-#
+
 #     -- Cache tag RAM read port
 #     cache_tag_read : process(clk)
+# Cache tag RAM read port
+class CacheTagRead(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
 #         variable index : index_t;
+        index = Signal(INDEX)
+
 #     begin
 #         if rising_edge(clk) then
 #             if r0_stall = '1' then
+        with m.If(r0_stall):
 #                 index := req_index;
+            comb += index.eq(req_index)
+
 #             elsif m_in.valid = '1' then
+        with m.Elif(m_in.valid):
 #                 index := get_index(m_in.addr);
+            comb += index.eq(get_index(m_in.addr))
+
 #             else
+        with m.Else():
 #                 index := get_index(d_in.addr);
+            comb += index.eq(get_index(d_in.addr))
 #             end if;
 #             cache_tag_set <= cache_tags(index);
+        sync += cache_tag_set.eq(cache_tags[index])
 #         end if;
 #     end process;
-#
+
 #     -- Cache request parsing and hit detection
 #     dcache_request : process(all)
+# Cache request parsing and hit detection
+class DcacheRequest(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
 #         variable is_hit  : std_ulogic;
 #         variable hit_way : way_t;
 #         variable op      : op_t;
@@ -1518,7 +1388,38 @@ class MaybePLRUs(Elaboratable):
 #         variable rel_matches : std_ulogic_vector(
 #                                 TLB_NUM_WAYS - 1 downto 0
 #                                );
-#         variable rel_match   : std_ulogic;
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
+        is_hit      = Signal()
+        hit_way     = Signal(WAY_BITS)
+        op          = Signal(Op)
+        opsel       = Signal(3)
+        go          = Signal()
+        nc          = Signal()
+        s_hit       = Signal()
+        s_tag       = Signal(CACHE_TAG)
+        s_pte       = Signal(TLB_PTE)
+        s_ra        = Signal(REAL_ADDR_BITS)
+        hit_set     = Signal(TLB_NUM_WAYS)
+        hit_way_set = HitWaySet()
+        rel_matches = Signal(TLB_NUM_WAYS)
+        rel_match   = Signal()
+
 #     begin
 #        -- Extract line, row and tag from request
 #         req_index <= get_index(r0.req.addr);
@@ -1527,7 +1428,13 @@ class MaybePLRUs(Elaboratable):
 #
 #         go := r0_valid and not (r0.tlbie or r0.tlbld)
 #               and not r1.ls_error;
-#
+        # Extract line, row and tag from request
+        comb += req_index.eq(get_index(r0.req.addr))
+        comb += req_row.eq(get_row(r0.req.addr))
+        comb += req_tag.eq(get_tag(ra))
+
+        comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
+
 #        -- Test if pending request is a hit on any way
 #         -- In order to make timing in virtual mode,
 #         -- when we are using the TLB, we compare each
@@ -1536,64 +1443,120 @@ class MaybePLRUs(Elaboratable):
 #         hit_way := 0;
 #         is_hit := '0';
 #         rel_match := '0';
+        # Test if pending request is a hit on any way
+        # In order to make timing in virtual mode,
+        # when we are using the TLB, we compare each
+        # way with each of the real addresses from each way of
+        # the TLB, and then decide later which match to use.
+        comb += hit_way.eq(0)
+        comb += is_hit.eq(0)
+        comb += rel_match.eq(0)
+
 #         if r0.req.virt_mode = '1' then
+        with m.If(r0.req.virt_mode):
 #             rel_matches := (others => '0');
+            comb += rel_matches.eq(0)
 #             for j in tlb_way_t loop
+            for j in range(TLB_NUM_WAYS):
 #                 hit_way_set(j) := 0;
 #                 s_hit := '0';
 #                 s_pte := read_tlb_pte(j, tlb_pte_way);
-#                 s_ra :=
-#                  s_pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ)
-#                  & r0.req.addr(TLB_LG_PGSZ - 1 downto 0);
+#                 s_ra  := s_pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ)
+#                          & r0.req.addr(TLB_LG_PGSZ - 1 downto 0);
 #                 s_tag := get_tag(s_ra);
+                comb += hit_way_set[j].eq(0)
+                comb += s_hit.eq(0)
+                comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
+                comb += s_ra.eq(Cat(
+                         r0.req.addr[0:TLB_LG_PGSZ],
+                         s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]
+                        ))
+                comb += s_tag.eq(get_tag(s_ra))
+
 #                 for i in way_t loop
+                for i in range(NUM_WAYS):
 #                     if go = '1' and cache_valids(req_index)(i) = '1'
 #                      and read_tag(i, cache_tag_set) = s_tag
 #                      and tlb_valid_way(j) = '1' then
+                    with m.If(go & cache_valid_bits[req_index][i]
+                              & (read_tag(i, cache_tag_set) == s_tag)
+                              & tlb_valid_way[j]):
 #                         hit_way_set(j) := i;
 #                         s_hit := '1';
+                        comb += hit_way_set[j].eq(i)
+                        comb += s_hit.eq(1)
 #                     end if;
 #                 end loop;
 #                 hit_set(j) := s_hit;
+                comb += hit_set[j].eq(s_hit)
 #                 if s_tag = r1.reload_tag then
+                with m.If(s_tag == r1.reload_tag):
 #                     rel_matches(j) := '1';
+                    comb += rel_matches[j].eq(1)
 #                 end if;
 #             end loop;
 #             if tlb_hit = '1' then
+            with m.If(tlb_hit):
 #                 is_hit := hit_set(tlb_hit_way);
 #                 hit_way := hit_way_set(tlb_hit_way);
 #                 rel_match := rel_matches(tlb_hit_way);
+                comb += is_hit.eq(hit_set[tlb_hit_way])
+                comb += hit_way.eq(hit_way_set[tlb_hit_way])
+                comb += rel_match.eq(rel_matches[tlb_hit_way])
 #             end if;
 #         else
+        with m.Else():
 #             s_tag := get_tag(r0.req.addr);
+            comb += s_tag.eq(get_tag(r0.req.addr))
 #             for i in way_t loop
+            for i in range(NUM_WAYS):
 #                 if go = '1' and cache_valids(req_index)(i) = '1' and
 #                     read_tag(i, cache_tag_set) = s_tag then
+                with m.If(go & cache_valid_bits[req_index][i]
+                          & (read_tag(i, cache_tag_set) == s_tag)):
 #                     hit_way := i;
 #                     is_hit := '1';
+                    comb += hit_way.eq(i)
+                    comb += is_hit.eq(1)
 #                 end if;
 #             end loop;
 #             if s_tag = r1.reload_tag then
+            with m.If(s_tag == r1.reload_tag):
 #                 rel_match := '1';
+                comb += rel_match.eq(1)
 #             end if;
 #         end if;
 #         req_same_tag <= rel_match;
-#
+        comb += req_same_tag.eq(rel_match)
+
 #         -- See if the request matches the line currently being reloaded
 #         if r1.state = RELOAD_WAIT_ACK and req_index = r1.store_index
 #          and rel_match = '1' then
+        # See if the request matches the line currently being reloaded
+        with m.If((r1.state == State.RELOAD_WAIT_ACK)
+                  & (req_index == r1.store_index) & rel_match):
 #             -- For a store, consider this a hit even if the row isn't
 #             -- valid since it will be by the time we perform the store.
 #             -- For a load, check the appropriate row valid bit.
+            # For a store, consider this a hit even if the row isn't
+            # valid since it will be by the time we perform the store.
+            # For a load, check the appropriate row valid bit.
 #             is_hit :=
 #              not r0.req.load or r1.rows_valid(req_row mod ROW_PER_LINE);
 #             hit_way := replace_way;
+            comb += is_hit.eq(~r0.req.load
+                     | r1.rows_valid[req_row % ROW_PER_LINE])
+            comb += hit_way.eq(replace_way)
 #         end if;
-#
+
 #         -- Whether to use forwarded data for a load or not
+        # Whether to use forwarded data for a load or not
 #         use_forward1_next <= '0';
+        comb += use_forward1_next.eq(0)
 #         if get_row(r1.req.real_addr) = req_row
 #          and r1.req.hit_way = hit_way then
+        with m.If((get_row(r1.req.real_addr) == req_row)
+                  & (r1.req.hit_way == hit_way)):
 #             -- Only need to consider r1.write_bram here, since if we
 #             -- are writing refill data here, then we don't have a
 #             -- cache hit this cycle on the line being refilled.
@@ -1602,47 +1565,86 @@ class MaybePLRUs(Elaboratable):
 #             -- contents of the victim line, since it is a couple of
 #             -- cycles after the refill starts before we see the updated
 #             -- cache tag. In that case we don't use the bypass.)
+            # Only need to consider r1.write_bram here, since if we
+            # are writing refill data here, then we don't have a
+            # cache hit this cycle on the line being refilled.
+            # (There is the possibility that the load following the
+            # load miss that started the refill could be to the old
+            # contents of the victim line, since it is a couple of
+            # cycles after the refill starts before we see the updated
+            # cache tag. In that case we don't use the bypass.)
 #             use_forward1_next <= r1.write_bram;
+            comb += use_forward1_next.eq(r1.write_bram)
 #         end if;
 #         use_forward2_next <= '0';
+        comb += use_forward2_next.eq(0)
 #         if r1.forward_row1 = req_row and r1.forward_way1 = hit_way then
+        with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
 #             use_forward2_next <= r1.forward_valid1;
+            comb += use_forward2_next.eq(r1.forward_valid1)
 #         end if;
-#
-#      -- The way that matched on a hit
-#      req_hit_way <= hit_way;
-#
+
+#          -- The way that matched on a hit
+        # The way that matched on a hit
+#          req_hit_way <= hit_way;
+        comb += req_hit_way.eq(hit_way)
+
 #         -- The way to replace on a miss
+        # The way to replace on a miss
 #         if r1.write_tag = '1' then
+        with m.If(r1.write_tag):
 #             replace_way <= to_integer(unsigned(
 #                             plru_victim(r1.store_index)
 #                            ));
+            comb += replace_way.eq(plru_victim[r1.store_index])
 #         else
+        with m.Else():
 #             replace_way <= r1.store_way;
+            comb += replace_way.eq(r1.store_way)
 #         end if;
-#
+
 #         -- work out whether we have permission for this access
 #         -- NB we don't yet implement AMR, thus no KUAP
+        # work out whether we have permission for this access
+        # NB we don't yet implement AMR, thus no KUAP
 #         rc_ok <= perm_attr.reference and
 #                  (r0.req.load or perm_attr.changed);
 #         perm_ok <= (r0.req.priv_mode or not perm_attr.priv) and
 #                    (perm_attr.wr_perm or (r0.req.load
 #                    and perm_attr.rd_perm));
 #         access_ok <= valid_ra and perm_ok and rc_ok;
-#
+        comb += rc_ok.eq(
+                 perm_attr.reference & (r0.req.load | perm_attr.changed)
+                )
+        comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv)
+                           & (perm_attr.wr_perm
+                              | (r0.req.load & perm_attr.rd_perm))
+                          )
+        comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
 #      -- Combine the request and cache hit status to decide what
 #      -- operation needs to be done
-#      --
 #         nc := r0.req.nc or perm_attr.nocache;
 #         op := OP_NONE;
+        # Combine the request and cache hit status to decide what
+        # operation needs to be done
+        comb += nc.eq(r0.req.nc | perm_attr.nocache)
+        comb += op.eq(Op.OP_NONE)
 #         if go = '1' then
+        with m.If(go):
 #             if access_ok = '0' then
+            with m.If(~access_ok):
 #                 op := OP_BAD;
+                comb += op.eq(Op.OP_BAD)
 #             elsif cancel_store = '1' then
+            with m.Elif(cancel_store):
 #                 op := OP_STCX_FAIL;
+                comb += op.eq(Op.OP_STCX_FAIL)
 #             else
+            with m.Else():
 #                 opsel := r0.req.load & nc & is_hit;
+                comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
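+                # Cat() packs LSB-first: opsel[0]=is_hit, opsel[1]=nc,
+                # opsel[2]=load, so e.g. 0b101 below means load=1, nc=0,
+                # is_hit=1 (the VHDL "load & nc & is_hit" concatenation)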
 #                 case opsel is
+                with m.Switch(opsel):
 #                     when "101" => op := OP_LOAD_HIT;
 #                     when "100" => op := OP_LOAD_MISS;
 #                     when "110" => op := OP_LOAD_NC;
@@ -1652,105 +1654,225 @@ class MaybePLRUs(Elaboratable):
 #                     when "011" => op := OP_BAD;
 #                     when "111" => op := OP_BAD;
 #                     when others => op := OP_NONE;
+                    with m.Case(0b101):
+                        comb += op.eq(Op.OP_LOAD_HIT)
+
+                    with m.Case(0b100):
+                        comb += op.eq(Op.OP_LOAD_MISS)
+
+                    with m.Case(0b110):
+                        comb += op.eq(Op.OP_LOAD_NC)
+
+                    with m.Case(0b001):
+                        comb += op.eq(Op.OP_STORE_HIT)
+
+                    with m.Case(0b000):
+                        comb += op.eq(Op.OP_STORE_MISS)
+
+                    with m.Case(0b010):
+                        comb += op.eq(Op.OP_STORE_MISS)
+
+                    with m.Case(0b011):
+                        comb += op.eq(Op.OP_BAD)
+
+                    with m.Case(0b111):
+                        comb += op.eq(Op.OP_BAD)
+
+                    with m.Default():
+                        comb += op.eq(Op.OP_NONE)
 #                 end case;
 #             end if;
 #         end if;
 #      req_op <= op;
 #         req_go <= go;
-#
+        comb += req_op.eq(op)
+        comb += req_go.eq(go)
+
 #         -- Version of the row number that is valid one cycle earlier
 #         -- in the cases where we need to read the cache data BRAM.
 #         -- If we're stalling then we need to keep reading the last
 #         -- row requested.
+        # Version of the row number that is valid one cycle earlier
+        # in the cases where we need to read the cache data BRAM.
+        # If we're stalling then we need to keep reading the last
+        # row requested.
 #         if r0_stall = '0' then
+        with m.If(~r0_stall):
 #             if m_in.valid = '1' then
+            with m.If(m_in.valid):
 #                 early_req_row <= get_row(m_in.addr);
+                comb += early_req_row.eq(get_row(m_in.addr))
 #             else
+            with m.Else():
 #                 early_req_row <= get_row(d_in.addr);
+                comb += early_req_row.eq(get_row(d_in.addr))
 #             end if;
 #         else
+        with m.Else():
 #             early_req_row <= req_row;
+            comb += early_req_row.eq(req_row)
 #         end if;
 #     end process;
-#
+
 #     -- Wire up wishbone request latch out of stage 1
 #     wishbone_out <= r1.wb;
-#
+    # Wire up wishbone request latch out of stage 1
+    comb += wishbone_out.eq(r1.wb)
+
 #     -- Handle load-with-reservation and store-conditional instructions
 #     reservation_comb: process(all)
+# Handle load-with-reservation and store-conditional instructions
+class ReservationComb(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
 #     begin
 #         cancel_store <= '0';
 #         set_rsrv <= '0';
 #         clear_rsrv <= '0';
 #         if r0_valid = '1' and r0.req.reserve = '1' then
+        with m.If(r0_valid & r0.req.reserve):
+
 #             -- XXX generate alignment interrupt if address
 #             -- is not aligned XXX or if r0.req.nc = '1'
 #             if r0.req.load = '1' then
+            # XXX generate alignment interrupt if address
+            # is not aligned XXX or if r0.req.nc = '1'
+            with m.If(r0.req.load):
 #                 -- load with reservation
 #                 set_rsrv <= '1';
+                # load with reservation
+                comb += set_rsrv.eq(1)
 #             else
+            with m.Else():
 #                 -- store conditional
 #                 clear_rsrv <= '1';
+                # store conditional
+                comb += clear_rsrv.eq(1)
 #                 if reservation.valid = '0' or r0.req.addr(63
 #                  downto LINE_OFF_BITS) /= reservation.addr then
+                with m.If(~reservation.valid
+                          | (r0.req.addr[LINE_OFF_BITS:64]
+                             != reservation.addr)):
 #                     cancel_store <= '1';
+                    comb += cancel_store.eq(1)
 #                 end if;
 #             end if;
 #         end if;
 #     end process;
-#
+
 #     reservation_reg: process(clk)
+class ReservationReg(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
 #     begin
 #         if rising_edge(clk) then
 #             if rst = '1' then
 #                 reservation.valid <= '0';
+            # TODO understand how resets work in nmigen
 #             elsif r0_valid = '1' and access_ok = '1' then
+        with m.If(r0_valid & access_ok):
 #                 if clear_rsrv = '1' then
+            with m.If(clear_rsrv):
 #                     reservation.valid <= '0';
+                sync += reservation.valid.eq(0)
 #                 elsif set_rsrv = '1' then
+            with m.Elif(set_rsrv):
 #                     reservation.valid <= '1';
 #                     reservation.addr <=
 #                      r0.req.addr(63 downto LINE_OFF_BITS);
+                sync += reservation.valid.eq(1)
+                sync += reservation.addr.eq(r0.req.addr[LINE_OFF_BITS:64])
 #                 end if;
 #             end if;
 #         end if;
 #     end process;
 #
 #     -- Return data for loads & completion control logic
-#     --
 #     writeback_control: process(all)
+# Return data for loads & completion control logic
+class WriteBackControl(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
 #         variable data_out : std_ulogic_vector(63 downto 0);
 #         variable data_fwd : std_ulogic_vector(63 downto 0);
 #         variable j        : integer;
+        data_out = Signal(64)
+        data_fwd = Signal(64)
+        # j is a plain Python byte offset, computed in the loop below
+
 #     begin
 #         -- Use the bypass if are reading the row that was
 #         -- written 1 or 2 cycles ago, including for the
 #         -- slow_valid = 1 case (i.e. completing a load
 #         -- miss or a non-cacheable load).
 #         if r1.use_forward1 = '1' then
+        # Use the bypass if are reading the row that was
+        # written 1 or 2 cycles ago, including for the
+        # slow_valid = 1 case (i.e. completing a load
+        # miss or a non-cacheable load).
+        with m.If(r1.use_forward1):
 #             data_fwd := r1.forward_data1;
+            comb += data_fwd.eq(r1.forward_data1)
 #         else
+        with m.Else():
 #             data_fwd := r1.forward_data2;
+            comb += data_fwd.eq(r1.forward_data2)
 #         end if;
+
 #         data_out := cache_out(r1.hit_way);
+        comb += data_out.eq(cache_out[r1.hit_way])
+
 #         for i in 0 to 7 loop
+        for i in range(8):
 #             j := i * 8;
+            j = i * 8
+
 #             if r1.forward_sel(i) = '1' then
+            with m.If(r1.forward_sel[i]):
 #                 data_out(j + 7 downto j) := data_fwd(j + 7 downto j);
+                comb += data_out[j:j+8].eq(data_fwd[j:j+8])
 #             end if;
 #         end loop;
-#
+
 #        d_out.valid <= r1.ls_valid;
 #        d_out.data <= data_out;
 #         d_out.store_done <= not r1.stcx_fail;
 #         d_out.error <= r1.ls_error;
 #         d_out.cache_paradox <= r1.cache_paradox;
-#
+        comb += d_out.valid.eq(r1.ls_valid)
+        comb += d_out.data.eq(data_out)
+        comb += d_out.store_done.eq(~r1.stcx_fail)
+        comb += d_out.error.eq(r1.ls_error)
+        comb += d_out.cache_paradox.eq(r1.cache_paradox)
+
 #         -- Outputs to MMU
 #         m_out.done <= r1.mmu_done;
 #         m_out.err <= r1.mmu_error;
 #         m_out.data <= data_out;
-#
+        comb += m_out.done.eq(r1.mmu_done)
+        comb += m_out.err.eq(r1.mmu_error)
+        comb += m_out.data.eq(data_out)
+
 #        -- We have a valid load or store hit or we just completed
 #         -- a slow op such as a load miss, a NC load or a store
 #        --
@@ -1768,49 +1890,86 @@ class MaybePLRUs(Elaboratable):
 #        assert ((r1.slow_valid or r1.stcx_fail) and r1.hit_load_valid)
 #          /= '1' report "unexpected hit_load_delayed collision with
 #          slow_valid" severity FAILURE;
-#
+        # We have a valid load or store hit or we just completed
+        # a slow op such as a load miss, a NC load or a store
+        #
+        # Note: the load hit is delayed by one cycle. However it
+        # can still not collide with r.slow_valid (well unless I
+        # miscalculated) because slow_valid can only be set on a
+        # subsequent request and not on its first cycle (the state
+        # machine must have advanced), which makes slow_valid
+        # at least 2 cycles from the previous hit_load_valid.
+
+        # Sanity: Only one of these must be set in any given cycle
+        assert (r1.slow_valid & r1.stcx_fail) != 1, \
+         "unexpected slow_valid collision with stcx_fail"
+
+        assert ((r1.slow_valid | r1.stcx_fail) & r1.hit_load_valid) != 1, \
+         "unexpected hit_load_delayed collision with slow_valid"
+
 #         if r1.mmu_req = '0' then
+        with m.If(~r1.mmu_req):
 #             -- Request came from loadstore1...
 #             -- Load hit case is the standard path
 #             if r1.hit_load_valid = '1' then
+            # Request came from loadstore1...
+            # Load hit case is the standard path
+            with m.If(r1.hit_load_valid):
 #                 report
 #                  "completing load hit data=" & to_hstring(data_out);
+                print(f"completing load hit data={data_out}")
 #             end if;
-#
+
 #             -- error cases complete without stalling
 #             if r1.ls_error = '1' then
+            # error cases complete without stalling
+            with m.If(r1.ls_error):
 #                 report "completing ld/st with error";
+                print("completing ld/st with error")
 #             end if;
-#
+
 #             -- Slow ops (load miss, NC, stores)
 #             if r1.slow_valid = '1' then
+            # Slow ops (load miss, NC, stores)
+            with m.If(r1.slow_valid):
 #                 report
 #                  "completing store or load miss data="
 #                   & to_hstring(data_out);
+                print(f"completing store or load miss data={data_out}")
 #             end if;
-#
+
 #         else
+        with m.Else():
 #             -- Request came from MMU
 #             if r1.hit_load_valid = '1' then
+            # Request came from MMU
+            with m.If(r1.hit_load_valid):
 #                 report "completing load hit to MMU, data="
 #                  & to_hstring(m_out.data);
+                print(f"completing load hit to MMU, data={m_out.data}")
 #             end if;
 #
 #             -- error cases complete without stalling
 #             if r1.mmu_error = '1' then
 #                 report "completing MMU ld with error";
+            # error cases complete without stalling
+            with m.If(r1.mmu_error):
+                print("combpleting MMU ld with error")
 #             end if;
 #
 #             -- Slow ops (i.e. load miss)
 #             if r1.slow_valid = '1' then
+            # Slow ops (i.e. load miss)
+            with m.If(r1.slow_valid):
 #                 report "completing MMU load miss, data="
 #                  & to_hstring(m_out.data);
+                print("completing MMU load miss, data={m_out.data}")
 #             end if;
 #         end if;
-#
 #     end process;
-#
-#
+
+# begin TODO
 #     -- Generate a cache RAM for each way. This handles the normal
 #     -- reads, writes from reloads and the special store-hit update
 #     -- path as well.
@@ -1847,121 +2006,222 @@ class MaybePLRUs(Elaboratable):
 #              wr_data => wr_data
 #              );
 #      process(all)
+# end TODO
+class TODO(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
 #      begin
 #          -- Cache hit reads
 #          do_read <= '1';
 #          rd_addr <=
 #            std_ulogic_vector(to_unsigned(early_req_row, ROW_BITS));
 #          cache_out(i) <= dout;
-#
+        # Cache hit reads
+        comb += do_read.eq(1)
+        comb += rd_addr.eq(early_req_row)
+        comb += cache_out[i].eq(dout)
+
 #          -- Write mux:
 #          --
 #          -- Defaults to wishbone read responses (cache refill)
 #          --
 #          -- For timing, the mux on wr_data/sel/addr is not
 #           -- dependent on anything other than the current state.
+        # Write mux:
+        #
+        # Defaults to wishbone read responses (cache refill)
+        #
+        # For timing, the mux on wr_data/sel/addr is not
+        # dependent on anything other than the current state.
 #           wr_sel_m <= (others => '0');
-#
+        comb += wr_sel_m.eq(0)
+
 #          do_write <= '0';
+        comb += do_write.eq(0)
 #             if r1.write_bram = '1' then
+        with m.If(r1.write_bram):
 #                 -- Write store data to BRAM.  This happens one
 #                 -- cycle after the store is in r0.
+            # Write store data to BRAM.  This happens one
+            # cycle after the store is in r0.
 #                 wr_data <= r1.req.data;
 #                 wr_sel  <= r1.req.byte_sel;
 #                 wr_addr <= std_ulogic_vector(to_unsigned(
 #                             get_row(r1.req.real_addr), ROW_BITS
 #                            ));
+            comb += wr_data.eq(r1.req.data)
+            comb += wr_sel.eq(r1.req.byte_sel)
+            comb += wr_addr.eq(get_row(r1.req.real_addr))
+
 #                 if i = r1.req.hit_way then
+            with m.If(i == r1.req.hit_way):
 #                     do_write <= '1';
+                comb += do_write.eq(1)
 #                 end if;
 #          else
+            with m.Else():
 #              -- Otherwise, we might be doing a reload or a DCBZ
 #                 if r1.dcbz = '1' then
+                # Otherwise, we might be doing a reload or a DCBZ
+                with m.If(r1.dcbz):
 #                     wr_data <= (others => '0');
+                    comb += wr_data.eq(0)
 #                 else
+                with m.Else():
 #                     wr_data <= wishbone_in.dat;
+                    comb += wr_data.eq(wishbone_in.dat)
 #                 end if;
+
 #                 wr_addr <= std_ulogic_vector(to_unsigned(
 #                             r1.store_row, ROW_BITS
 #                            ));
 #                 wr_sel <= (others => '1');
-#
+            comb += wr_addr.eq(r1.store_row)
+            comb += wr_sel.eq(-1) # all byte lanes, (others => '1')
+
 #                 if r1.state = RELOAD_WAIT_ACK and
 #                 wishbone_in.ack = '1' and replace_way = i then
+            with m.If((r1.state == State.RELOAD_WAIT_ACK)
+                      & wishbone_in.ack & (replace_way == i)):
 #                     do_write <= '1';
+                comb += do_write.eq(1)
 #                 end if;
 #          end if;
-#
+
 #             -- Mask write selects with do_write since BRAM
 #             -- doesn't have a global write-enable
 #             if do_write = '1' then
+#             -- Mask write selects with do_write since BRAM
+#             -- doesn't have a global write-enable
+            with m.If(do_write):
 #                 wr_sel_m <= wr_sel;
+                comb += wr_sel_m.eq(wr_sel)
 #             end if;
-#
 #         end process;
 #     end generate;
-#
+
 #     -- Cache hit synchronous machine for the easy case.
 #     -- This handles load hits.
 #     -- It also handles error cases (TLB miss, cache paradox)
 #     dcache_fast_hit : process(clk)
+# Cache hit synchronous machine for the easy case.
+# This handles load hits.
+# It also handles error cases (TLB miss, cache paradox)
+class DcacheFastHit(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
 #     begin
 #         if rising_edge(clk) then
 #             if req_op /= OP_NONE then
+        with m.If(req_op != Op.OP_NONE):
 #              report "op:" & op_t'image(req_op) &
 #                  " addr:" & to_hstring(r0.req.addr) &
 #                  " nc:" & std_ulogic'image(r0.req.nc) &
 #                  " idx:" & integer'image(req_index) &
 #                  " tag:" & to_hstring(req_tag) &
 #                  " way: " & integer'image(req_hit_way);
+            print(f"op:{req_op} addr:{r0.req.addr} nc: {r0.req.nc}" \
+                  f"idx:{req_index} tag:{req_tag} way: {req_hit_way}"
+                 )
 #            end if;
 #             if r0_valid = '1' then
+        with m.If(r0_valid):
 #                 r1.mmu_req <= r0.mmu_req;
+            sync += r1.mmu_req.eq(r0.mmu_req)
 #             end if;
-#
+
 #             -- Fast path for load/store hits.
 #             -- Set signals for the writeback controls.
 #             r1.hit_way <= req_hit_way;
 #             r1.hit_index <= req_index;
+        # Fast path for load/store hits.
+        # Set signals for the writeback controls.
+        sync += r1.hit_way.eq(req_hit_way)
+        sync += r1.hit_index.eq(req_index)
+
 #            if req_op = OP_LOAD_HIT then
+        with m.If(req_op == Op.OP_LOAD_HIT):
 #                r1.hit_load_valid <= '1';
+            sync += r1.hit_load_valid.eq(1)
+
 #            else
+        with m.Else():
 #                r1.hit_load_valid <= '0';
+            sync += r1.hit_load_valid.eq(0)
 #            end if;
+
 #             if req_op = OP_LOAD_HIT or req_op = OP_STORE_HIT then
+        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STORE_HIT)):
 #                 r1.cache_hit <= '1';
+            sync += r1.cache_hit.eq(1)
 #             else
+        with m.Else():
 #                 r1.cache_hit <= '0';
+            sync += r1.cache_hit.eq(0)
 #             end if;
-#
+
 #             if req_op = OP_BAD then
+        with m.If(req_op == Op.OP_BAD):
 #                 report "Signalling ld/st error valid_ra=" &
 #                  std_ulogic'image(valid_ra) & " rc_ok=" &
 #                  std_ulogic'image(rc_ok) & " perm_ok=" &
 #                  std_ulogic'image(perm_ok);
+            print(f"Signalling ld/st error valid_ra={valid_ra}"
+                  f" rc_ok={rc_ok} perm_ok={perm_ok}")
+
 #                 r1.ls_error <= not r0.mmu_req;
 #                 r1.mmu_error <= r0.mmu_req;
 #                 r1.cache_paradox <= access_ok;
+            sync += r1.ls_error.eq(~r0.mmu_req)
+            sync += r1.mmu_error.eq(r0.mmu_req)
+            sync += r1.cache_paradox.eq(access_ok)
+
 #             else
+        with m.Else():
 #                 r1.ls_error <= '0';
 #                 r1.mmu_error <= '0';
 #                 r1.cache_paradox <= '0';
+            sync += r1.ls_error.eq(0)
+            sync += r1.mmu_error.eq(0)
+            sync += r1.cache_paradox.eq(0)
 #             end if;
 #
 #             if req_op = OP_STCX_FAIL then
+        with m.If(req_op == Op.OP_STCX_FAIL):
 #                 r1.stcx_fail <= '1';
+            sync += r1.stcx_fail.eq(1)
+
 #             else
+        with m.Else():
 #                 r1.stcx_fail <= '0';
+            sync += r1.stcx_fail.eq(0)
 #             end if;
 #
 #             -- Record TLB hit information for updating TLB PLRU
 #             r1.tlb_hit <= tlb_hit;
 #             r1.tlb_hit_way <= tlb_hit_way;
 #             r1.tlb_hit_index <= tlb_req_index;
-#
+        # Record TLB hit information for updating TLB PLRU
+        sync += r1.tlb_hit.eq(tlb_hit)
+        sync += r1.tlb_hit_way.eq(tlb_hit_way)
+        sync += r1.tlb_hit_index.eq(tlb_req_index)
 #        end if;
 #     end process;
-#
+
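+        # an Elaboratable's elaborate() must return the Module it
+        # builds; assuming DcacheFastHit's body ends here:
+        return m
+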
 #     -- Memory accesses are handled by this state machine:
 #     --
 #     --   * Cache load miss/reload (in conjunction with "rams")
@@ -1971,57 +2231,128 @@ class MaybePLRUs(Elaboratable):
 #     -- All wishbone requests generation is done here.
 #     -- This machine operates at stage 1.
 #     dcache_slow : process(clk)
+# Memory accesses are handled by this state machine:
+#
+#   * Cache load miss/reload (in conjunction with "rams")
+#   * Load hits for non-cachable forms
+#   * Stores (the collision case is handled in "rams")
+#
+# All wishbone requests generation is done here.
+# This machine operates at stage 1.
+class DcacheSlow(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
 #        variable stbs_done : boolean;
 #         variable req       : mem_access_request_t;
 #         variable acks      : unsigned(2 downto 0);
+        stbs_done = Signal()
+        req       = MemAccessRequest()
+        acks      = Signal(3)
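+        # NOTE: stbs_done, req and acks are VHDL process *variables*,
+        # which take effect immediately within the process, so they are
+        # modelled as combinatorial signals driven with comb += - a
+        # default assignment first, conditional overrides after:
+        #
+        #     comb += stbs_done.eq(~r1.wb.stb)   # default each cycle
+        #     with m.If(...):
+        #         comb += stbs_done.eq(1)        # conditional override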
+
 #     begin
 #         if rising_edge(clk) then
 #             r1.use_forward1 <= use_forward1_next;
 #             r1.forward_sel <= (others => '0');
+        sync += r1.use_forward1.eq(use_forward1_next)
+        sync += r1.forward_sel.eq(0)
+
 #             if use_forward1_next = '1' then
+        with m.If(use_forward1_next):
 #                 r1.forward_sel <= r1.req.byte_sel;
-#             elsif use_forward2_next = '1' then
+            sync += r1.forward_sel.eq(r1.req.byte_sel)
+
+#           elsif use_forward2_next = '1' then
+        with m.Elif(use_forward2_next):
 #                 r1.forward_sel <= r1.forward_sel1;
+            sync += r1.forward_sel.eq(r1.forward_sel1)
 #             end if;
-#
+
 #             r1.forward_data2 <= r1.forward_data1;
+        sync += r1.forward_data2.eq(r1.forward_data1)
+
 #             if r1.write_bram = '1' then
+        with m.If(r1.write_bram):
 #                 r1.forward_data1 <= r1.req.data;
 #                 r1.forward_sel1 <= r1.req.byte_sel;
 #                 r1.forward_way1 <= r1.req.hit_way;
 #                 r1.forward_row1 <= get_row(r1.req.real_addr);
 #                 r1.forward_valid1 <= '1';
+            sync += r1.forward_data1.eq(r1.req.data)
+            sync += r1.forward_sel1.eq(r1.req.byte_sel)
+            sync += r1.forward_way1.eq(r1.req.hit_way)
+            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
+            sync += r1.forward_valid1.eq(1)
 #             else
+        with m.Else():
+
 #                 if r1.dcbz = '1' then
+            with m.If(r1.dcbz):
 #                     r1.forward_data1 <= (others => '0');
+                sync += r1.forward_data1.eq(0)
+
 #                 else
+            with m.Else():
 #                     r1.forward_data1 <= wishbone_in.dat;
+                sync += r1.forward_data1.eq(wb_in.dat)
 #                 end if;
+
 #                 r1.forward_sel1 <= (others => '1');
 #                 r1.forward_way1 <= replace_way;
 #                 r1.forward_row1 <= r1.store_row;
 #                 r1.forward_valid1 <= '0';
+            sync += r1.forward_sel1.eq(-1)  # (others => '1'): all bytes
+            sync += r1.forward_way1.eq(replace_way)
+            sync += r1.forward_row1.eq(r1.store_row)
+            sync += r1.forward_valid1.eq(0)
 #             end if;
-#
+
 #          -- On reset, clear all valid bits to force misses
 #             if rst = '1' then
+        # On reset, clear all valid bits to force misses
+        with m.If(ResetSignal()):
 #              for i in index_t loop
+            for i in range(NUM_LINES):
 #                  cache_valids(i) <= (others => '0');
+                sync += cache_valid_bits[i].eq(0)
 #              end loop;
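+            # NOTE: nmigen's sync domain carries an implicit reset, so
+            # each Signal already returns to its reset= value and much
+            # of this explicit block duplicates that; where an explicit
+            # test is wanted, ResetSignal() (imported from nmigen)
+            # names the domain's reset wire, e.g.:
+            #
+            #     with m.If(ResetSignal("sync")):
+            #         sync += r1.full.eq(0)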
+
 #                 r1.state <= IDLE;
 #                 r1.full <= '0';
-#              r1.slow_valid <= '0';
+#                r1.slow_valid <= '0';
 #                 r1.wb.cyc <= '0';
 #                 r1.wb.stb <= '0';
 #                 r1.ls_valid <= '0';
 #                 r1.mmu_done <= '0';
-#
+            sync += r1.state.eq(State.IDLE)
+            sync += r1.full.eq(0)
+            sync += r1.slow_valid.eq(0)
+            sync += r1.wb.cyc.eq(0)
+            sync += r1.wb.stb.eq(0)
+            sync += r1.ls_valid.eq(0)
+            sync += r1.mmu_done.eq(0)
+
 #              -- Not useful normally but helps avoiding
 #               -- tons of sim warnings
+            # Not useful normally but helps avoiding
+            # tons of sim warnings
 #              r1.wb.adr <= (others => '0');
+            sync += r1.wb.adr.eq(0)
 #             else
-#              -- One cycle pulses reset
-#              r1.slow_valid <= '0';
+        with m.Else():
+#                -- One cycle pulses reset
+#                r1.slow_valid <= '0';
 #                 r1.write_bram <= '0';
 #                 r1.inc_acks <= '0';
 #                 r1.dec_acks <= '0';
@@ -2029,71 +2360,139 @@ class MaybePLRUs(Elaboratable):
 #                 r1.ls_valid <= '0';
 #                 -- complete tlbies and TLB loads in the third cycle
 #                 r1.mmu_done <= r0_valid and (r0.tlbie or r0.tlbld);
+            # One cycle pulses reset
+            sync += r1.slow_valid.eq(0)
+            sync += r1.write_bram.eq(0)
+            sync += r1.inc_acks.eq(0)
+            sync += r1.dec_acks.eq(0)
+
+            sync += r1.ls_valid.eq(0)
+            # complete tlbies and TLB loads in the third cycle
+            sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
+
 #                 if req_op = OP_LOAD_HIT or req_op = OP_STCX_FAIL then
+            with m.If((req_op == Op.OP_LOAD_HIT)
+                      | (req_op == Op.OP_STCX_FAIL)):
 #                     if r0.mmu_req = '0' then
+                with m.If(~r0.mmu_req):
 #                         r1.ls_valid <= '1';
+                    sync += r1.ls_valid.eq(1)
 #                     else
+                with m.Else():
 #                         r1.mmu_done <= '1';
+                    sync += r1.mmu_done.eq(1)
 #                     end if;
 #                 end if;
-#
+
 #                 if r1.write_tag = '1' then
+            with m.If(r1.write_tag):
 #                     -- Store new tag in selected way
 #                     for i in 0 to NUM_WAYS-1 loop
+                # Store new tag in selected way
+                for i in range(NUM_WAYS):
 #                         if i = replace_way then
+                    with m.If(i == replace_way):
 #                             cache_tags(r1.store_index)(
 #                              (i + 1) * TAG_WIDTH - 1
 #                              downto i * TAG_WIDTH
 #                             ) <=
 #                              (TAG_WIDTH - 1 downto TAG_BITS => '0')
 #                              & r1.reload_tag;
+                        sync += cache_tag[
+                                 r1.store_index
+                                ][i * TAG_WIDTH:(i + 1) * TAG_WIDTH].eq(
+                                 Cat(r1.reload_tag,
+                                     Const(0, TAG_WIDTH - TAG_BITS))
+                                )
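+                        # NOTE: Cat() packs LSB-first, the reverse of
+                        # reading VHDL `&` left-to-right, so the zero
+                        # padding comes *second* to land in the high
+                        # bits above the TAG_BITS-wide reload_tag.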
 #                         end if;
 #                     end loop;
 #                     r1.store_way <= replace_way;
 #                     r1.write_tag <= '0';
+                sync += r1.store_way.eq(replace_way)
+                sync += r1.write_tag.eq(0)
 #                 end if;
-#
+
 #                 -- Take request from r1.req if there is one there,
 #                 -- else from req_op, ra, etc.
 #                 if r1.full = '1' then
+            # Take request from r1.req if there is one there,
+            # else from req_op, ra, etc.
+            with m.If(r1.full):
 #                     req := r1.req;
+                comb += req.eq(r1.req)
+
 #                 else
+            with m.Else():
 #                     req.op := req_op;
 #                     req.valid := req_go;
 #                     req.mmu_req := r0.mmu_req;
 #                     req.dcbz := r0.req.dcbz;
 #                     req.real_addr := ra;
+                comb += req.op.eq(req_op)
+                comb += req.valid.eq(req_go)
+                comb += req.mmu_req.eq(r0.mmu_req)
+                comb += req.dcbz.eq(r0.req.dcbz)
+                comb += req.real_addr.eq(ra)
+
 #                     -- Force data to 0 for dcbz
 #                     if r0.req.dcbz = '0' then
+                with m.If(~r0.req.dcbz):
 #                         req.data := r0.req.data;
+                    comb += req.data.eq(r0.req.data)
+
 #                     else
+                with m.Else():
 #                         req.data := (others => '0');
+                    comb += req.data.eq(0)
 #                     end if;
+
 #                     -- Select all bytes for dcbz
 #                     -- and for cacheable loads
 #                     if r0.req.dcbz = '1'
 #                      or (r0.req.load = '1' and r0.req.nc = '0') then
+                # Select all bytes for dcbz
+                # and for cacheable loads
+                with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
 #                         req.byte_sel := (others => '1');
+                    comb += req.byte_sel.eq(-1)  # all bytes
+
 #                     else
+                with m.Else():
 #                         req.byte_sel := r0.req.byte_sel;
+                    comb += req.byte_sel.eq(r0.req.byte_sel)
 #                     end if;
+
 #                     req.hit_way := req_hit_way;
 #                     req.same_tag := req_same_tag;
-#
+                comb += req.hit_way.eq(req_hit_way)
+                comb += req.same_tag.eq(req_same_tag)
+
 #                     -- Store the incoming request from r0,
 #                     -- if it is a slow request
 #                     -- Note that r1.full = 1 implies req_op = OP_NONE
 #                     if req_op = OP_LOAD_MISS or req_op = OP_LOAD_NC
 #                      or req_op = OP_STORE_MISS
 #                      or req_op = OP_STORE_HIT then
+                # Store the incoming request from r0,
+                # if it is a slow request
+                # Note that r1.full = 1 implies req_op = OP_NONE
+                with m.If((req_op == Op.OP_LOAD_MISS)
+                          | (req_op == Op.OP_LOAD_NC)
+                          | (req_op == Op.OP_STORE_MISS)
+                          | (req_op == Op.OP_STORE_HIT)):
 #                         r1.req <= req;
 #                         r1.full <= '1';
+                    sync += r1.req.eq(req)
+                    sync += r1.full.eq(1)
 #                     end if;
 #                 end if;
 #
 #              -- Main state machine
 #              case r1.state is
+            # Main state machine
+            with m.Switch(r1.state):
+
 #                 when IDLE =>
+                with m.Case(State.IDLE):
 #                     r1.wb.adr <= req.real_addr(r1.wb.adr'left downto 0);
 #                     r1.wb.sel <= req.byte_sel;
 #                     r1.wb.dat <= req.data;
@@ -2107,103 +2506,207 @@ class MaybePLRUs(Elaboratable):
 #                      get_row_of_line(get_row(req.real_addr)) - 1;
 #                     r1.reload_tag <= get_tag(req.real_addr);
 #                     r1.req.same_tag <= '1';
-#
-#                     if req.op = OP_STORE_HIT then
+                    sync += r1.wb.adr.eq(req.real_addr[0:len(r1.wb.adr)])
+                    sync += r1.wb.sel.eq(req.byte_sel)
+                    sync += r1.wb.dat.eq(req.data)
+                    sync += r1.dcbz.eq(req.dcbz)
+
+                    # Keep track of our index and way
+                    # for subsequent stores.
+                    sync += r1.store_index.eq(get_index(req.real_addr))
+                    sync += r1.store_row.eq(get_row(req.real_addr))
+                    sync += r1.end_row_ix.eq(
+                             get_row_of_line(get_row(req.real_addr)) - 1
+                            )
+                    sync += r1.reload_tag.eq(get_tag(req.real_addr))
+                    sync += r1.req.same_tag.eq(1)
+
+#                     if req.op = OP_STORE_HIT then
+                    with m.If(req.op == Op.OP_STORE_HIT):
 #                         r1.store_way <= req.hit_way;
+                        sync += r1.store_way.eq(req.hit_way)
 #                     end if;
-#
+
 #                     -- Reset per-row valid bits,
 #                     -- ready for handling OP_LOAD_MISS
 #                     for i in 0 to ROW_PER_LINE - 1 loop
+                    # Reset per-row valid bits,
+                    # ready for handling OP_LOAD_MISS
+                    for i in range(ROW_PER_LINE):
 #                         r1.rows_valid(i) <= '0';
+                        sync += r1.rows_valid[i].eq(0)
 #                     end loop;
-#
+
 #                     case req.op is
+                    with m.Switch(req.op):
 #                     when OP_LOAD_HIT =>
+                        with m.Case(Op.OP_LOAD_HIT):
 #                         -- stay in IDLE state
-#
+                            # stay in IDLE state
+                            pass
+
 #                     when OP_LOAD_MISS =>
+                        with m.Case(Op.OP_LOAD_MISS):
 #                      -- Normal load cache miss,
 #                       -- start the reload machine
 #                      report "cache miss real addr:" &
 #                        to_hstring(req.real_addr) & " idx:" &
 #                        integer'image(get_index(req.real_addr)) &
 #                       " tag:" & to_hstring(get_tag(req.real_addr));
-#
+                            # Normal load cache miss,
+                            # start the reload machine
+                            print(f"cache miss real addr:{req.real_addr}"
+                                  f" idx:{get_index(req.real_addr)}"
+                                  f" tag:{get_tag(req.real_addr)}")
+
 #                      -- Start the wishbone cycle
 #                      r1.wb.we  <= '0';
 #                      r1.wb.cyc <= '1';
 #                      r1.wb.stb <= '1';
-#
+                            # Start the wishbone cycle
+                            sync += r1.wb.we.eq(0)
+                            sync += r1.wb.cyc.eq(1)
+                            sync += r1.wb.stb.eq(1)
+
 #                      -- Track that we had one request sent
 #                      r1.state <= RELOAD_WAIT_ACK;
-#                         r1.write_tag <= '1';
-#
+#                       r1.write_tag <= '1';
+                            # Track that we had one request sent
+                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)
+                            sync += r1.write_tag.eq(1)
+
 #                  when OP_LOAD_NC =>
-#                         r1.wb.cyc <= '1';
-#                         r1.wb.stb <= '1';
+                        with m.Case(Op.OP_LOAD_NC):
+#                       r1.wb.cyc <= '1';
+#                       r1.wb.stb <= '1';
 #                      r1.wb.we <= '0';
 #                      r1.state <= NC_LOAD_WAIT_ACK;
-#
+                            sync += r1.wb.cyc.eq(1)
+                            sync += r1.wb.stb.eq(1)
+                            sync += r1.wb.we.eq(0)
+                            sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)
+
 #                     when OP_STORE_HIT | OP_STORE_MISS =>
+                        # m.Case() takes several match values; `|` would
+                        # OR the enum values into one wrong pattern
+                        with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
 #                         if req.dcbz = '0' then
+                            with m.If(~req.dcbz):
 #                             r1.state <= STORE_WAIT_ACK;
 #                             r1.acks_pending <= to_unsigned(1, 3);
 #                             r1.full <= '0';
 #                             r1.slow_valid <= '1';
+                                sync += r1.state.eq(State.STORE_WAIT_ACK)
+                                sync += r1.acks_pending.eq(1)
+                                sync += r1.full.eq(0)
+                                sync += r1.slow_valid.eq(1)
+
 #                             if req.mmu_req = '0' then
+                                with m.If(~req.mmu_req):
 #                                 r1.ls_valid <= '1';
+                                    sync += r1.ls_valid.eq(1)
 #                             else
+                                with m.Else():
 #                                 r1.mmu_done <= '1';
+                                    sync += r1.mmu_done.eq(1)
 #                             end if;
+
 #                             if req.op = OP_STORE_HIT then
+                                with m.If(req.op == Op.OP_STORE_HIT):
 #                                 r1.write_bram <= '1';
+                                    sync += r1.write_bram.eq(1)
 #                             end if;
+
 #                         else
+                            with m.Else():
 #                             -- dcbz is handled much like a load
 #                             -- miss except that we are writing
 #                             -- to memory instead of reading
 #                             r1.state <= RELOAD_WAIT_ACK;
+                                # dcbz is handled much like a load
+                                # miss except that we are writing
+                                # to memory instead of reading
+                                sync += r1.state.eq(State.RELOAD_WAIT_ACK)
+
 #                             if req.op = OP_STORE_MISS then
+                                with m.If(req.op == Op.OP_STORE_MISS):
 #                                 r1.write_tag <= '1';
+                                    sync += r1.write_tag.eq(1)
 #                             end if;
 #                         end if;
+
 #                         r1.wb.we <= '1';
 #                         r1.wb.cyc <= '1';
 #                         r1.wb.stb <= '1';
-#
+                            sync += r1.wb.we.eq(1)
+                            sync += r1.wb.cyc.eq(1)
+                            sync += r1.wb.stb.eq(1)
+
 #                  -- OP_NONE and OP_BAD do nothing
 #                   -- OP_BAD & OP_STCX_FAIL were handled above already
 #                  when OP_NONE =>
 #                     when OP_BAD =>
 #                     when OP_STCX_FAIL =>
+                        # OP_NONE and OP_BAD do nothing
+                        # OP_BAD & OP_STCX_FAIL were handled above already
+                        with m.Case(Op.OP_NONE):
+                            pass
+
+                        with m.Case(Op.OP_BAD):
+                            pass
+
+                        with m.Case(Op.OP_STCX_FAIL):
+                            pass
 #                  end case;
-#
+
 #                 when RELOAD_WAIT_ACK =>
+                with m.Case(State.RELOAD_WAIT_ACK):
 #                     -- Requests are all sent if stb is 0
+                        # Requests are all sent if stb is 0
+                        comb += stbs_done.eq(~r1.wb.stb)
 #                  stbs_done := r1.wb.stb = '0';
-#
+
 #                  -- If we are still sending requests,
 #                   -- was one accepted?
 #                  if wishbone_in.stall = '0' and not stbs_done then
+                        # If we are still sending requests,
+                        # was one accepted?
+                        with m.If(~wb_in.stall & ~stbs_done):
 #                      -- That was the last word ? We are done sending.
 #                       -- Clear stb and set stbs_done so we can handle
 #                       -- an eventual last ack on the same cycle.
 #                      if is_last_row_addr(r1.wb.adr, r1.end_row_ix) then
+                            # That was the last word ? We are done sending.
+                            # Clear stb and set stbs_done so we can handle
+                            # an eventual last ack on the same cycle.
+                            with m.If(is_last_row_addr(
+                                      r1.wb.adr, r1.end_row_ix)):
 #                          r1.wb.stb <= '0';
 #                          stbs_done := true;
+                                sync += r1.wb.stb.eq(0)
+                                comb += stbs_done.eq(1)  # stbs_done := true
 #                      end if;
-#
+
 #                      -- Calculate the next row address
 #                      r1.wb.adr <= next_row_addr(r1.wb.adr);
+                            # Calculate the next row address
+                            sync += r1.wb.adr.eq(next_row_addr(r1.wb.adr))
 #                  end if;
-#
+
 #                  -- Incoming acks processing
 #                     r1.forward_valid1 <= wishbone_in.ack;
+                        # Incoming acks processing
+                        sync += r1.forward_valid1.eq(wb_in.ack)
+
 #                  if wishbone_in.ack = '1' then
+                        with m.If(wb_in.ack):
 #                         r1.rows_valid(
 #                          r1.store_row mod ROW_PER_LINE
 #                         ) <= '1';
+                            sync += r1.rows_valid[
+                                     r1.store_row % ROW_PER_LINE
+                                    ].eq(1)
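+                            # NOTE: indexing with a Signal only works on
+                            # an nmigen Array; if rows_valid (like the
+                            # cache_valid_bits use further down) is a
+                            # plain Signal, a dynamic bit write needs
+                            # bit_select() instead, e.g. (sketch, names
+                            # illustrative):
+                            #
+                            #     row = Signal(range(ROW_PER_LINE))
+                            #     comb += row.eq(r1.store_row % ROW_PER_LINE)
+                            #     sync += r1.rows_valid.bit_select(row, 1).eq(1)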
+
 #                         -- If this is the data we were looking for,
 #                         -- we can complete the request next cycle.
 #                         -- Compare the whole address in case the
@@ -2213,51 +2716,105 @@ class MaybePLRUs(Elaboratable):
 #                        and ((r1.dcbz = '1' and r1.req.dcbz = '1')
 #                        or (r1.dcbz = '0' and r1.req.op = OP_LOAD_MISS))
 #                        and r1.store_row = get_row(r1.req.real_addr) then
+                            # If this is the data we were looking for,
+                            # we can complete the request next cycle.
+                            # Compare the whole address in case the
+                            # request in r1.req is not the one that
+                            # started this refill.
+                            with m.If(r1.full & r1.req.same_tag &
+                                      ((r1.dcbz & r1.req.dcbz) |
+                                       (~r1.dcbz &
+                                        (r1.req.op == Op.OP_LOAD_MISS))
+                                       ) &
+                                      (r1.store_row
+                                       == get_row(r1.req.real_addr))):
 #                             r1.full <= '0';
 #                             r1.slow_valid <= '1';
+                                sync += r1.full.eq(0)
+                                sync += r1.slow_valid.eq(1)
+
 #                             if r1.mmu_req = '0' then
+                                with m.If(~r1.mmu_req):
 #                                 r1.ls_valid <= '1';
+                                    sync += r1.ls_valid.eq(1)
 #                             else
+                                with m.Else():
 #                                 r1.mmu_done <= '1';
+                                    sync += r1.mmu_done.eq(1)
 #                             end if;
 #                             r1.forward_sel <= (others => '1');
 #                             r1.use_forward1 <= '1';
+                                sync += r1.forward_sel.eq(-1)  # all bytes
+                                sync += r1.use_forward1.eq(1)
 #                      end if;
-#
+
 #                      -- Check for completion
 #                      if stbs_done and is_last_row(r1.store_row,
 #                        r1.end_row_ix) then
+                            # Check for completion
+                            with m.If(stbs_done &
+                                      is_last_row(r1.store_row,
+                                      r1.end_row_ix)):
+
 #                          -- Complete wishbone cycle
 #                          r1.wb.cyc <= '0';
-#
+                                # Complete wishbone cycle
+                                sync += r1.wb.cyc.eq(0)
+
 #                          -- Cache line is now valid
 #                          cache_valids(r1.store_index)(
 #                            r1.store_way
 #                           ) <= '1';
-#
+                                # Cache line is now valid
+                                sync += cache_valid_bits[
+                                         r1.store_index
+                                        ][r1.store_way].eq(1)
+
 #                           r1.state <= IDLE;
+                                sync += r1.state.eq(State.IDLE)
 #                      end if;
-#
+
 #                      -- Increment store row counter
 #                      r1.store_row <= next_row(r1.store_row);
+                            # Increment store row counter
+                            sync += r1.store_row.eq(next_row(r1.store_row))
 #                  end if;
-#
+
 #                 when STORE_WAIT_ACK =>
-#                  stbs_done := r1.wb.stb = '0';
+                with m.Case(State.STORE_WAIT_ACK):
+#                     stbs_done := r1.wb.stb = '0';
 #                     acks := r1.acks_pending;
+                        comb += stbs_done.eq(~r1.wb.stb)
+                        comb += acks.eq(r1.acks_pending)
+
 #                     if r1.inc_acks /= r1.dec_acks then
+                        with m.If(r1.inc_acks != r1.dec_acks):
+
 #                         if r1.inc_acks = '1' then
+                            with m.If(r1.inc_acks):
 #                             acks := acks + 1;
+                                comb += acks.eq(r1.acks_pending + 1)
+
 #                         else
+                            with m.Else():
 #                             acks := acks - 1;
+                                comb += acks.eq(r1.acks_pending - 1)
 #                         end if;
 #                     end if;
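+                        # NOTE: acks is combinatorial (a VHDL process
+                        # variable), so acks.eq(acks + 1) would form a
+                        # feedback loop; the adjustment is taken from
+                        # the registered r1.acks_pending instead:
+                        #
+                        #     comb += acks.eq(r1.acks_pending + 1)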
+
 #                     r1.acks_pending <= acks;
+                        sync += r1.acks_pending.eq(acks)
+
 #                    -- Clear stb when slave accepted request
 #                     if wishbone_in.stall = '0' then
+                        # Clear stb when slave accepted request
+                        with m.If(~wb_in.stall):
 #                         -- See if there is another store waiting
 #                         -- to be done which is in the same real page.
 #                         if req.valid = '1' then
+                            # See if there is another store waiting
+                            # to be done which is in the same real page.
+                            with m.If(req.valid):
 #                             r1.wb.adr(
 #                              SET_SIZE_BITS - 1 downto 0
 #                             ) <= req.real_addr(
@@ -2265,65 +2822,129 @@ class MaybePLRUs(Elaboratable):
 #                             );
 #                             r1.wb.dat <= req.data;
 #                             r1.wb.sel <= req.byte_sel;
+                                sync += r1.wb.adr[0:SET_SIZE_BITS].eq(
+                                         req.real_addr[0:SET_SIZE_BITS]
+                                        )
+                                sync += r1.wb.dat.eq(req.data)
+                                sync += r1.wb.sel.eq(req.byte_sel)
 #                         end if;
+
 #                         if acks < 7 and req.same_tag = '1'
 #                          and (req.op = OP_STORE_MISS
 #                          or req.op = OP_STORE_HIT) then
+                            with m.If((acks < 7) & req.same_tag &
+                                      ((req.op == Op.OP_STORE_MISS)
+                                       | (req.op == Op.OP_STORE_HIT))):
 #                             r1.wb.stb <= '1';
 #                             stbs_done := false;
+                                sync += r1.wb.stb.eq(1)
+                                comb += stbs_done.eq(0)
+
 #                             if req.op = OP_STORE_HIT then
+                                with m.If(req.op == Op.OP_STORE_HIT):
 #                                 r1.write_bram <= '1';
+                                    sync += r1.write_bram.eq(1)
 #                             end if;
 #                             r1.full <= '0';
 #                             r1.slow_valid <= '1';
+                                sync += r1.full.eq(0)
+                                sync += r1.slow_valid.eq(1)
+
 #                             -- Store requests never come from the MMU
 #                             r1.ls_valid <= '1';
 #                             stbs_done := false;
 #                             r1.inc_acks <= '1';
+                                # Store requests never come from the MMU
+                                sync += r1.ls_valid.eq(1)
+                                comb += stbs_done.eq(0)
+                                sync += r1.inc_acks.eq(1)
 #                         else
+                            with m.Else():
 #                             r1.wb.stb <= '0';
 #                             stbs_done := true;
+                                sync += r1.wb.stb.eq(0)
+                                comb += stbs_done.eq(1)
 #                         end if;
 #                  end if;
-#
+
 #                  -- Got ack ? See if complete.
 #                  if wishbone_in.ack = '1' then
+                        # Got ack ? See if complete.
+                        with m.If(wb_in.ack):
 #                         if stbs_done and acks = 1 then
+                            with m.If(stbs_done & (acks == 1)):
 #                             r1.state <= IDLE;
 #                             r1.wb.cyc <= '0';
 #                             r1.wb.stb <= '0';
+                                sync += r1.state.eq(State.IDLE)
+                                sync += r1.wb.cyc.eq(0)
+                                sync += r1.wb.stb.eq(0)
 #                         end if;
 #                         r1.dec_acks <= '1';
+                            sync += r1.dec_acks.eq(1)
 #                  end if;
-#
+
 #                 when NC_LOAD_WAIT_ACK =>
+                with m.Case(State.NC_LOAD_WAIT_ACK):
 #                  -- Clear stb when slave accepted request
 #                     if wishbone_in.stall = '0' then
+                        # Clear stb when slave accepted request
+                        with m.If(~wb_in.stall):
 #                      r1.wb.stb <= '0';
+                            sync += r1.wb.stb.eq(0)
 #                  end if;
-#
+
 #                  -- Got ack ? complete.
 #                  if wishbone_in.ack = '1' then
+                        # Got ack ? complete.
+                        with m.If(wb_in.ack):
 #                         r1.state <= IDLE;
 #                         r1.full <= '0';
-#                      r1.slow_valid <= '1';
+#                        r1.slow_valid <= '1';
+                            sync += r1.state.eq(State.IDLE)
+                            sync += r1.full.eq(0)
+                            sync += r1.slow_valid.eq(1)
+
 #                         if r1.mmu_req = '0' then
+                            with m.If(~r1.mmu_req):
 #                             r1.ls_valid <= '1';
+                                sync += r1.ls_valid.eq(1)
+
 #                         else
+                            with m.Else():
 #                             r1.mmu_done <= '1';
+                                sync += r1.mmu_done.eq(1)
 #                         end if;
+
 #                         r1.forward_sel <= (others => '1');
 #                         r1.use_forward1 <= '1';
-#                      r1.wb.cyc <= '0';
-#                      r1.wb.stb <= '0';
+#                        r1.wb.cyc <= '0';
+#                        r1.wb.stb <= '0';
+                            sync += r1.forward_sel.eq(-1)  # all bytes
+                            sync += r1.use_forward1.eq(1)
+                            sync += r1.wb.cyc.eq(0)
+                            sync += r1.wb.stb.eq(0)
 #                  end if;
 #                 end case;
 #          end if;
 #      end if;
 #     end process;
-#
+
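+        # an Elaboratable's elaborate() must return the Module it
+        # builds; assuming DcacheSlow's body ends here:
+        return m
+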
 #     dc_log: if LOG_LENGTH > 0 generate
+# Note: VHDL `generate` has no direct nmigen equivalent; the guard
+# becomes an ordinary Python `if` at elaboration time (see below)
+class DcacheLog(Elaboratable):
+    def __init__(self):
+        pass
+
+    def elaborate(self, platform):
+        m = Module()
+
+        comb = m.d.comb
+        sync = m.d.sync
+
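+        # NOTE: a minimal sketch of the `if LOG_LENGTH > 0 generate`
+        # guard, assuming LOG_LENGTH were passed in as a constructor
+        # parameter (it is not, yet):
+        #
+        #     def elaborate(self, platform):
+        #         m = Module()
+        #         if self.log_length > 0:
+        #             ...  # build the logging logic only when enabled
+        #         return m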
 #         signal log_data : std_ulogic_vector(19 downto 0);
+        log_data = Signal(20)
+
 #     begin
 #         dcache_log: process(clk)
 #         begin
@@ -2342,8 +2963,16 @@ class MaybePLRUs(Elaboratable):
 #                             valid_ra &
 #                             std_ulogic_vector(
 #                              to_unsigned(state_t'pos(r1.state), 3));
+        sync += log_data.eq(Cat(
+                 r1.state[0:3], valid_ra, tlb_hit_way[0:3],
+                 stall_out, req_op[0:3], d_out.valid, d_out.error,
+                 r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
+                 r1.wb.adr[3:6]
+                ))
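+        # NOTE: Const() takes a constant value, not a Signal, hence the
+        # slices above; Cat() is LSB-first, i.e. the VHDL concatenation
+        # read right-to-left, so r1.state occupies bits 0-2 of log_data
+        # and r1.wb.adr(5 downto 3) the top bits.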
 #             end if;
 #         end process;
 #         log_out <= log_data;
+        # drive the module's log output from the registered log word
+        comb += log_out.eq(log_data)
 #     end generate;
 # end;
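+
+        # an Elaboratable's elaborate() must return the Module it
+        # builds; assuming DcacheLog's body ends here:
+        return m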