self.lsui_busy = Signal()
self.valid_l = SRLatch(False, name="valid")
- def set_wr_addr(self, m, addr, mask):
+ def set_wr_addr(self, m, addr, mask, misalign):
m.d.comb += self.valid_l.s.eq(1)
m.d.comb += self.lsui.x_mask_i.eq(mask)
m.d.comb += self.lsui.x_addr_i.eq(addr)
- def set_rd_addr(self, m, addr, mask):
+ def set_rd_addr(self, m, addr, mask, misalign):
m.d.comb += self.valid_l.s.eq(1)
m.d.comb += self.lsui.x_mask_i.eq(mask)
m.d.comb += self.lsui.x_addr_i.eq(addr)
def connect_port(self, inport):
return self.pi.connect_port(inport)
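+ # note: the new misalign argument indicates that the requested
+ # transfer's byte-mask spills past the end of the memory row (see the
+ # misalignment detection below); back-ends may use it or ignore it.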
- def set_wr_addr(self, m, addr, mask): pass
- def set_rd_addr(self, m, addr, mask): pass
+ def set_wr_addr(self, m, addr, mask, misalign): pass
+ def set_rd_addr(self, m, addr, mask, misalign): pass
def set_wr_data(self, m, data, wen): pass
def get_rd_data(self, m): pass
sync += busy_delay.eq(pi.busy_o)
comb += busy_edge.eq(pi.busy_o & ~busy_delay)
+ # misalignment detection: misalign is set when any bits beyond the
+ # low 8 of the lenexpand output are set, i.e. the byte-mask spills
+ # past the end of the memory row.  when using the L0CacheBuffer
+ # "data expander", which splits such requests into *two*
+ # PortInterfaces, this acts as a "safety check".
+ misalign = Signal()
+ comb += misalign.eq(lenexp.lexp_o[8:].bool())
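+ # rough worked example, assuming an 8-byte-wide memory row: a 4-byte
+ # access at row offset 6 gives lexp_o = 0b1111 << 6 = 0b11_1100_0000,
+ # so bits 8-9 are set and misalign=1.  the same access at offset 2
+ # gives 0b0011_1100, entirely within the low 8 bits, so misalign=0.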
+
# activate mode: only on "edge"
comb += ld_active.s.eq(rising_edge(m, lds)) # activate LD mode
comb += st_active.s.eq(rising_edge(m, sts)) # activate ST mode
comb += lenexp.len_i.eq(pi.data_len)
comb += lenexp.addr_i.eq(lsbaddr)
with m.If(pi.addr.ok & adrok_l.qn):
- self.set_rd_addr(m, pi.addr.data, lenexp.lexp_o)
+ self.set_rd_addr(m, pi.addr.data, lenexp.lexp_o, misalign)
comb += pi.addr_ok_o.eq(1) # acknowledge addr ok
sync += adrok_l.s.eq(1) # and pull "ack" latch
comb += lenexp.len_i.eq(pi.data_len)
comb += lenexp.addr_i.eq(lsbaddr)
with m.If(pi.addr.ok):
- self.set_wr_addr(m, pi.addr.data, lenexp.lexp_o)
+ self.set_wr_addr(m, pi.addr.data, lenexp.lexp_o, misalign)
with m.If(adrok_l.qn):
comb += pi.addr_ok_o.eq(1) # acknowledge addr ok
sync += adrok_l.s.eq(1) # and pull "ack" latch
# hard-code memory row-addressing width to 5 bits (32 rows)
self.mem = TestMemory(regwid, 5, granularity=regwid//8, init=False)
- def set_wr_addr(self, m, addr, mask):
+ def set_wr_addr(self, m, addr, mask, misalign):
lsbaddr, msbaddr = self.splitaddr(addr)
m.d.comb += self.mem.wrport.addr.eq(msbaddr)
- def set_rd_addr(self, m, addr, mask):
+ def set_rd_addr(self, m, addr, mask, misalign):
lsbaddr, msbaddr = self.splitaddr(addr)
m.d.comb += self.mem.rdport.addr.eq(msbaddr)
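+ # note: splitaddr divides the incoming address into the byte-offset
+ # LSBs within a memory row and the row-index MSBs; only the row index
+ # is driven onto the TestMemory port address here.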
super().__init__(regwid, addrwid)
self.ldst = LDSTSplitter(32, 48, 4)
- def set_wr_addr(self, m, addr, mask):
+ def set_wr_addr(self, m, addr, mask, misalign):
m.d.comb += self.ldst.addr_i.eq(addr)
- def set_rd_addr(self, m, addr, mask):
+ def set_rd_addr(self, m, addr, mask, misalign):
m.d.comb += self.ldst.addr_i.eq(addr)
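+ # note: this back-end simply forwards the address to the LDSTSplitter;
+ # the misalign flag is accepted for interface compatibility but unused
+ # here.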
def set_wr_data(self, m, data, wen):
yield from super().ports()
# TODO: memory ports
+
def test_cache_single_run(dut):
# test single byte
addr = 0
self.mmu = mmu
self.dcache = dcache
- def set_wr_addr(self, m, addr, mask):
+ def set_wr_addr(self, m, addr, mask, misalign):
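+ # store request: present the address to both the dcache and the MMU,
+ # and flag it as a privileged, valid store (load=0)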
m.d.comb += self.dcache.d_in.addr.eq(addr)
m.d.comb += self.mmu.l_in.addr.eq(addr)
m.d.comb += self.mmu.l_in.load.eq(0)
m.d.comb += self.mmu.l_in.priv.eq(1)
m.d.comb += self.mmu.l_in.valid.eq(1)
- def set_rd_addr(self, m, addr, mask):
+ def set_rd_addr(self, m, addr, mask, misalign):
m.d.comb += self.dcache.d_in.addr.eq(addr)
m.d.comb += self.mmu.l_in.addr.eq(addr)
m.d.comb += self.mmu.l_in.load.eq(1)
#self.nia = Signal(64)
#self.srr1 = Signal(16)
- def set_wr_addr(self, m, addr, mask):
+ def set_wr_addr(self, m, addr, mask, misalign):
m.d.comb += self.load.eq(0) # store operation
m.d.comb += self.d_in.load.eq(0)
m.d.comb += self.nc.eq(1)
return None
- def set_rd_addr(self, m, addr, mask):
+ def set_rd_addr(self, m, addr, mask, misalign):
m.d.comb += self.d_valid.eq(1)
m.d.comb += self.d_in.valid.eq(self.d_validblip)
m.d.comb += self.load.eq(1) # load operation