busy_o/1 most likely maps to x_busy_o
go_die_i/1 rst?
addr.data/48 x_addr_i (x_addr_i[:4] goes into LenExpand)
- addr.ok/1 probably x_valid_i & ~x_stall_i
+ addr.ok/1 probably x_i_valid & ~x_stall_i
addr_ok_o/1 no equivalent. *might* work using x_stall_i
- addr_exc_o/2(?) m_load_err_o and m_store_err_o
+ exc_o/6(?) m_load_err_o and m_store_err_o
ld.data/64 m_ld_data_o
ld.ok/1 probably implicit, when x_busy_o drops low
from nmigen import Elaboratable, Module, Signal
from nmutil.latch import SRLatch
+from nmutil.util import rising_edge
+
class Pi2LSUI(PortInterfaceBase):
if lsui is None:
lsui = LoadStoreUnitInterface(addr_wid, self.addrbits, data_wid)
self.lsui = lsui
+ self.lsui_busy = Signal()
self.valid_l = SRLatch(False, name="valid")
- def set_wr_addr(self, m, addr, mask):
+ def set_wr_addr(self, m, addr, mask, misalign, msr_pr):
m.d.comb += self.valid_l.s.eq(1)
m.d.comb += self.lsui.x_mask_i.eq(mask)
m.d.comb += self.lsui.x_addr_i.eq(addr)
- def set_rd_addr(self, m, addr, mask):
+ def set_rd_addr(self, m, addr, mask, misalign, msr_pr):
m.d.comb += self.valid_l.s.eq(1)
m.d.comb += self.lsui.x_mask_i.eq(mask)
m.d.comb += self.lsui.x_addr_i.eq(addr)
def set_wr_data(self, m, data, wen): # mask already done in addr setup
m.d.comb += self.lsui.x_st_data_i.eq(data)
- return ~self.lsui.x_busy_o
+ return (~(self.lsui.x_busy_o | self.lsui_busy))
def get_rd_data(self, m):
- return self.lsui.m_ld_data_o, ~self.lsui.x_busy_o
+ return self.lsui.m_ld_data_o, ~self.lsui_busy
def elaborate(self, platform):
m = super().elaborate(platform)
m.d.comb += lsui.x_ld_i.eq(pi.is_ld_i)
m.d.comb += lsui.x_st_i.eq(pi.is_st_i)
- # indicate valid at both ends
- m.d.comb += self.lsui.m_valid_i.eq(self.valid_l.q)
- m.d.comb += self.lsui.x_valid_i.eq(self.valid_l.q)
-
- # reset the valid latch when not busy
- m.d.comb += self.valid_l.r.eq(~pi.busy_o) # self.lsui.x_busy_o)
+ # ooo how annoying. x_busy_o is set synchronously, i.e. one
+ # clock too late for this converter to "notice". consequently,
+ # when trying to wait for ld/st, here: on the first cycle
+ # it goes "oh, x_busy_o isn't set, the ld/st must have been
+ # completed already, we must be done" when in fact it hasn't
+ # started. to "fix" that we actually have to have a full FSM
+ # tracking from when LD/ST starts, right the way through. sigh.
+ # first clock busy signal. needed because x_busy_o is sync
+ with m.FSM() as fsm:
+ with m.State("IDLE"):
+ # detect when ld/st starts. set busy *immediately*
+ with m.If((pi.is_ld_i | pi.is_st_i) & self.valid_l.q):
+ m.d.comb += self.lsui_busy.eq(1)
+ m.next = "BUSY"
+ with m.State("BUSY"):
+ # detect when busy drops: must then wait for ld/st to end..
+ #m.d.comb += self.lsui_busy.eq(self.lsui.x_busy_o)
+ m.d.comb += self.lsui_busy.eq(1)
+ with m.If(~self.lsui.x_busy_o):
+ m.next = "WAITDEASSERT"
+ with m.State("WAITDEASSERT"):
+ # when no longer busy: back to start
+ with m.If(~pi.is_st_i & ~pi.busy_o):
+ m.next = "IDLE"
+
+ # indicate valid at both ends. OR with lsui_busy (stops comb loop)
+ m.d.comb += self.lsui.m_i_valid.eq(self.valid_l.q )
+ m.d.comb += self.lsui.x_i_valid.eq(self.valid_l.q )
+
+ # reset the valid latch when not busy. sync to stop loop
+ lsui_active = Signal()
+ m.d.comb += lsui_active.eq(~self.lsui.x_busy_o)
+ m.d.comb += self.valid_l.r.eq(rising_edge(m, lsui_active))
return m
# expand the LSBs of address plus LD/ST len into 16-bit mask
m.d.comb += lsui.x_mask_i.eq(lenexp.lexp_o)
# pass through the address, indicate "valid"
- m.d.comb += lsui.x_valid_i.eq(1)
+ m.d.comb += lsui.x_i_valid.eq(1)
# indicate "OK" - XXX should be checking address valid
m.d.comb += pi.addr_ok_o.eq(1)