"""LoadStore1 FSM.
-based on microwatt loadstore1.vhdl
+based on microwatt loadstore1.vhdl, but conforming to PortInterface.
+unlike loadstore1.vhdl this does *not* deal with actual Load/Store
+ops: that job is handled by LDSTCompUnit, which talks to LoadStore1
+by way of PortInterface. PortInterface is where things need extending,
+such as adding dcbz support, etc.
+
+this module basically handles "pure" load / store operations, and
+its first job is to ask the D-Cache for the data. if that fails,
+the second task (if virtual memory is enabled) is to ask the MMU
to perform a TLB lookup, then to go *back* to the cache and ask again.
Links:
ACK_WAIT = 1 # waiting for ack from dcache
MMU_LOOKUP = 2 # waiting for MMU to look up translation
TLBIE_WAIT = 3 # waiting for MMU to finish doing a tlbie
- COMPLETE = 4 # extra cycle to complete an operation
# glue logic for microwatt mmu and dcache
super().__init__(regwid, addrwid)
self.dcache = DCache()
- self.d_in = self.dcache.d_in
- self.d_out = self.dcache.d_out
- self.l_in = LoadStore1ToMMUType()
- self.l_out = MMUToLoadStore1Type()
- # TODO microwatt
- self.mmureq = Signal()
- self.derror = Signal()
+ # these names are from the perspective of here (LoadStore1)
+ self.d_out = self.dcache.d_in # in to dcache is out for LoadStore
+ self.d_in = self.dcache.d_out # out from dcache is in for LoadStore
+ self.m_out = LoadStore1ToMMUType() # out *to* MMU
+ self.m_in = MMUToLoadStore1Type() # in *from* MMU
# TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
self.dbus = Record(make_wb_layout(pspec))
def set_wr_addr(self, m, addr, mask, misalign):
m.d.comb += self.load.eq(0) # store operation
- m.d.comb += self.d_in.load.eq(0)
+ m.d.comb += self.d_out.load.eq(0)
m.d.comb += self.byte_sel.eq(mask)
m.d.comb += self.addr.eq(addr)
m.d.comb += self.align_intr.eq(misalign)
def set_rd_addr(self, m, addr, mask, misalign):
m.d.comb += self.d_valid.eq(1)
- m.d.comb += self.d_in.valid.eq(self.d_validblip)
+ m.d.comb += self.d_out.valid.eq(self.d_validblip)
m.d.comb += self.load.eq(1) # load operation
- m.d.comb += self.d_in.load.eq(1)
+ m.d.comb += self.d_out.load.eq(1)
m.d.comb += self.byte_sel.eq(mask)
m.d.comb += self.align_intr.eq(misalign)
m.d.comb += self.addr.eq(addr)
def set_wr_data(self, m, data, wen):
# do the "blip" on write data
m.d.comb += self.d_valid.eq(1)
- m.d.comb += self.d_in.valid.eq(self.d_validblip)
+ m.d.comb += self.d_out.valid.eq(self.d_validblip)
# put data into comb which is picked up in main elaborate()
m.d.comb += self.d_w_valid.eq(1)
m.d.comb += self.store_data.eq(data)
- #m.d.sync += self.d_in.byte_sel.eq(wen) # this might not be needed
+ #m.d.sync += self.d_out.byte_sel.eq(wen) # this might not be needed
st_ok = self.done # TODO indicates write data is valid
return st_ok
data = self.load_data # actual read data
return data, ld_ok
- """
- if d_in.error = '1' then
- if d_in.cache_paradox = '1' then
- -- signal an interrupt straight away
- exception := '1';
- dsisr(63 - 38) := not r2.req.load;
- -- XXX there is no architected bit for this
- -- (probably should be a machine check in fact)
- dsisr(63 - 35) := d_in.cache_paradox;
- else
- -- Look up the translation for TLB miss
- -- and also for permission error and RC error
- -- in case the PTE has been updated.
- mmureq := '1';
- v.state := MMU_LOOKUP;
- v.stage1_en := '0';
- end if;
- end if;
- """
-
def elaborate(self, platform):
m = super().elaborate(platform)
comb, sync = m.d.comb, m.d.sync
m.submodules.dcache = dcache = self.dcache
# temp vars
- d_in, d_out, l_out, dbus = self.d_in, self.d_out, self.l_out, self.dbus
+ d_out, d_in, m_in, dbus = self.d_out, self.d_in, self.m_in, self.dbus
+ exc = self.pi.exc_o
+ exception = exc.happened
+ mmureq = Signal()
+
+    # copy of address, but gets overridden for OP_FETCH_FAILED
+ maddr = Signal(64)
+ m.d.comb += maddr.eq(self.addr)
+
+ # create a blip (single pulse) on valid read/write request
+    # this can be overridden in the FSM to get dcache to re-run
+ # a request when MMU_LOOKUP completes
+ m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))
# fsm skeleton
with m.Switch(self.state):
with m.If(self.d_validblip):
sync += self.state.eq(State.ACK_WAIT)
- with m.Case(State.ACK_WAIT): # waiting for completion
- with m.If(d_out.error):
- with m.If(d_out.cache_paradox):
- sync += self.derror.eq(1)
+ # waiting for completion
+ with m.Case(State.ACK_WAIT):
+
+ with m.If(d_in.error):
+ # cache error is not necessarily "final", it could
+ # be that it was just a TLB miss
+ with m.If(d_in.cache_paradox):
+ comb += exception.eq(1)
sync += self.state.eq(State.IDLE)
sync += self.dsisr[63 - 38].eq(~self.load)
# XXX there is no architected bit for this
# (probably should be a machine check in fact)
- sync += self.dsisr[63 - 35].eq(d_out.cache_paradox)
+ sync += self.dsisr[63 - 35].eq(d_in.cache_paradox)
with m.Else():
# Look up the translation for TLB miss
# and also for permission error and RC error
# in case the PTE has been updated.
- sync += self.mmureq.eq(1)
+ comb += mmureq.eq(1)
sync += self.state.eq(State.MMU_LOOKUP)
- with m.If(d_out.valid):
+ with m.If(d_in.valid):
m.d.comb += self.done.eq(1)
sync += self.state.eq(State.IDLE)
with m.If(self.load):
- m.d.comb += self.load_data.eq(d_out.data)
+ m.d.comb += self.load_data.eq(d_in.data)
+ # waiting here for the MMU TLB lookup to complete.
+ # either re-try the dcache lookup or throw MMU exception
with m.Case(State.MMU_LOOKUP):
- with m.If(l_out.done):
+ with m.If(m_in.done):
with m.If(~self.instr_fault):
# retry the request now that the MMU has
# installed a TLB entry
+ m.d.comb += self.d_validblip.eq(1) # re-run dcache req
sync += self.state.eq(State.ACK_WAIT)
- with m.If(l_out.err):
- sync += self.dsisr[63 - 33].eq(l_out.invalid)
- sync += self.dsisr[63 - 36].eq(l_out.perm_error)
+ with m.Else():
+ # instruction lookup fault:
+ comb += exc.happened.eq(1)
+ sync += self.dar.eq(self.addr)
+
+ with m.If(m_in.err):
+ # MMU RADIX exception thrown
+ comb += exception.eq(1)
+ sync += self.dsisr[63 - 33].eq(m_in.invalid)
+ sync += self.dsisr[63 - 36].eq(m_in.perm_error)
sync += self.dsisr[63 - 38].eq(self.load)
- sync += self.dsisr[63 - 44].eq(l_out.badtree)
- sync += self.dsisr[63 - 45].eq(l_out.rc_error)
-
- '''
- if m_in.done = '1' then # actually l_out.done
- if r.instr_fault = '0' then
- # retry the request now that the MMU has
- # installed a TLB entry
- v.state := ACK_WAIT;
- end if;
- end if;
- if m_in.err = '1' then # actually l_out.err
- dsisr(63 - 33) := m_in.invalid;
- dsisr(63 - 36) := m_in.perm_error;
- dsisr(63 - 38) := not r.load;
- dsisr(63 - 44) := m_in.badtree;
- dsisr(63 - 45) := m_in.rc_error;
- end if;
- '''
- pass
+ sync += self.dsisr[63 - 44].eq(m_in.badtree)
+ sync += self.dsisr[63 - 45].eq(m_in.rc_error)
with m.Case(State.TLBIE_WAIT):
pass
- with m.Case(State.COMPLETE):
- pass
+
+ with m.If(self.align_intr):
+ comb += exc.happened.eq(1)
+ sync += self.dar.eq(self.addr)
# happened, alignment, instr_fault, invalid.
# note that all of these flow through - eventually to the TRAP
# pipeline, via PowerDecoder2.
- exc = self.pi.exc_o
- comb += exc.happened.eq(d_out.error | l_out.err | self.align_intr)
- comb += exc.invalid.eq(l_out.invalid)
+ comb += exc.invalid.eq(m_in.invalid)
comb += exc.alignment.eq(self.align_intr)
-
+ comb += exc.instr_fault.eq(self.instr_fault)
# badtree, perm_error, rc_error, segment_fault
- comb += exc.badtree.eq(l_out.badtree)
- comb += exc.perm_error.eq(l_out.perm_error)
- comb += exc.rc_error.eq(l_out.rc_error)
- comb += exc.segment_fault.eq(l_out.segerr)
-
- # TODO some exceptions set SPRs
+ comb += exc.badtree.eq(m_in.badtree)
+ comb += exc.perm_error.eq(m_in.perm_error)
+ comb += exc.rc_error.eq(m_in.rc_error)
+ comb += exc.segment_fault.eq(m_in.segerr)
# TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
comb += dbus.adr.eq(dcache.wb_out.adr)
if hasattr(dbus, "stall"):
comb += dcache.wb_in.stall.eq(dbus.stall)
- # create a blip (single pulse) on valid read/write request
- m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))
-
# write out d data only when flag set
with m.If(self.d_w_valid):
- m.d.sync += d_in.data.eq(self.store_data)
+ m.d.sync += d_out.data.eq(self.store_data)
with m.Else():
- m.d.sync += d_in.data.eq(0)
+ m.d.sync += d_out.data.eq(0)
# this must move into the FSM, conditionally noticing that
# the "blip" comes from self.d_validblip.
# task 1: look up in dcache
# task 2: if dcache fails, look up in MMU.
# do **NOT** confuse the two.
- m.d.comb += d_in.load.eq(self.load)
- m.d.comb += d_in.byte_sel.eq(self.byte_sel)
- m.d.comb += d_in.addr.eq(self.addr)
- m.d.comb += d_in.nc.eq(self.nc)
+ m.d.comb += d_out.load.eq(self.load)
+ m.d.comb += d_out.byte_sel.eq(self.byte_sel)
+ m.d.comb += d_out.addr.eq(self.addr)
+ m.d.comb += d_out.nc.eq(self.nc)
# XXX these should be possible to remove but for some reason
# cannot be... yet. TODO, investigate
- m.d.comb += self.done.eq(d_out.valid)
- m.d.comb += self.load_data.eq(d_out.data)
+ m.d.comb += self.done.eq(d_in.valid)
+ m.d.comb += self.load_data.eq(d_in.data)
+
+ ''' TODO: translate to nmigen.
+ -- Update outputs to MMU
+ m_out.valid <= mmureq;
+ m_out.iside <= v.instr_fault;
+ m_out.load <= r.load;
+ # m_out.priv <= r.priv_mode; TODO
+ m_out.tlbie <= v.tlbie;
+ # m_out.mtspr <= mmu_mtspr; # TODO
+ # m_out.sprn <= sprn; # TODO
+ m_out.addr <= maddr;
+ # m_out.slbia <= l_in.insn(7); # TODO: no idea what this is
+ # m_out.rs <= l_in.data; # nope, probably not needed, TODO investigate
+ '''
return m