Record, Memory,
Const)
from nmutil.iocontrol import RecordObject
-from nmutil.util import rising_edge
+from nmutil.util import rising_edge, Display
from enum import Enum, unique
from soc.experiment.dcache import DCache
+from soc.experiment.icache import ICache
from soc.experiment.pimem import PortInterfaceBase
from soc.experiment.mem_types import LoadStore1ToMMUType
from soc.experiment.mem_types import MMUToLoadStore1Type
self.priv_mode = Signal()
self.align_intr = Signal()
+
# glue logic for microwatt mmu and dcache
class LoadStore1(PortInterfaceBase):
def __init__(self, pspec):
super().__init__(regwid, addrwid)
self.dcache = DCache()
+ self.icache = ICache()
# these names are from the perspective of here (LoadStore1)
self.d_out = self.dcache.d_in # in to dcache is out for LoadStore
self.d_in = self.dcache.d_out # out from dcache is in for LoadStore
+ self.i_out = self.icache.i_in # in to icache is out for LoadStore
+ self.i_in = self.icache.i_out # out from icache is in for LoadStore
self.m_out = LoadStore1ToMMUType() # out *to* MMU
self.m_in = MMUToLoadStore1Type() # in *from* MMU
self.req = LDSTRequest(name="ldst_req")
# TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
self.dbus = Record(make_wb_layout(pspec))
+ self.ibus = Record(make_wb_layout(pspec))
# for creating a single clock blip to DCache
self.d_valid = Signal()
self.d_w_valid = Signal()
self.d_validblip = Signal()
- # DSISR and DAR cached values. note that the MMU FSM is where
- # these are accessed by OP_MTSPR/OP_MFSPR, on behalf of LoadStore1.
- # by contrast microwatt has the spr set/get done *in* loadstore1.vhdl
- self.dsisr = Signal(64)
- self.dar = Signal(64)
-
# state info for LD/ST
self.done = Signal()
+ self.done_delay = Signal()
# latch most of the input request
self.load = Signal()
self.tlbie = Signal()
self.addr = Signal(64)
self.store_data = Signal(64)
self.load_data = Signal(64)
+ self.load_data_delay = Signal(64)
self.byte_sel = Signal(8)
#self.xerc : xer_common_t;
#self.reserve = Signal()
self.virt_mode = Signal()
self.priv_mode = Signal()
self.state = Signal(State)
+ self.iside = Signal() # request instruction-side load
self.instr_fault = Signal()
self.align_intr = Signal()
self.busy = Signal()
#self.intr_vec : integer range 0 to 16#fff#;
#self.nia = Signal(64)
#self.srr1 = Signal(16)
+ # use these to set the dsisr or dar respectively
+ self.mmu_set_spr = Signal()
+ self.mmu_set_dsisr = Signal()
+ self.mmu_set_dar = Signal()
+ self.sprval_in = Signal(64)
+
+ # ONLY access these read-only, do NOT attempt to change
+ self.dsisr = Signal(32)
+ self.dar = Signal(64)
- def set_wr_addr(self, m, addr, mask, misalign, msr_pr):
+ def set_wr_addr(self, m, addr, mask, misalign, msr_pr, is_dcbz):
m.d.comb += self.req.load.eq(0) # store operation
m.d.comb += self.req.byte_sel.eq(mask)
m.d.comb += self.req.addr.eq(addr)
m.d.comb += self.req.priv_mode.eq(~msr_pr) # not-problem ==> priv
m.d.comb += self.req.virt_mode.eq(msr_pr) # problem-state ==> virt
m.d.comb += self.req.align_intr.eq(misalign)
+ m.d.comb += self.req.dcbz.eq(is_dcbz)
+
+ # m.d.comb += Display("set_wr_addr %i dcbz %i",addr,is_dcbz)
+
# option to disable the cache entirely for write
if self.disable_cache:
m.d.comb += self.req.nc.eq(1)
return None
def set_rd_addr(self, m, addr, mask, misalign, msr_pr):
- m.d.comb += self.d_out.valid.eq(self.d_validblip)
m.d.comb += self.d_valid.eq(1)
m.d.comb += self.req.load.eq(1) # load operation
m.d.comb += self.req.byte_sel.eq(mask)
def set_wr_data(self, m, data, wen):
# do the "blip" on write data
- m.d.comb += self.d_out.valid.eq(self.d_validblip)
m.d.comb += self.d_valid.eq(1)
# put data into comb which is picked up in main elaborate()
m.d.comb += self.d_w_valid.eq(1)
return st_ok
def get_rd_data(self, m):
# (diff hunk) Read-path completion hook for PortInterfaceBase.
# Returns (data, ld_ok): ld_ok flags that the read data is valid.
# The '+' lines switch to the one-cycle-delayed copies
# (done_delay / load_data_delay); per the note added in elaborate(),
# microwatt takes one more cycle before the next operation can issue,
# so the delayed signals keep this port in step with it.
- ld_ok = self.done # indicates read data is valid
- data = self.load_data # actual read data
+ ld_ok = self.done_delay # indicates read data is valid
+ data = self.load_data_delay # actual read data
return data, ld_ok
def elaborate(self, platform):
m = super().elaborate(platform)
comb, sync = m.d.comb, m.d.sync
- # create dcache module
+ # microwatt takes one more cycle before next operation can be issued
+ sync += self.done_delay.eq(self.done)
+ sync += self.load_data_delay.eq(self.load_data)
+
+ # create dcache and icache module
m.submodules.dcache = dcache = self.dcache
+ m.submodules.icache = icache = self.icache
# temp vars
d_out, d_in, dbus = self.d_out, self.d_in, self.dbus
+ i_out, i_in, ibus = self.i_out, self.i_in, self.ibus
m_out, m_in = self.m_out, self.m_in
exc = self.pi.exc_o
exception = exc.happened
# a request when MMU_LOOKUP completes.
m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))
ldst_r = LDSTRequest("ldst_r")
- with m.If(self.d_validblip):
- sync += ldst_r.eq(self.req) # copy of LDSTRequest on "blip"
+ comb += Display("MMUTEST: LoadStore1 d_in.error=%i",d_in.error)
# fsm skeleton
with m.Switch(self.state):
with m.Case(State.IDLE):
- with m.If(self.d_validblip):
+ with m.If(self.d_validblip & ~exc.happened):
comb += self.busy.eq(1)
sync += self.state.eq(State.ACK_WAIT)
+ sync += ldst_r.eq(self.req) # copy of LDSTRequest on "blip"
+# sync += Display("validblip self.req.virt_mode=%i",
+# self.req.virt_mode)
+ with m.Else():
+ sync += ldst_r.eq(0)
# waiting for completion
with m.Case(State.ACK_WAIT):
- comb += self.busy.eq(1)
+ comb += Display("MMUTEST: ACK_WAIT")
+ comb += self.busy.eq(~exc.happened)
with m.If(d_in.error):
# cache error is not necessarily "final", it could
with m.If(d_in.cache_paradox):
comb += exception.eq(1)
sync += self.state.eq(State.IDLE)
+ sync += ldst_r.eq(0)
+ sync += Display("cache error -> update dsisr")
sync += self.dsisr[63 - 38].eq(~self.load)
# XXX there is no architected bit for this
# (probably should be a machine check in fact)
with m.If(self.done):
sync += Display("ACK_WAIT, done %x", self.addr)
sync += self.state.eq(State.IDLE)
+ sync += ldst_r.eq(0)
with m.If(self.load):
m.d.comb += self.load_data.eq(d_in.data)
# waiting here for the MMU TLB lookup to complete.
# either re-try the dcache lookup or throw MMU exception
with m.Case(State.MMU_LOOKUP):
- comb += self.busy.eq(1)
+ comb += self.busy.eq(~exception)
with m.If(m_in.done):
with m.If(~self.instr_fault):
sync += Display("MMU_LOOKUP, done %x -> %x",
# installed a TLB entry, if not exception raised
m.d.comb += self.d_out.valid.eq(~exception)
sync += self.state.eq(State.ACK_WAIT)
+ sync += ldst_r.eq(0)
with m.Else():
- sync += Display("MMU_LOOKUP, exception %x", self.addr)
- # instruction lookup fault: store address in DAR
- comb += exc.happened.eq(1)
- sync += self.dar.eq(self.addr)
+ sync += self.state.eq(State.IDLE)
with m.If(m_in.err):
- # MMU RADIX exception thrown
+ # MMU RADIX exception thrown. XXX
+ # TODO: critical that the write here has to
+ # notify the MMU FSM of the change to dsisr
comb += exception.eq(1)
+ sync += Display("MMU RADIX exception thrown")
+ sync += Display("TODO: notify MMU of change to dsisr")
sync += self.dsisr[63 - 33].eq(m_in.invalid)
- sync += self.dsisr[63 - 36].eq(m_in.perm_error)
- sync += self.dsisr[63 - 38].eq(self.load)
+ sync += self.dsisr[63 - 36].eq(m_in.perm_error) # noexec
+ sync += self.dsisr[63 - 38].eq(~self.load)
sync += self.dsisr[63 - 44].eq(m_in.badtree)
sync += self.dsisr[63 - 45].eq(m_in.rc_error)
+ sync += self.state.eq(State.IDLE)
with m.Case(State.TLBIE_WAIT):
pass
- # alignment error: store address in DAR
+ # MMU FSM communicating a request to update DSISR or DAR (OP_MTSPR)
+ with m.If(self.mmu_set_spr):
+ with m.If(self.mmu_set_dsisr):
+ sync += self.dsisr.eq(self.sprval_in)
+ with m.If(self.mmu_set_dar):
+ sync += self.dar.eq(self.sprval_in)
+
+ # NOTE: misalignment is detected in set_rd_addr/set_wr_addr; here we
+ # only flag the resulting exception
with m.If(self.align_intr):
comb += exc.happened.eq(1)
- sync += self.dar.eq(self.addr)
+ # check for updating DAR
+ with m.If(exception):
+ sync += Display("exception %x", self.addr)
+ # alignment error: store address in DAR
+ with m.If(self.align_intr):
+ sync += Display("alignment error: addr in DAR %x", self.addr)
+ sync += self.dar.eq(self.addr)
+ with m.Elif(~self.instr_fault):
+ sync += Display("not instr fault, addr in DAR %x", self.addr)
+ sync += self.dar.eq(self.addr)
+
+ # when done or exception, return to idle state
+ with m.If(self.done | exception):
+ sync += self.state.eq(State.IDLE)
+ comb += self.busy.eq(0)
# happened, alignment, instr_fault, invalid.
# note that all of these flow through - eventually to the TRAP
# pipeline, via PowerDecoder2.
+ comb += self.align_intr.eq(self.req.align_intr)
comb += exc.invalid.eq(m_in.invalid)
comb += exc.alignment.eq(self.align_intr)
comb += exc.instr_fault.eq(self.instr_fault)
comb += exc.segment_fault.eq(m_in.segerr)
# TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
- comb += dbus.adr.eq(dcache.wb_out.adr)
- comb += dbus.dat_w.eq(dcache.wb_out.dat)
- comb += dbus.sel.eq(dcache.wb_out.sel)
- comb += dbus.cyc.eq(dcache.wb_out.cyc)
- comb += dbus.stb.eq(dcache.wb_out.stb)
- comb += dbus.we.eq(dcache.wb_out.we)
-
- comb += dcache.wb_in.dat.eq(dbus.dat_r)
- comb += dcache.wb_in.ack.eq(dbus.ack)
+ comb += dbus.adr.eq(dcache.bus.adr)
+ comb += dbus.dat_w.eq(dcache.bus.dat_w)
+ comb += dbus.sel.eq(dcache.bus.sel)
+ comb += dbus.cyc.eq(dcache.bus.cyc)
+ comb += dbus.stb.eq(dcache.bus.stb)
+ comb += dbus.we.eq(dcache.bus.we)
+
+ comb += dcache.bus.dat_r.eq(dbus.dat_r)
+ comb += dcache.bus.ack.eq(dbus.ack)
if hasattr(dbus, "stall"):
- comb += dcache.wb_in.stall.eq(dbus.stall)
+ comb += dcache.bus.stall.eq(dbus.stall)
- # write out d data only when flag set
+ # update d_out data only when the write-valid flag is set
with m.If(self.d_w_valid):
m.d.sync += d_out.data.eq(self.store_data)
- with m.Else():
- m.d.sync += d_out.data.eq(0)
+ #with m.Else():
+ # m.d.sync += d_out.data.eq(0)
+ # NOTE: the zeroing of d_out.data above is disabled; the unit
+ # tests still pass with it removed
# this must move into the FSM, conditionally noticing that
# the "blip" comes from self.d_validblip.
# task 2: if dcache fails, look up in MMU.
# do **NOT** confuse the two.
with m.If(self.d_validblip):
+ m.d.comb += self.d_out.valid.eq(~exc.happened)
m.d.comb += d_out.load.eq(self.req.load)
m.d.comb += d_out.byte_sel.eq(self.req.byte_sel)
m.d.comb += self.addr.eq(self.req.addr)
m.d.comb += d_out.nc.eq(self.req.nc)
m.d.comb += d_out.priv_mode.eq(self.req.priv_mode)
m.d.comb += d_out.virt_mode.eq(self.req.virt_mode)
+ #m.d.comb += Display("validblip dcbz=%i addr=%x",
+ #self.req.dcbz,self.req.addr)
+ m.d.comb += d_out.dcbz.eq(self.req.dcbz)
with m.Else():
m.d.comb += d_out.load.eq(ldst_r.load)
m.d.comb += d_out.byte_sel.eq(ldst_r.byte_sel)
m.d.comb += d_out.nc.eq(ldst_r.nc)
m.d.comb += d_out.priv_mode.eq(ldst_r.priv_mode)
m.d.comb += d_out.virt_mode.eq(ldst_r.virt_mode)
+ #m.d.comb += Display("no_validblip dcbz=%i addr=%x",
+ #ldst_r.dcbz,ldst_r.addr)
+ m.d.comb += d_out.dcbz.eq(ldst_r.dcbz)
# XXX these should be possible to remove but for some reason
# cannot be... yet. TODO, investigate
# Update outputs to MMU
m.d.comb += m_out.valid.eq(mmureq)
- m.d.comb += m_out.iside.eq(self.instr_fault)
+ m.d.comb += m_out.iside.eq(self.iside)
m.d.comb += m_out.load.eq(ldst_r.load)
# m_out.priv <= r.priv_mode; TODO
m.d.comb += m_out.tlbie.eq(self.tlbie)