+
+# Fetch Finite State Machine.
+ # WARNING: there are currently DriverConflicts, but it is actually working.
+ # TODO, here: everything that is global in nature (information from the
+ # main TestIssuerInternal) needs to move to either ispec() or ospec().
+ # not only that: TestIssuerInternal.imem can move entirely into here,
+ # because imem is only ever accessed inside the FetchFSM.
+class FetchFSM(ControlBase):
+ def __init__(self, allow_overlap, svp64_en, imem, core_rst,
+ pdecode2, cur_state,
+ dbg, core, svstate, nia, is_svp64_mode):
+ self.allow_overlap = allow_overlap
+ self.svp64_en = svp64_en
+ self.imem = imem
+ self.core_rst = core_rst
+ self.pdecode2 = pdecode2
+ self.cur_state = cur_state
+ self.dbg = dbg
+ self.core = core
+ self.svstate = svstate
+ self.nia = nia
+ self.is_svp64_mode = is_svp64_mode
+
+ # set up pipeline ControlBase and allocate i/o specs
+ # (unusual: normally done by the Pipeline API)
+ super().__init__(stage=self)
+ self.p.i_data, self.n.o_data = self.new_specs(None)
+ self.i, self.o = self.p.i_data, self.n.o_data
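+
+ # note (a sketch of the Stage API arrangement): because stage=self,
+ # the ispec()/ospec() functions below define this FSM's i/o records.
+ # FetchInput carries at least pc and msr (see the wiring in
+ # TestIssuerInternal.elaborate, below)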
+
+ # next 3 functions are Stage API Compliance
+ def setup(self, m, i):
+ pass
+
+ def ispec(self):
+ return FetchInput()
+
+ def ospec(self):
+ return FetchOutput()
+
+ def elaborate(self, platform):
+ """fetch FSM
+
+ this FSM performs fetch of raw instruction data, partially decodes
+ it 32 bits at a time to detect SVP64 prefixes, and optionally
+ reads a second 32-bit quantity when a prefix is detected.
+ """
+ m = super().elaborate(platform)
+
+ dbg = self.dbg
+ core = self.core
+ pc = self.i.pc
+ msr = self.i.msr
+ svstate = self.svstate
+ nia = self.nia
+ is_svp64_mode = self.is_svp64_mode
+ fetch_pc_o_ready = self.p.o_ready
+ fetch_pc_i_valid = self.p.i_valid
+ fetch_insn_o_valid = self.n.o_valid
+ fetch_insn_i_ready = self.n.i_ready
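+ # note the ControlBase convention assumed here: p (previous stage)
+ # brings the pc/msr in with i_valid/o_ready handshaking, n (next
+ # stage) sends the fetched instruction out with o_valid/i_ready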
+
+ comb = m.d.comb
+ sync = m.d.sync
+ pdecode2 = self.pdecode2
+ cur_state = self.cur_state
+ dec_opcode_o = pdecode2.dec.raw_opcode_in # raw opcode
+
+ # also note instruction fetch failed
+ if hasattr(core, "icache"):
+ fetch_failed = core.icache.i_out.fetch_failed
+ flush_needed = True
+ else:
+ fetch_failed = Const(0, 1)
+ flush_needed = False
+
+ # set priv / virt mode on I-Cache, sigh
+ if isinstance(self.imem, ICache):
+ comb += self.imem.i_in.priv_mode.eq(~msr[MSR.PR])
+ comb += self.imem.i_in.virt_mode.eq(msr[MSR.DR])
+
+ with m.FSM(name='fetch_fsm'):
+
+ # waiting (zzz)
+ with m.State("IDLE"):
+ with m.If(~dbg.stopping_o & ~fetch_failed & ~dbg.core_stop_o):
+ comb += fetch_pc_o_ready.eq(1)
+ with m.If(fetch_pc_i_valid & ~pdecode2.instr_fault
+ & ~dbg.core_stop_o):
+ # instruction allowed to go: start by reading the PC
+ # capture the PC and also drop it into Insn Memory
+ # we have joined a pair of combinatorial memory
+ # lookups together. this is Generally Bad.
+ comb += self.imem.a_pc_i.eq(pc)
+ comb += self.imem.a_i_valid.eq(1)
+ comb += self.imem.f_i_valid.eq(1)
+ # transfer state to output
+ sync += cur_state.pc.eq(pc)
+ sync += cur_state.svstate.eq(svstate) # and svstate
+ sync += cur_state.msr.eq(msr) # and msr
+
+ m.next = "INSN_READ" # move to "wait for bus" phase
+
+ # wait for the instruction read (and SVP64 prefix detection)
+ with m.State("INSN_READ"):
+ if self.allow_overlap:
+ stopping = dbg.stopping_o
+ else:
+ stopping = Const(0)
+ with m.If(stopping):
+ # stopping: jump back to idle
+ m.next = "IDLE"
+ with m.Else():
+ with m.If(self.imem.f_busy_o &
+ ~pdecode2.instr_fault): # zzz...
+ # busy but not fetch failed: stay in wait-read
+ comb += self.imem.a_i_valid.eq(1)
+ comb += self.imem.f_i_valid.eq(1)
+ with m.Else():
+ # not busy (or fetch failed!): instruction fetched
+ # when fetch failed, the instruction gets ignored
+ # by the decoder
+ if hasattr(core, "icache"):
+ # blech, icache returns actual instruction
+ insn = self.imem.f_instr_o
+ else:
+ # but these return raw memory
+ insn = get_insn(self.imem.f_instr_o, cur_state.pc)
+ if self.svp64_en:
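+ # note: self.svp64 (the prefix decoder) and self.core_bigendian_i
+ # are not set in __init__: they appear to be provided externally
+ # by TestIssuerInternal (part of the module-level TODO on globals)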
+ svp64 = self.svp64
+ # decode the SVP64 prefix, if any
+ comb += svp64.raw_opcode_in.eq(insn)
+ comb += svp64.bigendian.eq(self.core_bigendian_i)
+ # pass the decoded prefix (if any) to PowerDecoder2
+ sync += pdecode2.sv_rm.eq(svp64.svp64_rm)
+ sync += pdecode2.is_svp64_mode.eq(is_svp64_mode)
+ # remember whether this is a prefixed instruction,
+ # so the FSM can readily loop when VL==0
+ sync += is_svp64_mode.eq(svp64.is_svp64_mode)
+ # calculate the address of the following instruction
+ insn_size = Mux(svp64.is_svp64_mode, 8, 4)
+ sync += nia.eq(cur_state.pc + insn_size)
+ with m.If(~svp64.is_svp64_mode):
+ # with no prefix, store the instruction
+ # and hand it directly to the next FSM
+ sync += dec_opcode_o.eq(insn)
+ m.next = "INSN_READY"
+ with m.Else():
+ # fetch the rest of the instruction from memory
+ comb += self.imem.a_pc_i.eq(cur_state.pc + 4)
+ comb += self.imem.a_i_valid.eq(1)
+ comb += self.imem.f_i_valid.eq(1)
+ m.next = "INSN_READ2"
+ else:
+ # not SVP64 - 32-bit only
+ sync += nia.eq(cur_state.pc + 4)
+ sync += dec_opcode_o.eq(insn)
+ m.next = "INSN_READY"
+
+ with m.State("INSN_READ2"):
+ with m.If(self.imem.f_busy_o): # zzz...
+ # busy: stay in wait-read
+ comb += self.imem.a_i_valid.eq(1)
+ comb += self.imem.f_i_valid.eq(1)
+ with m.Else():
+ # not busy: instruction fetched
+ insn = get_insn(self.imem.f_instr_o, cur_state.pc+4)
+ sync += dec_opcode_o.eq(insn)
+ m.next = "INSN_READY"
+ # TODO: probably can start looking at pdecode2.rm_dec
+ # here or maybe even in INSN_READ state, if svp64_mode
+ # detected, in order to trigger - and wait for - the
+ # predicate reading.
+ if self.svp64_en:
+ pmode = pdecode2.rm_dec.predmode
+ """
+ if pmode != SVP64PredMode.ALWAYS.value:
+ fire predicate loading FSM and wait before
+ moving to INSN_READY
+ else:
+ sync += self.srcmask.eq(-1) # set to all 1s
+ sync += self.dstmask.eq(-1) # set to all 1s
+ m.next = "INSN_READY"
+ """
+
+ with m.State("INSN_READY"):
+ # hand over the instruction, to be decoded
+ comb += fetch_insn_o_valid.eq(1)
+ with m.If(fetch_insn_i_ready):
+ m.next = "IDLE"
+
+ # whatever was done above, override it if core reset is held
+ with m.If(self.core_rst):
+ sync += nia.eq(0)
+
+ return m
+
+
+class TestIssuerInternal(TestIssuerBase):
+ """TestIssuer - reads instructions from TestMemory and issues them
+
+ efficiency and speed are not the main goals here: functional correctness
+ and code clarity are. optimisations (which almost 100% interfere with
+ easy understanding) come later.
+ """
+
+ def fetch_predicate_fsm(self, m,
+ pred_insn_i_valid, pred_insn_o_ready,
+ pred_mask_o_valid, pred_mask_i_ready):
+ """fetch_predicate_fsm - obtains (constructs in the case of CR)
+ src/dest predicate masks
+
+ https://bugs.libre-soc.org/show_bug.cgi?id=617
+ the predicates can be read here, by using IntRegs r_ports['pred']
+ or CRRegs r_ports['pred']. in the case of CRs it has to be
+ done through multiple reads, extracting one relevant bit at a time.
+ later, a faster way would be to use the 32-bit-wide CR port, but
+ that needs more complex decoding here. equivalent code used in
+ ISACaller is "from openpower.decoder.isa.caller import get_predcr"
+
+ note: this ENTIRE FSM is not to be called when svp64 is disabled
+ """
+ comb = m.d.comb
+ sync = m.d.sync
+ pdecode2 = self.pdecode2
+ rm_dec = pdecode2.rm_dec # SVP64RMModeDecode
+ predmode = rm_dec.predmode
+ srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
+ cr_pred, int_pred = self.cr_pred, self.int_pred # read regfiles
+ # get src/dst step, so we can skip already used mask bits
+ cur_state = self.cur_state
+ srcstep = cur_state.svstate.srcstep
+ dststep = cur_state.svstate.dststep
+ cur_vl = cur_state.svstate.vl
+
+ # decode predicates
+ sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
+ dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
+ sidx, scrinvert = get_predcr(m, srcpred, 's')
+ didx, dcrinvert = get_predcr(m, dstpred, 'd')
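+ # illustrative (hypothetical values, following the SVP64 INT
+ # predicate modes): for srcpred "~r3", get_predint would give
+ # sregread=3, sinvert=1, sunary=0, sall1s=0 (read r3, invert it);
+ # for "1<<r3" it would give sregread=3 with sunary=1 instead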
+
+ # store fetched masks, for either intpred or crpred
+ # when src/dst step is not zero, the skipped mask bits need to be
+ # shifted-out, before actually storing them in src/dest mask
+ new_srcmask = Signal(64, reset_less=True)
+ new_dstmask = Signal(64, reset_less=True)
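+ # illustrative: if new_srcmask is 0b1011 and srcstep is 2, the
+ # FETCH_PRED_SHIFT_MASK state stores srcmask = 0b10, so that bit 0
+ # of srcmask always refers to the current (srcstep) element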
+
+ with m.FSM(name="fetch_predicate"):
+
+ with m.State("FETCH_PRED_IDLE"):
+ comb += pred_insn_o_ready.eq(1)
+ with m.If(pred_insn_i_valid):
+ with m.If(predmode == SVP64PredMode.INT):
+ # skip fetching destination mask register, when zero
+ with m.If(dall1s):
+ sync += new_dstmask.eq(-1)
+ # directly go to fetch source mask register
+ # guaranteed not to be zero (otherwise predmode
+ # would be SVP64PredMode.ALWAYS, not INT)
+ comb += int_pred.addr.eq(sregread)
+ comb += int_pred.ren.eq(1)
+ m.next = "INT_SRC_READ"
+ # fetch destination predicate register
+ with m.Else():
+ comb += int_pred.addr.eq(dregread)
+ comb += int_pred.ren.eq(1)
+ m.next = "INT_DST_READ"
+ with m.Elif(predmode == SVP64PredMode.CR):
+ # go fetch masks from the CR register file
+ sync += new_srcmask.eq(0)
+ sync += new_dstmask.eq(0)
+ m.next = "CR_READ"
+ with m.Else():
+ sync += self.srcmask.eq(-1)
+ sync += self.dstmask.eq(-1)
+ m.next = "FETCH_PRED_DONE"
+
+ with m.State("INT_DST_READ"):
+ # store destination mask
+ inv = Repl(dinvert, 64)
+ with m.If(dunary):
+ # set selected mask bit for 1<<r3 mode
+ dst_shift = Signal(range(64))
+ comb += dst_shift.eq(self.int_pred.o_data & 0b111111)
+ sync += new_dstmask.eq(1 << dst_shift)
+ with m.Else():
+ # invert mask if requested
+ sync += new_dstmask.eq(self.int_pred.o_data ^ inv)
+ # skip fetching source mask register, when zero
+ with m.If(sall1s):
+ sync += new_srcmask.eq(-1)
+ m.next = "FETCH_PRED_SHIFT_MASK"
+ # fetch source predicate register
+ with m.Else():
+ comb += int_pred.addr.eq(sregread)
+ comb += int_pred.ren.eq(1)
+ m.next = "INT_SRC_READ"
+
+ with m.State("INT_SRC_READ"):
+ # store source mask
+ inv = Repl(sinvert, 64)
+ with m.If(sunary):
+ # set selected mask bit for 1<<r3 mode
+ src_shift = Signal(range(64))
+ comb += src_shift.eq(self.int_pred.o_data & 0b111111)
+ sync += new_srcmask.eq(1 << src_shift)
+ with m.Else():
+ # invert mask if requested
+ sync += new_srcmask.eq(self.int_pred.o_data ^ inv)
+ m.next = "FETCH_PRED_SHIFT_MASK"
+
+ # fetch masks from the CR register file
+ # implements the following loop:
+ # idx, inv = get_predcr(mask)
+ # mask = 0
+ # for cr_idx in range(vl):
+ # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
+ # if cr[idx] ^ inv:
+ # mask |= 1 << cr_idx
+ # return mask
+ with m.State("CR_READ"):
+ # CR index to be read, which will be ready by the next cycle
+ cr_idx = Signal.like(cur_vl, reset_less=True)
+ # submit the read operation to the regfile
+ with m.If(cr_idx != cur_vl):
+ # the CR read port is unary ...
+ # ren = 1 << cr_idx
+ # ... in MSB0 convention ...
+ # ren = 1 << (7 - cr_idx)
+ # ... and with an offset:
+ # ren = 1 << (7 - off - cr_idx)
+ idx = SVP64CROffs.CRPred + cr_idx
+ comb += cr_pred.ren.eq(1 << (7 - idx))
+ # signal data valid in the next cycle
+ cr_read = Signal(reset_less=True)
+ sync += cr_read.eq(1)
+ # load the next index
+ sync += cr_idx.eq(cr_idx + 1)
+ with m.Else():
+ # exit on loop end
+ sync += cr_read.eq(0)
+ sync += cr_idx.eq(0)
+ m.next = "FETCH_PRED_SHIFT_MASK"
+ with m.If(cr_read):
+ # compensate for the one cycle delay on the regfile
+ cur_cr_idx = Signal.like(cur_vl)
+ comb += cur_cr_idx.eq(cr_idx - 1)
+ # read the CR field, select the appropriate bit
+ cr_field = Signal(4)
+ scr_bit = Signal()
+ dcr_bit = Signal()
+ comb += cr_field.eq(cr_pred.o_data)
+ comb += scr_bit.eq(cr_field.bit_select(sidx, 1)
+ ^ scrinvert)
+ comb += dcr_bit.eq(cr_field.bit_select(didx, 1)
+ ^ dcrinvert)
+ # set the corresponding mask bit
+ bit_to_set = Signal.like(self.srcmask)
+ comb += bit_to_set.eq(1 << cur_cr_idx)
+ with m.If(scr_bit):
+ sync += new_srcmask.eq(new_srcmask | bit_to_set)
+ with m.If(dcr_bit):
+ sync += new_dstmask.eq(new_dstmask | bit_to_set)
+
+ with m.State("FETCH_PRED_SHIFT_MASK"):
+ # shift-out skipped mask bits
+ sync += self.srcmask.eq(new_srcmask >> srcstep)
+ sync += self.dstmask.eq(new_dstmask >> dststep)
+ m.next = "FETCH_PRED_DONE"
+
+ with m.State("FETCH_PRED_DONE"):
+ comb += pred_mask_o_valid.eq(1)
+ with m.If(pred_mask_i_ready):
+ m.next = "FETCH_PRED_IDLE"
+
+ def issue_fsm(self, m, core, nia,
+ dbg, core_rst, is_svp64_mode,
+ fetch_pc_o_ready, fetch_pc_i_valid,
+ fetch_insn_o_valid, fetch_insn_i_ready,
+ pred_insn_i_valid, pred_insn_o_ready,
+ pred_mask_o_valid, pred_mask_i_ready,
+ exec_insn_i_valid, exec_insn_o_ready,
+ exec_pc_o_valid, exec_pc_i_ready):
+ """issue FSM
+
+ decode / issue FSM. this interacts with the "fetch" FSM
+ through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
+ (outgoing). also interacts with the "execute" FSM
+ through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
+ (incoming).
+ SVP64 RM prefixes have already been set up by the
+ "fetch" phase, so execute is fairly straightforward.
+ """
+
+ comb = m.d.comb
+ sync = m.d.sync
+ pdecode2 = self.pdecode2
+ cur_state = self.cur_state
+ new_svstate = self.new_svstate
+
+ # temporaries
+ dec_opcode_i = pdecode2.dec.raw_opcode_in # raw opcode
+
+ # for updating svstate (things like srcstep etc.)
+ comb += new_svstate.eq(cur_state.svstate)
+
+ # precalculate srcstep+1 and dststep+1
+ cur_srcstep = cur_state.svstate.srcstep
+ cur_dststep = cur_state.svstate.dststep
+ next_srcstep = Signal.like(cur_srcstep)
+ next_dststep = Signal.like(cur_dststep)
+ comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
+ comb += next_dststep.eq(cur_state.svstate.dststep+1)
+
+ # note if an exception happened. in a pipelined or OoO design
+ # this needs to be accompanied by "shadowing" (or stalling)
+ exc_happened = self.core.o.exc_happened
+ # also note instruction fetch failed
+ if hasattr(core, "icache"):
+ fetch_failed = core.icache.i_out.fetch_failed
+ flush_needed = True
+ # set to fault in decoder
+ # update (highest priority) instruction fault
+ rising_fetch_failed = rising_edge(m, fetch_failed)
+ with m.If(rising_fetch_failed):
+ sync += pdecode2.instr_fault.eq(1)
+ else:
+ fetch_failed = Const(0, 1)
+ flush_needed = False
+
+ with m.FSM(name="issue_fsm"):
+
+ # sync with the "fetch" phase which is reading the instruction
+ # at this point there is no instruction running that
+ # could inadvertently update the PC.
+ with m.State("ISSUE_START"):
+ # reset instruction fault
+ sync += pdecode2.instr_fault.eq(0)
+ # wait on "core stop" release, before next fetch
+ # need to do this here, in case we are in a VL==0 loop
+ with m.If(~dbg.core_stop_o & ~core_rst):
+ comb += fetch_pc_i_valid.eq(1) # tell fetch to start
+ with m.If(fetch_pc_o_ready): # fetch acknowledged us
+ m.next = "INSN_WAIT"
+ with m.Else():
+ # tell core it's stopped, and acknowledge debug handshake
+ comb += dbg.core_stopped_i.eq(1)
+ # while stopped, allow updating SVSTATE
+ with m.If(self.svstate_i.ok):
+ comb += new_svstate.eq(self.svstate_i.data)
+ comb += self.update_svstate.eq(1)
+ sync += self.sv_changed.eq(1)
+
+ # wait for an instruction to arrive from Fetch
+ with m.State("INSN_WAIT"):
+ if self.allow_overlap:
+ stopping = dbg.stopping_o
+ else:
+ stopping = Const(0)
+ with m.If(stopping):
+ # stopping: jump back to idle
+ m.next = "ISSUE_START"
+ if flush_needed:
+ # request the icache to stop asserting "failed"
+ comb += core.icache.flush_in.eq(1)
+ # stop instruction fault
+ sync += pdecode2.instr_fault.eq(0)
+ with m.Else():
+ comb += fetch_insn_i_ready.eq(1)
+ with m.If(fetch_insn_o_valid):
+ # loop back to ISSUE_START if it's an SVP64 instruction
+ # and VL == 0: VL==0 makes the for-loop run from
+ # 0 to 0, i.e. the instruction is always a NOP.
+ cur_vl = cur_state.svstate.vl
+ with m.If(is_svp64_mode & (cur_vl == 0)):
+ # update the PC before fetching the next instruction
+ # since we are in a VL==0 loop, no instruction was
+ # executed that we could be overwriting
+ comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
+ comb += self.state_w_pc.i_data.eq(nia)
+ comb += self.insn_done.eq(1)
+ m.next = "ISSUE_START"
+ with m.Else():
+ if self.svp64_en:
+ m.next = "PRED_START" # fetching predicate
+ else:
+ m.next = "DECODE_SV" # skip predication
+
+ with m.State("PRED_START"):
+ comb += pred_insn_i_valid.eq(1) # tell fetch_pred to start
+ with m.If(pred_insn_o_ready): # fetch_pred acknowledged us
+ m.next = "MASK_WAIT"
+
+ with m.State("MASK_WAIT"):
+ comb += pred_mask_i_ready.eq(1) # ready to receive the masks
+ with m.If(pred_mask_o_valid): # predication masks are ready
+ m.next = "PRED_SKIP"
+
+ # skip zeros in predicate
+ with m.State("PRED_SKIP"):
+ with m.If(~is_svp64_mode):
+ m.next = "DECODE_SV" # nothing to do
+ with m.Else():
+ if self.svp64_en:
+ pred_src_zero = pdecode2.rm_dec.pred_sz
+ pred_dst_zero = pdecode2.rm_dec.pred_dz
+
+ # new srcstep, after skipping zeros
+ skip_srcstep = Signal.like(cur_srcstep)
+ # value to be added to the current srcstep
+ src_delta = Signal.like(cur_srcstep)
+ # add leading zeros to srcstep, if not in zero mode
+ with m.If(~pred_src_zero):
+ # priority encoder (count leading zeros)
+ # append guard bit, in case the mask is all zeros
+ pri_enc_src = PriorityEncoder(65)
+ m.submodules.pri_enc_src = pri_enc_src
+ comb += pri_enc_src.i.eq(Cat(self.srcmask,
+ Const(1, 1)))
+ comb += src_delta.eq(pri_enc_src.o)
+ # apply delta to srcstep
+ comb += skip_srcstep.eq(cur_srcstep + src_delta)
+ # shift-out all leading zeros from the mask
+ # plus the leading "one" bit
+ # TODO count leading zeros and shift-out the zero
+ # bits, in the same step, in hardware
+ sync += self.srcmask.eq(self.srcmask >> (src_delta+1))
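+ # illustrative: srcmask = 0b0110 gives pri_enc_src.o = 1 (the
+ # index of the lowest set bit), so src_delta = 1, skip_srcstep
+ # is cur_srcstep + 1, and srcmask becomes 0b01, ready for the
+ # next element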
+
+ # same as above, but for dststep
+ skip_dststep = Signal.like(cur_dststep)
+ dst_delta = Signal.like(cur_dststep)
+ with m.If(~pred_dst_zero):
+ pri_enc_dst = PriorityEncoder(65)
+ m.submodules.pri_enc_dst = pri_enc_dst
+ comb += pri_enc_dst.i.eq(Cat(self.dstmask,
+ Const(1, 1)))
+ comb += dst_delta.eq(pri_enc_dst.o)
+ comb += skip_dststep.eq(cur_dststep + dst_delta)
+ sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))
+
+ # TODO: initialize mask[VL]=1 to avoid passing past VL
+ with m.If((skip_srcstep >= cur_vl) |
+ (skip_dststep >= cur_vl)):
+ # end of VL loop. Update PC and reset src/dst step
+ comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
+ comb += self.state_w_pc.i_data.eq(nia)
+ comb += new_svstate.srcstep.eq(0)
+ comb += new_svstate.dststep.eq(0)
+ comb += self.update_svstate.eq(1)
+ # synchronize with the simulator
+ comb += self.insn_done.eq(1)
+ # go back to Issue
+ m.next = "ISSUE_START"
+ with m.Else():
+ # update new src/dst step
+ comb += new_svstate.srcstep.eq(skip_srcstep)
+ comb += new_svstate.dststep.eq(skip_dststep)
+ comb += self.update_svstate.eq(1)
+ # proceed to Decode
+ m.next = "DECODE_SV"
+
+ # pass predicate mask bits through to satellite decoders
+ # TODO: for SIMD this will be *multiple* bits
+ sync += core.i.sv_pred_sm.eq(self.srcmask[0])
+ sync += core.i.sv_pred_dm.eq(self.dstmask[0])
+
+ # after src/dst step have been updated, we are ready
+ # to decode the instruction
+ with m.State("DECODE_SV"):
+ # decode the instruction
+ with m.If(~fetch_failed):
+ sync += pdecode2.instr_fault.eq(0)
+ sync += core.i.e.eq(pdecode2.e)
+ sync += core.i.state.eq(cur_state)
+ sync += core.i.raw_insn_i.eq(dec_opcode_i)
+ sync += core.i.bigendian_i.eq(self.core_bigendian_i)
+ if self.svp64_en:
+ sync += core.i.sv_rm.eq(pdecode2.sv_rm)
+ # set RA_OR_ZERO detection in satellite decoders
+ sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
+ # and svp64 detection
+ sync += core.i.is_svp64_mode.eq(is_svp64_mode)
+ # and svp64 bit-rev'd ldst mode
+ ldst_dec = pdecode2.use_svp64_ldst_dec
+ sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
+ # after decoding, reset any previous exception condition,
+ # allowing it to be set again during the next execution
+ sync += pdecode2.ldst_exc.eq(0)
+
+ m.next = "INSN_EXECUTE" # move to "execute"
+
+ # handshake with execution FSM, move to "wait" once acknowledged
+ with m.State("INSN_EXECUTE"):
+ comb += exec_insn_i_valid.eq(1) # trigger execute
+ with m.If(exec_insn_o_ready): # execute acknowledged us
+ m.next = "EXECUTE_WAIT"
+
+ with m.State("EXECUTE_WAIT"):
+ # wait on "core stop" release, at instruction end
+ # need to do this here, in case we are in a VL>1 loop
+ with m.If(~dbg.core_stop_o & ~core_rst):
+ comb += exec_pc_i_ready.eq(1)
+ # see https://bugs.libre-soc.org/show_bug.cgi?id=636
+ # the exception info needs to be blatted into
+ # pdecode2.ldst_exc, and the instruction "re-run".
+ # when ldst_exc.happened is set, the PowerDecoder2
+ # reacts very differently: it re-writes the instruction
+ # with a "trap" (calls PowerDecoder2.trap()) which
+ # will *overwrite* whatever was requested and jump the
+ # PC to the exception address, as well as alter MSR.
+ # nothing else needs to be done other than to note
+ # the change of PC and MSR (and, later, SVSTATE)
+ with m.If(exc_happened):
+ mmu = core.fus.get_exc("mmu0")
+ ldst = core.fus.get_exc("ldst0")
+ if mmu is not None:
+ with m.If(fetch_failed):
+ # instruction fetch: exception is from MMU
+ # reset instr_fault (highest priority)
+ sync += pdecode2.ldst_exc.eq(mmu)
+ sync += pdecode2.instr_fault.eq(0)
+ if flush_needed:
+ # request icache to stop asserting "failed"
+ comb += core.icache.flush_in.eq(1)
+ with m.If(~fetch_failed):
+ # otherwise assume it was a LDST exception
+ sync += pdecode2.ldst_exc.eq(ldst)
+
+ with m.If(exec_pc_o_valid):
+
+ # was this the last loop iteration?
+ is_last = Signal()
+ cur_vl = cur_state.svstate.vl
+ comb += is_last.eq(next_srcstep == cur_vl)
+
+ with m.If(pdecode2.instr_fault):
+ # reset instruction fault, try again
+ sync += pdecode2.instr_fault.eq(0)
+ m.next = "ISSUE_START"
+
+ # return directly to Decode if Execute generated an
+ # exception.
+ with m.Elif(pdecode2.ldst_exc.happened):
+ m.next = "DECODE_SV"
+
+ # if MSR, PC or SVSTATE were changed by the previous
+ # instruction, go directly back to Fetch, without
+ # updating MSR, PC or SVSTATE here
+ with m.Elif(self.msr_changed | self.pc_changed |
+ self.sv_changed):
+ m.next = "ISSUE_START"
+
+ # also return to Fetch, when no output was a vector
+ # (regardless of SRCSTEP and VL), or when the last
+ # instruction was really the last one of the VL loop
+ with m.Elif((~pdecode2.loop_continue) | is_last):
+ # before going back to fetch, update the PC state
+ # register with the NIA.
+ # ok here we are not reading the branch unit.
+ # TODO: this just blithely overwrites whatever
+ # pipeline updated the PC
+ comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
+ comb += self.state_w_pc.i_data.eq(nia)
+ # reset SRCSTEP before returning to Fetch
+ if self.svp64_en:
+ with m.If(pdecode2.loop_continue):
+ comb += new_svstate.srcstep.eq(0)
+ comb += new_svstate.dststep.eq(0)
+ comb += self.update_svstate.eq(1)
+ else:
+ comb += new_svstate.srcstep.eq(0)
+ comb += new_svstate.dststep.eq(0)
+ comb += self.update_svstate.eq(1)
+ m.next = "ISSUE_START"
+
+ # returning to Execute? then, first update SRCSTEP
+ with m.Else():
+ comb += new_svstate.srcstep.eq(next_srcstep)
+ comb += new_svstate.dststep.eq(next_dststep)
+ comb += self.update_svstate.eq(1)
+ # return to mask skip loop
+ m.next = "PRED_SKIP"
+
+ with m.Else():
+ comb += dbg.core_stopped_i.eq(1)
+ if flush_needed:
+ # request the icache to stop asserting "failed"
+ comb += core.icache.flush_in.eq(1)
+ # stop instruction fault
+ sync += pdecode2.instr_fault.eq(0)
+
+ # check if svstate needs updating: if so, write it to State Regfile
+ with m.If(self.update_svstate):
+ sync += cur_state.svstate.eq(self.new_svstate) # for next clock
+
+ def execute_fsm(self, m, core,
+ exec_insn_i_valid, exec_insn_o_ready,
+ exec_pc_o_valid, exec_pc_i_ready):
+ """execute FSM
+
+ execute FSM. this interacts with the "issue" FSM
+ through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
+ (outgoing). SVP64 RM prefixes have already been set up by the
+ "issue" phase, so execute is fairly straightforward.
+ """
+
+ comb = m.d.comb
+ sync = m.d.sync
+ pdecode2 = self.pdecode2
+
+ # temporaries
+ core_busy_o = core.n.o_data.busy_o # core is busy
+ core_ivalid_i = core.p.i_valid # instruction is valid
+
+ if hasattr(core, "icache"):
+ fetch_failed = core.icache.i_out.fetch_failed
+ else:
+ fetch_failed = Const(0, 1)
+
+ with m.FSM(name="exec_fsm"):
+
+ # waiting for instruction bus (stays there until not busy)
+ with m.State("INSN_START"):
+ comb += exec_insn_o_ready.eq(1)
+ with m.If(exec_insn_i_valid):
+ comb += core_ivalid_i.eq(1) # instruction is valid/issued
+ sync += self.sv_changed.eq(0)
+ sync += self.pc_changed.eq(0)
+ sync += self.msr_changed.eq(0)
+ with m.If(core.p.o_ready): # only move if accepted
+ m.next = "INSN_ACTIVE" # move to "wait completion"
+
+ # instruction started: must wait till it finishes
+ with m.State("INSN_ACTIVE"):
+ # note changes to MSR, PC and SVSTATE
+ # XXX oops, really must monitor *all* State Regfile write
+ # ports looking for changes!
+ with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
+ sync += self.sv_changed.eq(1)
+ with m.If(self.state_nia.wen & (1 << StateRegs.MSR)):
+ sync += self.msr_changed.eq(1)
+ with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
+ sync += self.pc_changed.eq(1)
+ with m.If(~core_busy_o): # instruction done!
+ comb += exec_pc_o_valid.eq(1)
+ with m.If(exec_pc_i_ready):
+ # when finished, indicate "done".
+ # however, if there was an exception, the instruction
+ # is *not* yet done. this is an implementation
+ # detail: we choose to implement exceptions by
+ # taking the exception information from the LDST
+ # unit, putting that *back* into the PowerDecoder2,
+ # and *re-running the entire instruction*.
+ # if we erroneously indicate "done" here, it is as if
+ # there were *TWO* instructions:
+ # 1) the failed LDST 2) a TRAP.
+ with m.If(~pdecode2.ldst_exc.happened &
+ ~pdecode2.instr_fault):
+ comb += self.insn_done.eq(1)
+ m.next = "INSN_START" # back to fetch
+
+ def elaborate(self, platform):
+ m = super().elaborate(platform)
+ # convenience
+ comb, sync = m.d.comb, m.d.sync
+ cur_state = self.cur_state
+ pdecode2 = self.pdecode2
+ dbg = self.dbg
+ core = self.core
+
+ # set up peripherals and core
+ core_rst = self.core_rst
+
+ # indicate to outside world if any FU is still executing
+ comb += self.any_busy.eq(core.n.o_data.any_busy_o) # any FU executing
+
+ # address of the next instruction, in the absence of a branch
+ # depends on the instruction size
+ nia = Signal(64)
+
+ # connect up debug signals
+ with m.If(core.o.core_terminate_o):
+ comb += dbg.terminate_i.eq(1)
+
+ # pass the prefix mode from Fetch to Issue, so the latter can loop
+ # on VL==0
+ is_svp64_mode = Signal()
+
+ # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
+ # issue, decode/execute, now joined by "Predicate fetch/calculate".
+ # these are the handshake signals between each
+
+ # fetch FSM can run as soon as the PC is valid
+ fetch_pc_i_valid = Signal() # Issue tells Fetch "start next read"
+ fetch_pc_o_ready = Signal() # Fetch tells Issue "proceed"
+
+ # fetch FSM hands over the instruction to be decoded / issued
+ fetch_insn_o_valid = Signal()
+ fetch_insn_i_ready = Signal()
+
+ # predicate fetch FSM decodes and fetches the predicate
+ pred_insn_i_valid = Signal()
+ pred_insn_o_ready = Signal()
+
+ # predicate fetch FSM delivers the masks
+ pred_mask_o_valid = Signal()
+ pred_mask_i_ready = Signal()
+
+ # issue FSM delivers the instruction to be executed
+ exec_insn_i_valid = Signal()
+ exec_insn_o_ready = Signal()
+
+ # execute FSM, hands over the PC/SVSTATE back to the issue FSM
+ exec_pc_o_valid = Signal()
+ exec_pc_i_ready = Signal()
+
+ # the FSMs here are perhaps unusual in that they detect conditions
+ # then "hold" information, combinatorially, for the core
+ # (as opposed to using sync, which would incur a clock's delay).
+ # this includes the actual opcode, valid flags and so on.
+
+ # Fetch, then predicate fetch, then Issue, then Execute.
+ # Issue is where the VL for-loop lives. the ready/valid
+ # signalling is used to communicate between the four.
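+ # signal flow between the FSMs (ready/valid pairs), as wired
+ # up below:
+ # Issue -> Fetch : fetch_pc_i_valid / fetch_pc_o_ready
+ # Fetch -> Issue : fetch_insn_o_valid / fetch_insn_i_ready
+ # Issue -> PredFetch : pred_insn_i_valid / pred_insn_o_ready
+ # PredFetch -> Issue : pred_mask_o_valid / pred_mask_i_ready
+ # Issue -> Execute : exec_insn_i_valid / exec_insn_o_ready
+ # Execute -> Issue : exec_pc_o_valid / exec_pc_i_ready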
+
+ # set up Fetch FSM
+ fetch = FetchFSM(self.allow_overlap, self.svp64_en,
+ self.imem, core_rst, pdecode2, cur_state,
+ dbg, core,
+ dbg.state.svstate, # combinatorially same
+ nia, is_svp64_mode)
+ m.submodules.fetch = fetch
+ # connect up in/out data to existing Signals
+ comb += fetch.p.i_data.pc.eq(dbg.state.pc) # combinatorially same
+ comb += fetch.p.i_data.msr.eq(dbg.state.msr) # combinatorially same
+ # and the ready/valid signalling
+ comb += fetch_pc_o_ready.eq(fetch.p.o_ready)
+ comb += fetch.p.i_valid.eq(fetch_pc_i_valid)
+ comb += fetch_insn_o_valid.eq(fetch.n.o_valid)
+ comb += fetch.n.i_ready.eq(fetch_insn_i_ready)
+
+ self.issue_fsm(m, core, nia,
+ dbg, core_rst, is_svp64_mode,
+ fetch_pc_o_ready, fetch_pc_i_valid,
+ fetch_insn_o_valid, fetch_insn_i_ready,
+ pred_insn_i_valid, pred_insn_o_ready,
+ pred_mask_o_valid, pred_mask_i_ready,
+ exec_insn_i_valid, exec_insn_o_ready,
+ exec_pc_o_valid, exec_pc_i_ready)
+
+ if self.svp64_en:
+ self.fetch_predicate_fsm(m,
+ pred_insn_i_valid, pred_insn_o_ready,
+ pred_mask_o_valid, pred_mask_i_ready)
+
+ self.execute_fsm(m, core,
+ exec_insn_i_valid, exec_insn_o_ready,
+ exec_pc_o_valid, exec_pc_i_ready)
+
+ return m
+
+