+ # XICS interrupt handler
+ if self.xics:
+ m.submodules.xics_icp = icp = csd(self.xics_icp)
+ m.submodules.xics_ics = ics = csd(self.xics_ics)
+ comb += icp.ics_i.eq(ics.icp_o) # connect ICS to ICP
+ sync += cur_state.eint.eq(icp.core_irq_o) # connect ICP to core
+ else:
+ sync += cur_state.eint.eq(self.ext_irq) # connect externally
+
+ # GPIO test peripheral
+ if self.gpio:
+ m.submodules.simple_gpio = simple_gpio = csd(self.simple_gpio)
+
+ # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
+ # XXX causes litex ECP5 test to get wrong idea about input and output
+ # (but works with verilator sim *sigh*)
+ # if self.gpio and self.xics:
+ # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
+
+ # instruction decoder
+ pdecode = create_pdecode()
+ m.submodules.dec2 = pdecode2 = csd(self.pdecode2)
+ if self.svp64_en:
+ m.submodules.svp64 = svp64 = csd(self.svp64)
+
+ # convenience
+ dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
+ intrf = self.core.regs.rf['int']
+
+ # clock delay power-on reset
+ cd_por = ClockDomain(reset_less=True)
+ cd_sync = ClockDomain()
+ m.domains += cd_por, cd_sync
+ core_sync = ClockDomain(self.core_domain)
+ if self.core_domain != "sync":
+ m.domains += core_sync
+ if self.dbg_domain != "sync":
+ dbg_sync = ClockDomain(self.dbg_domain)
+ m.domains += dbg_sync
+
+ ti_rst = Signal(reset_less=True)
+ delay = Signal(range(4), reset=3)
+ with m.If(delay != 0):
+ m.d.por += delay.eq(delay - 1)
+ comb += cd_por.clk.eq(ClockSignal())
+
+ # power-on reset delay
+ core_rst = ResetSignal(self.core_domain)
+ if self.core_domain != "sync":
+ comb += ti_rst.eq(delay != 0 | dbg.core_rst_o | ResetSignal())
+ comb += core_rst.eq(ti_rst)
+ else:
+ with m.If(delay != 0 | dbg.core_rst_o):
+ comb += core_rst.eq(1)
+
+ # connect external reset signal to DMI Reset
+ if self.dbg_domain != "sync":
+ dbg_rst = ResetSignal(self.dbg_domain)
+ comb += dbg_rst.eq(self.dbg_rst_i)
+
+ # busy/halted signals from core
+ core_busy_o = ~core.p.o_ready | core.n.o_data.busy_o # core is busy
+ comb += self.busy_o.eq(core_busy_o)
+ comb += pdecode2.dec.bigendian.eq(self.core_bigendian_i)
+
+ # temporary hack: says "go" immediately for both address gen and ST
+ l0 = core.l0
+ ldst = core.fus.fus['ldst0']
+ st_go_edge = rising_edge(m, ldst.st.rel_o)
+ # link addr-go direct to rel
+ m.d.comb += ldst.ad.go_i.eq(ldst.ad.rel_o)
+ m.d.comb += ldst.st.go_i.eq(st_go_edge) # link store-go to rising rel
+
def do_dmi(self, m, dbg):
    """deals with DMI debug requests

    currently only provides read requests for the INT regfile, CR and XER
    it will later also deal with *writing* to these regfiles.

    each regfile read uses the same two-phase pattern: assert the
    read-enable combinatorially while the request is held, then, because
    the regfile responds one clock later, use a one-cycle-delayed copy of
    the request to forward the read data and raise the ack.
    """
    comb = m.d.comb
    sync = m.d.sync
    # DMI bus plus the three debug read channels (GPR, CR, XER)
    dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
    intrf = self.core.regs.rf['int']

    with m.If(d_reg.req):  # request for regfile access being made
        # TODO: error-check this
        # XXX should this be combinatorial? sync better?
        if intrf.unary:
            # unary (one-hot) regfile: one read-enable line per register
            comb += self.int_r.ren.eq(1 << d_reg.addr)
        else:
            # binary-addressed regfile: set address plus single read-enable
            comb += self.int_r.addr.eq(d_reg.addr)
            comb += self.int_r.ren.eq(1)
    # delayed copy of the request tracks the regfile's 1-clock read latency
    d_reg_delay = Signal()
    sync += d_reg_delay.eq(d_reg.req)
    with m.If(d_reg_delay):
        # data arrives one clock later
        comb += d_reg.data.eq(self.int_r.o_data)
        comb += d_reg.ack.eq(1)

    # sigh same thing for CR debug
    with m.If(d_cr.req):  # request for regfile access being made
        comb += self.cr_r.ren.eq(0b11111111)  # enable all (8 CR fields)
        d_cr_delay = Signal()
        sync += d_cr_delay.eq(d_cr.req)
        with m.If(d_cr_delay):
            # data arrives one clock later
            comb += d_cr.data.eq(self.cr_r.o_data)
            comb += d_cr.ack.eq(1)

    # aaand XER...
    with m.If(d_xer.req):  # request for regfile access being made
        comb += self.xer_r.ren.eq(0b111111)  # enable all (XER sub-fields)
        d_xer_delay = Signal()
        sync += d_xer_delay.eq(d_xer.req)
        with m.If(d_xer_delay):
            # data arrives one clock later
            comb += d_xer.data.eq(self.xer_r.o_data)
            comb += d_xer.ack.eq(1)
+
def tb_dec_fsm(self, m, spr_dec):
    """tb_dec_fsm

    this is a FSM for updating either dec or tb. it runs alternately
    DEC, TB, DEC, TB. note that SPR pipeline could have written a new
    value to DEC, however the regfile has "passthrough" on it so this
    *should* be ok.

    see v3.0B p1097-1099 for Timer Resource and p1065 and p1076

    each register takes two states: one to issue the regfile read, one
    (a clock later, when the read data is available) to write back the
    incremented/decremented value.  DEC counts down, TB counts up.
    """

    comb, sync = m.d.comb, m.d.sync
    fast_rf = self.core.regs.rf['fast']
    fast_r_dectb = fast_rf.r_ports['issue']  # DEC/TB
    fast_w_dectb = fast_rf.w_ports['issue']  # DEC/TB

    with m.FSM() as fsm:

        # initiates read of current DEC
        with m.State("DEC_READ"):
            comb += fast_r_dectb.addr.eq(FastRegs.DEC)
            comb += fast_r_dectb.ren.eq(1)
            m.next = "DEC_WRITE"

        # waits for DEC read to arrive (1 cycle), updates with new value
        with m.State("DEC_WRITE"):
            new_dec = Signal(64)
            # TODO: MSR.LPCR 32-bit decrement mode
            comb += new_dec.eq(fast_r_dectb.o_data - 1)  # DEC counts down
            comb += fast_w_dectb.addr.eq(FastRegs.DEC)
            comb += fast_w_dectb.wen.eq(1)
            comb += fast_w_dectb.i_data.eq(new_dec)
            sync += spr_dec.eq(new_dec)  # copy into cur_state for decoder
            m.next = "TB_READ"

        # initiates read of current TB
        with m.State("TB_READ"):
            comb += fast_r_dectb.addr.eq(FastRegs.TB)
            comb += fast_r_dectb.ren.eq(1)
            m.next = "TB_WRITE"

        # waits for read TB to arrive, initiates write of current TB
        with m.State("TB_WRITE"):
            new_tb = Signal(64)
            comb += new_tb.eq(fast_r_dectb.o_data + 1)  # TB counts up
            comb += fast_w_dectb.addr.eq(FastRegs.TB)
            comb += fast_w_dectb.wen.eq(1)
            comb += fast_w_dectb.i_data.eq(new_tb)
            m.next = "DEC_READ"

    return m
+
def elaborate(self, platform):
    """Build the top-level TestIssuer module.

    Wires up peripherals, reset/halt handling, the MSR/PC/SVSTATE state
    read/write ports, the DMI debug interface and the DEC/TB FSM, and
    optionally renames ports for microwatt compatibility.
    """
    m = Module()
    # convenience
    comb, sync = m.d.comb, m.d.sync
    cur_state = self.cur_state
    pdecode2 = self.pdecode2
    dbg = self.dbg

    # set up peripherals and core
    core_rst = self.core_rst
    self.setup_peripherals(m)

    # reset current state if core reset requested
    with m.If(core_rst):
        m.d.sync += self.cur_state.eq(0)

    # check halted condition: requested PC to execute matches DMI stop addr
    # and immediately stop. address of 0xffff_ffff_ffff_ffff can never
    # match
    halted = Signal()
    comb += halted.eq(dbg.stop_addr_o == dbg.state.pc)
    with m.If(halted):
        comb += dbg.core_stopped_i.eq(1)
        comb += dbg.terminate_i.eq(1)

    # PC and instruction from I-Memory
    comb += self.pc_o.eq(cur_state.pc)
    self.pc_changed = Signal()  # note write to PC
    self.msr_changed = Signal()  # note write to MSR
    self.sv_changed = Signal()  # note write to SVSTATE

    # read state either from incoming override or from regfile
    state = CoreState("get")  # current state (MSR/PC/SVSTATE)
    state_get(m, state.msr, core_rst, self.msr_i,
              "msr",  # read MSR
              self.state_r_msr, StateRegs.MSR)
    state_get(m, state.pc, core_rst, self.pc_i,
              "pc",  # read PC
              self.state_r_pc, StateRegs.PC)
    state_get(m, state.svstate, core_rst, self.svstate_i,
              "svstate",  # read SVSTATE
              self.state_r_sv, StateRegs.SVSTATE)

    # don't write pc every cycle
    comb += self.state_w_pc.wen.eq(0)
    comb += self.state_w_pc.i_data.eq(0)

    # connect up debug state. note "combinatorially same" below,
    # this is a bit naff, passing state over in the dbg class, but
    # because it is combinatorial it achieves the desired goal
    comb += dbg.state.eq(state)

    # this bit doesn't have to be in the FSM: connect up to read
    # regfiles on demand from DMI
    self.do_dmi(m, dbg)

    # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
    # (which uses that in PowerDecoder2 to raise 0x900 exception)
    self.tb_dec_fsm(m, cur_state.dec)

    # while stopped, allow updating the MSR, PC and SVSTATE.
    # these are mainly for debugging purposes (including DMI/JTAG)
    with m.If(dbg.core_stopped_i):
        with m.If(self.pc_i.ok):
            comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
            comb += self.state_w_pc.i_data.eq(self.pc_i.data)
            sync += self.pc_changed.eq(1)
        with m.If(self.msr_i.ok):
            comb += self.state_w_msr.wen.eq(1 << StateRegs.MSR)
            comb += self.state_w_msr.i_data.eq(self.msr_i.data)
            sync += self.msr_changed.eq(1)
        with m.If(self.svstate_i.ok | self.update_svstate):
            with m.If(self.svstate_i.ok):  # over-ride from external source
                comb += self.new_svstate.eq(self.svstate_i.data)
            comb += self.state_w_sv.wen.eq(1 << StateRegs.SVSTATE)
            comb += self.state_w_sv.i_data.eq(self.new_svstate)
            sync += self.sv_changed.eq(1)

    # start renaming some of the ports to match microwatt
    if self.microwatt_compat:
        self.core.o.core_terminate_o.name = "terminated_out"
        # names of DMI interface
        self.dbg.dmi.addr_i.name = 'dmi_addr'
        self.dbg.dmi.din.name = 'dmi_din'
        self.dbg.dmi.dout.name = 'dmi_dout'
        self.dbg.dmi.req_i.name = 'dmi_req'
        self.dbg.dmi.we_i.name = 'dmi_wr'
        self.dbg.dmi.ack_o.name = 'dmi_ack'
        # wishbone instruction bus
        ibus = self.imem.ibus
        ibus.adr.name = 'wishbone_insn_out.adr'
        ibus.dat_w.name = 'wishbone_insn_out.dat'
        ibus.sel.name = 'wishbone_insn_out.sel'
        ibus.cyc.name = 'wishbone_insn_out.cyc'
        ibus.stb.name = 'wishbone_insn_out.stb'
        ibus.we.name = 'wishbone_insn_out.we'
        ibus.dat_r.name = 'wishbone_insn_in.dat'
        ibus.ack.name = 'wishbone_insn_in.ack'
        ibus.stall.name = 'wishbone_insn_in.stall'
        # wishbone data bus
        dbus = self.core.l0.cmpi.wb_bus()
        dbus.adr.name = 'wishbone_data_out.adr'
        dbus.dat_w.name = 'wishbone_data_out.dat'
        dbus.sel.name = 'wishbone_data_out.sel'
        dbus.cyc.name = 'wishbone_data_out.cyc'
        dbus.stb.name = 'wishbone_data_out.stb'
        dbus.we.name = 'wishbone_data_out.we'
        dbus.dat_r.name = 'wishbone_data_in.dat'
        dbus.ack.name = 'wishbone_data_in.ack'
        dbus.stall.name = 'wishbone_data_in.stall'

    return m
+
def __iter__(self):
    """Yield every externally-visible port, groups first, then singles."""
    for group in (self.pc_i.ports(), self.msr_i.ports()):
        yield from group
    yield self.pc_o
    yield self.memerr_o
    for group in (self.core.ports(), self.imem.ports()):
        yield from group
    yield self.core_bigendian_i
    yield self.busy_o
+
def ports(self):
    """Return the full port list by materialising the iterator."""
    return [port for port in self]
+
def external_ports(self):
    """Return the list of signals to expose at the module boundary.

    When ``microwatt_compat`` is set, a reduced microwatt-compatible
    list is returned (terminate, IRQ, clock/reset, DMI and the two
    wishbone buses minus non-compliant fields).  Otherwise the full
    TestIssuer port list is built up, conditionally including JTAG,
    SRAM, XICS and GPIO peripheral buses.
    """
    if self.microwatt_compat:
        ports = [self.core.o.core_terminate_o,
                 self.ext_irq,
                 self.alt_reset,  # not connected yet
                 ClockSignal(),
                 ResetSignal(),
                 ]
        ports += list(self.dbg.dmi.ports())
        # for dbus/ibus microwatt, exclude err btw and cti
        for name, sig in self.imem.ibus.fields.items():
            if name not in ['err', 'bte', 'cti', 'adr']:
                ports.append(sig)
        for name, sig in self.core.l0.cmpi.wb_bus().fields.items():
            if name not in ['err', 'bte', 'cti', 'adr']:
                ports.append(sig)
        # microwatt non-compliant with wishbone
        ports.append(self.ibus_adr)
        ports.append(self.dbus_adr)
        return ports

    # bug fix: this used to be two plain assignments, so the second one
    # ("ports = self.msr_i.ports()") overwrote the first, silently
    # dropping the pc_i ports from the external port list.
    ports = list(self.pc_i.ports())
    ports += list(self.msr_i.ports())
    ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
              ]

    if self.jtag_en:
        # JTAG tunnels DMI, so expose JTAG instead of raw DMI
        ports += list(self.jtag.external_ports())
    else:
        # JTAG disabled: expose the DMI interface directly
        ports += list(self.dbg.dmi.ports())

    ports += list(self.imem.ibus.fields.values())
    ports += list(self.core.l0.cmpi.wb_bus().fields.values())

    if self.sram4x4k:
        for sram in self.sram4k:
            ports += list(sram.bus.fields.values())

    if self.xics:
        # XICS interrupt controller buses plus the raw interrupt lines
        ports += list(self.xics_icp.bus.fields.values())
        ports += list(self.xics_ics.bus.fields.values())
        ports.append(self.int_level_i)
    else:
        # no XICS: single external interrupt line
        ports.append(self.ext_irq)

    if self.gpio:
        ports += list(self.simple_gpio.bus.fields.values())
        ports.append(self.gpio_o)

    return ports
+
# NOTE(review): this is an exact duplicate of the ports() method defined
# earlier in the class; being later in the class body, this definition is
# the one that takes effect.  Consider removing one of the two copies.
def ports(self):
    """Return all ports yielded by __iter__ as a list."""
    return list(self)
+
+
+
+# Fetch Finite State Machine.
+# WARNING: there are currently DriverConflicts but it's actually working.
+# TODO, here: everything that is global in nature, information from the
+# main TestIssuerInternal, needs to move to either ispec() or ospec().
+# not only that: TestIssuerInternal.imem can entirely move into here
+# because imem is only ever accessed inside the FetchFSM.
class FetchFSM(ControlBase):
    """Fetch pipeline stage: reads raw instruction data from imem,
    partially decodes 32 bits at a time to detect SVP64 prefixes, and
    optionally reads a second 32-bit word when a prefix is found.
    """

    def __init__(self, allow_overlap, svp64_en, imem, core_rst,
                 pdecode2, cur_state,
                 dbg, core, svstate, nia, is_svp64_mode):
        self.allow_overlap = allow_overlap
        self.svp64_en = svp64_en
        self.imem = imem
        self.core_rst = core_rst
        self.pdecode2 = pdecode2
        self.cur_state = cur_state
        self.dbg = dbg
        self.core = core
        self.svstate = svstate
        self.nia = nia
        self.is_svp64_mode = is_svp64_mode

        # set up pipeline ControlBase and allocate i/o specs
        # (unusual: normally done by the Pipeline API)
        super().__init__(stage=self)
        self.p.i_data, self.n.o_data = self.new_specs(None)
        self.i, self.o = self.p.i_data, self.n.o_data

    # next 3 functions are Stage API Compliance
    def setup(self, m, i):
        # no per-stage setup needed: everything happens in elaborate()
        pass

    def ispec(self):
        # input to this stage: PC/MSR etc. to fetch from
        return FetchInput()

    def ospec(self):
        # output of this stage: the fetched (raw) instruction
        return FetchOutput()

    def elaborate(self, platform):
        """fetch FSM

        this FSM performs fetch of raw instruction data, partial-decodes
        it 32-bit at a time to detect SVP64 prefixes, and will optionally
        read a 2nd 32-bit quantity if that occurs.
        """
        m = super().elaborate(platform)

        dbg = self.dbg
        core = self.core
        pc = self.i.pc
        msr = self.i.msr
        svstate = self.svstate
        nia = self.nia
        is_svp64_mode = self.is_svp64_mode
        # pipeline handshake signals (previous/next stage)
        fetch_pc_o_ready = self.p.o_ready
        fetch_pc_i_valid = self.p.i_valid
        fetch_insn_o_valid = self.n.o_valid
        fetch_insn_i_ready = self.n.i_ready

        comb = m.d.comb
        sync = m.d.sync
        pdecode2 = self.pdecode2
        cur_state = self.cur_state
        dec_opcode_o = pdecode2.dec.raw_opcode_in  # raw opcode

        # also note instruction fetch failed
        # NOTE(review): flush_needed is set here but not read within this
        # method - presumably consumed elsewhere; verify before removing
        if hasattr(core, "icache"):
            fetch_failed = core.icache.i_out.fetch_failed
            flush_needed = True
        else:
            fetch_failed = Const(0, 1)
            flush_needed = False

        # set priv / virt mode on I-Cache, sigh
        if isinstance(self.imem, ICache):
            comb += self.imem.i_in.priv_mode.eq(~msr[MSR.PR])
            comb += self.imem.i_in.virt_mode.eq(msr[MSR.IR])  # Instr. Redir (VM)

        with m.FSM(name='fetch_fsm'):

            # waiting (zzz)
            with m.State("IDLE"):
                # fetch allowed if not failed and stopped but not stepping
                # (see dmi.py for how core_stop_o is generated)
                with m.If(~fetch_failed & ~dbg.core_stop_o):
                    comb += fetch_pc_o_ready.eq(1)
                with m.If(fetch_pc_i_valid & ~pdecode2.instr_fault
                          & ~dbg.core_stop_o):
                    # instruction allowed to go: start by reading the PC
                    # capture the PC and also drop it into Insn Memory
                    # we have joined a pair of combinatorial memory
                    # lookups together. this is Generally Bad.
                    comb += self.imem.a_pc_i.eq(pc)
                    comb += self.imem.a_i_valid.eq(1)
                    comb += self.imem.f_i_valid.eq(1)
                    # transfer state to output
                    sync += cur_state.pc.eq(pc)
                    sync += cur_state.svstate.eq(svstate)  # and svstate
                    sync += cur_state.msr.eq(msr)  # and msr

                    m.next = "INSN_READ"  # move to "wait for bus" phase

            # dummy pause to find out why simulation is not keeping up
            with m.State("INSN_READ"):
                # when using "single-step" mode, checking dbg.stopping_o
                # prevents progress. allow fetch to proceed once started
                stopping = Const(0)
                #if self.allow_overlap:
                #    stopping = dbg.stopping_o
                with m.If(stopping):
                    # stopping: jump back to idle
                    m.next = "IDLE"
                with m.Else():
                    with m.If(self.imem.f_busy_o &
                              ~pdecode2.instr_fault):  # zzz...
                        # busy but not fetch failed: stay in wait-read
                        comb += self.imem.a_pc_i.eq(pc)
                        comb += self.imem.a_i_valid.eq(1)
                        comb += self.imem.f_i_valid.eq(1)
                    with m.Else():
                        # not busy (or fetch failed!): instruction fetched
                        # when fetch failed, the instruction gets ignored
                        # by the decoder
                        if hasattr(core, "icache"):
                            # blech, icache returns actual instruction
                            insn = self.imem.f_instr_o
                        else:
                            # but these return raw memory
                            insn = get_insn(self.imem.f_instr_o, cur_state.pc)
                        if self.svp64_en:
                            # NOTE(review): self.svp64 and
                            # self.core_bigendian_i are not assigned in
                            # __init__ above - confirm they are provided
                            # by ControlBase or set externally
                            svp64 = self.svp64
                            # decode the SVP64 prefix, if any
                            comb += svp64.raw_opcode_in.eq(insn)
                            comb += svp64.bigendian.eq(self.core_bigendian_i)
                            # pass the decoded prefix (if any) to PowerDecoder2
                            sync += pdecode2.sv_rm.eq(svp64.svp64_rm)
                            sync += pdecode2.is_svp64_mode.eq(is_svp64_mode)
                            # remember whether this is a prefixed instruction,
                            # so the FSM can readily loop when VL==0
                            sync += is_svp64_mode.eq(svp64.is_svp64_mode)
                            # calculate the address of the following instruction
                            insn_size = Mux(svp64.is_svp64_mode, 8, 4)
                            sync += nia.eq(cur_state.pc + insn_size)
                            with m.If(~svp64.is_svp64_mode):
                                # with no prefix, store the instruction
                                # and hand it directly to the next FSM
                                sync += dec_opcode_o.eq(insn)
                                m.next = "INSN_READY"
                            with m.Else():
                                # fetch the rest of the instruction from memory
                                comb += self.imem.a_pc_i.eq(cur_state.pc + 4)
                                comb += self.imem.a_i_valid.eq(1)
                                comb += self.imem.f_i_valid.eq(1)
                                m.next = "INSN_READ2"
                        else:
                            # not SVP64 - 32-bit only
                            sync += nia.eq(cur_state.pc + 4)
                            sync += dec_opcode_o.eq(insn)
                            m.next = "INSN_READY"

            # second word of a prefixed (64-bit) SVP64 instruction
            with m.State("INSN_READ2"):
                with m.If(self.imem.f_busy_o):  # zzz...
                    # busy: stay in wait-read
                    comb += self.imem.a_i_valid.eq(1)
                    comb += self.imem.f_i_valid.eq(1)
                with m.Else():
                    # not busy: instruction fetched
                    if hasattr(core, "icache"):
                        # blech, icache returns actual instruction
                        insn = self.imem.f_instr_o
                    else:
                        insn = get_insn(self.imem.f_instr_o, cur_state.pc+4)
                    sync += dec_opcode_o.eq(insn)
                    m.next = "INSN_READY"
                    # TODO: probably can start looking at pdecode2.rm_dec
                    # here or maybe even in INSN_READ state, if svp64_mode
                    # detected, in order to trigger - and wait for - the
                    # predicate reading.
                    if self.svp64_en:
                        pmode = pdecode2.rm_dec.predmode
                        """
                        if pmode != SVP64PredMode.ALWAYS.value:
                            fire predicate loading FSM and wait before
                            moving to INSN_READY
                        else:
                            sync += self.srcmask.eq(-1) # set to all 1s
                            sync += self.dstmask.eq(-1) # set to all 1s
                            m.next = "INSN_READY"
                        """

            with m.State("INSN_READY"):
                # hand over the instruction, to be decoded
                comb += fetch_insn_o_valid.eq(1)
                with m.If(fetch_insn_i_ready):
                    m.next = "IDLE"

        # whatever was done above, over-ride it if core reset is held
        with m.If(self.core_rst):
            sync += nia.eq(0)

        return m
+
+
+class TestIssuerInternal(TestIssuerBase):
+ """TestIssuer - reads instructions from TestMemory and issues them
+
+ efficiency and speed is not the main goal here: functional correctness
+ and code clarity is. optimisations (which almost 100% interfere with
+ easy understanding) come later.
+ """
+
+ def fetch_predicate_fsm(self, m,
+ pred_insn_i_valid, pred_insn_o_ready,
+ pred_mask_o_valid, pred_mask_i_ready):
+ """fetch_predicate_fsm - obtains (constructs in the case of CR)
+ src/dest predicate masks
+
+ https://bugs.libre-soc.org/show_bug.cgi?id=617
+ the predicates can be read here, by using IntRegs r_ports['pred']
+ or CRRegs r_ports['pred']. in the case of CRs it will have to
+ be done through multiple reads, extracting one relevant at a time.
+ later, a faster way would be to use the 32-bit-wide CR port but
+ this is more complex decoding, here. equivalent code used in
+ ISACaller is "from openpower.decoder.isa.caller import get_predcr"
+
+ note: this ENTIRE FSM is not to be called when svp64 is disabled
+ """
+ comb = m.d.comb
+ sync = m.d.sync
+ pdecode2 = self.pdecode2
+ rm_dec = pdecode2.rm_dec # SVP64RMModeDecode
+ predmode = rm_dec.predmode
+ srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
+ cr_pred, int_pred = self.cr_pred, self.int_pred # read regfiles
+ # get src/dst step, so we can skip already used mask bits
+ cur_state = self.cur_state
+ srcstep = cur_state.svstate.srcstep
+ dststep = cur_state.svstate.dststep
+ cur_vl = cur_state.svstate.vl
+
+ # decode predicates
+ sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
+ dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
+ sidx, scrinvert = get_predcr(m, srcpred, 's')
+ didx, dcrinvert = get_predcr(m, dstpred, 'd')
+
+ # store fetched masks, for either intpred or crpred
+ # when src/dst step is not zero, the skipped mask bits need to be
+ # shifted-out, before actually storing them in src/dest mask
+ new_srcmask = Signal(64, reset_less=True)
+ new_dstmask = Signal(64, reset_less=True)
+
+ with m.FSM(name="fetch_predicate"):
+
+ with m.State("FETCH_PRED_IDLE"):
+ comb += pred_insn_o_ready.eq(1)
+ with m.If(pred_insn_i_valid):
+ with m.If(predmode == SVP64PredMode.INT):
+ # skip fetching destination mask register, when zero
+ with m.If(dall1s):
+ sync += new_dstmask.eq(-1)
+ # directly go to fetch source mask register
+ # guaranteed not to be zero (otherwise predmode
+ # would be SVP64PredMode.ALWAYS, not INT)
+ comb += int_pred.addr.eq(sregread)
+ comb += int_pred.ren.eq(1)
+ m.next = "INT_SRC_READ"
+ # fetch destination predicate register
+ with m.Else():
+ comb += int_pred.addr.eq(dregread)
+ comb += int_pred.ren.eq(1)
+ m.next = "INT_DST_READ"
+ with m.Elif(predmode == SVP64PredMode.CR):
+ # go fetch masks from the CR register file
+ sync += new_srcmask.eq(0)
+ sync += new_dstmask.eq(0)
+ m.next = "CR_READ"
+ with m.Else():
+ sync += self.srcmask.eq(-1)
+ sync += self.dstmask.eq(-1)
+ m.next = "FETCH_PRED_DONE"
+
+ with m.State("INT_DST_READ"):
+ # store destination mask
+ inv = Repl(dinvert, 64)
+ with m.If(dunary):