INV = 2 # invert CR sense 0=set 1=unset
CR_MSB = 3 # CR bit to update (with Rc=1)
CR_LSB = 4
+ VLI = 3 # VL inclusive: truncated VL includes the failing element
RC1 = 4 # update CR as if Rc=1 (when Rc=0)
# LD immediate els (element-stride) locations, depending on mode
ELS_NORMAL = 4
else:
rc_en = False
rc_ok = False
+ # annoying: if RC1 is set, ignore rc_ok (needed for creating the *assembly name*)
+ RC1 = yield self.dec2.rm_dec.RC1
+ if RC1:
+ rc_en = False
+ rc_ok = False
# grrrr have to special-case MUL op (see DecodeOE)
log("ov %d en %d rc %d en %d op %d" %
(ov_ok, ov_en, rc_ok, rc_en, int_op))
rm_mode = yield self.dec2.rm_dec.mode
ff_inv = yield self.dec2.rm_dec.inv
cr_bit = yield self.dec2.rm_dec.cr_sel
+ RC1 = yield self.dec2.rm_dec.RC1
+ vli = yield self.dec2.rm_dec.vli # VL inclusive if truncated
log(" ff rm_mode", rc_en, rm_mode, SVP64RMMode.FFIRST.value)
log(" inv", ff_inv)
+ log(" RC1", RC1)
+ log(" vli", vli)
log(" cr_bit", cr_bit)
ffirst_hit = False
if rc_en and rm_mode == SVP64RMMode.FFIRST.value:
log("cr test", regnum, int(crtest), crtest, cr_bit, ff_inv)
log("cr test?", ffirst_hit)
if ffirst_hit:
- self.svstate.vl = srcstep
+ vli = SelectableInt(int(vli), 7)
+ self.svstate.vl = srcstep + vli
yield self.dec2.state.svstate.eq(self.svstate.value)
yield Settle() # let decoder update
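For clarity, here is a minimal plain-Python sketch (names illustrative, not the simulator's API) of the truncation rule applied above: when fail-first hits at element srcstep, VL becomes srcstep without /vli (failing element excluded) and srcstep + 1 with /vli (failing element included).

def truncated_vl(srcstep, vli):
    # vli=0: exclude the element that triggered fail-first
    # vli=1: include it (VL-inclusive)
    return srcstep + (1 if vli else 0)

assert truncated_vl(2, 0) == 2  # hit at element 2, no /vli: VL=2
assert truncated_vl(2, 1) == 3  # hit at element 2, /vli:    VL=3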
self.assertEqual(sim.svstate.srcstep, 0)
self.assertEqual(sim.svstate.dststep, 0)
- def tst_sv_addi_ffirst_vli(self):
- lst = SVP64Asm([ "sv.subf/ff=~RC1/vli *0,8,*0"
+ def test_sv_addi_ffirst_vli(self):
+ lst = SVP64Asm([ "sv.subf/ff=RC1/vli *0,8,*0"
])
lst = list(lst)
comb += dec_bi.sel_in.eq(self.op_get("in2_sel"))
comb += self.do_copy("imm_data", dec_bi.imm_out) # imm in RB
- # rc and oe out
- comb += self.do_copy("rc", dec_rc.rc_out)
- if self.svp64_en:
- # OE only enabled when SVP64 not active
- with m.If(~self.is_svp64_mode):
- comb += self.do_copy("oe", dec_oe.oe_out)
- else:
- comb += self.do_copy("oe", dec_oe.oe_out)
-
# CR in/out - note: these MUST match with what happens in
# DecodeCROut!
rc_out = self.dec_rc.rc_out.data
)):
comb += self.implicit_rs.eq(1)
+ # rc and oe out
+ comb += self.do_copy("rc", dec_rc.rc_out)
+ if self.svp64_en:
+ # OE only enabled when SVP64 not active
+ with m.If(~self.is_svp64_mode):
+ comb += self.do_copy("oe", dec_oe.oe_out)
+ # RC1 overrides Rc (in svp64 mode) when the rc type is NONE or ONE,
+ # or when Rc=0. for instructions with a forced Rc=1 (stbcx., pcdec.)
+ # the RC1 RM bit *becomes* Rc; for instructions with a real Rc=0/1
+ # field, RC1 *replaces* Rc when Rc=0.
+ with m.Elif((dec_rc.sel_in.matches(RCOE.RC, RCOE.RC_ONLY) &
+ (dec_rc.rc_out.data == 0)) |
+ (dec_rc.sel_in == RCOE.ONE)):
+ RC1 = Data(1, "RC1")
+ comb += RC1.ok.eq(rm_dec.RC1)
+ comb += RC1.RC1.eq(rm_dec.RC1)
+ comb += self.do_copy("rc", RC1)
+ else:
+ comb += self.do_copy("oe", dec_oe.oe_out)
+
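As a cross-check, a plain-Python sketch of the rc selection performed by the If/Elif above (the RCOE selectors are written as strings purely for illustration; this mirrors the decoder's intent, it is not the hardware). OE itself is only copied through when SVP64 is not active, as the m.If shows.

def effective_rc(svp64_mode, rc_sel, rc_bit, rm_rc1):
    if not svp64_mode:
        return rc_bit                   # non-SVP64: Rc passes straight through
    if rc_sel in ("RC", "RC_ONLY") and rc_bit == 0:
        return rm_rc1                   # Rc=0: RM RC1 bit replaces Rc
    if rc_sel == "ONE":
        return rm_rc1                   # forced-Rc=1 ops (stbcx., pcdec.)
    return rc_bit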
# decoded/selected instruction flags
comb += self.do_copy("data_len", self.op_get("ldst_len"))
comb += self.do_copy("invert_in", self.op_get("inv_a"))
self.subvl = Signal(2) # sub-vector length
self.saturate = Signal(SVP64sat)
self.RC1 = Signal()
+ self.vli = Signal() # fail-first VL-inclusive truncation
self.cr_sel = Signal(2) # bit of CR to test (index 0-3)
self.inv = Signal(1) # and whether it's inverted (like branch BO)
self.map_evm = Signal(1)
# extract failfirst
with m.If(self.mode == SVP64RMMode.FFIRST): # fail-first
+ comb += self.inv.eq(mode[SVP64MODE.INV])
with m.If(self.rc_in):
- comb += self.inv.eq(mode[SVP64MODE.INV])
comb += self.cr_sel.eq(cr)
+ with m.Else():
+ # Rc=0 only: RC1/VLI occupy the CR-selection bit positions
+ comb += self.RC1.eq(mode[SVP64MODE.RC1])
+ comb += self.vli.eq(mode[SVP64MODE.VLI])
+ comb += self.cr_sel.eq(0b10) # EQ bit index is implicit
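For reference, a plain-Python sketch of this fail-first extraction, using the SVP64MODE bit positions defined earlier; `mode` is treated as a plain int with the same bit numbering, and the composition of cr_sel from CR_MSB/CR_LSB is an assumption made here for illustration only.

def decode_ffirst(mode, rc_in):
    inv = (mode >> INV) & 1             # CR sense: 0=test-set, 1=test-unset
    if rc_in:
        # Rc=1: bit positions 3-4 select which CR bit to test
        cr_sel = (((mode >> CR_MSB) & 1) << 1) | ((mode >> CR_LSB) & 1)
        return dict(inv=inv, cr_sel=cr_sel, RC1=0, vli=0)
    # Rc=0: the same bit positions are reinterpreted as VLI and RC1,
    # and the tested CR bit is implicitly EQ (index 0b10)
    return dict(inv=inv, cr_sel=0b10,
                RC1=(mode >> RC1) & 1, vli=(mode >> VLI) & 1)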
# extract saturate
with m.Switch(mode2):