dec2.dec.RT)
if out_sel == OutSel.RT.value:
return out, o_isvec
+ elif name == 'RT_OR_ZERO':
+ log ("get_pdecode_idx_out", out_sel, OutSel.RT.value,
+ OutSel.RT_OR_ZERO.value, out, o_isvec,
+ dec2.dec.RT)
+ if out_sel == OutSel.RT_OR_ZERO.value:
+ return out, o_isvec
elif name == 'FRA':
log ("get_pdecode_idx_out", out_sel, OutSel.FRA.value, out, o_isvec)
if out_sel == OutSel.FRA.value:
SO = self.spr['XER'][XER_bits['SO']]
log("handle_comparison SO", SO)
cr_field = selectconcat(negative, positive, zero, SO)
+ log("handle_comparison cr_field", self.cr, cr_idx, cr_field)
self.crl[cr_idx].eq(cr_field)
def set_pc(self, pc_val):
yield self.dec2.dec.raw_opcode_in.eq(ins & 0xffffffff) # v3.0B suffix
yield self.dec2.sv_rm.eq(sv_rm) # svp64 prefix
yield Settle()
+ # store this for use in get_src_dststeps()
+ self.ldstmode = yield self.dec2.rm_dec.ldstmode
def execute_one(self):
"""execute one instruction
asmop = 'mtcrf'
return asmop
+ def get_remap_indices(self):
+ """WARNING, this function stores remap_idxs and remap_loopends
+ in the class for later use. this is to avoid problems with yield
+ (this function is called from non-generator contexts).
+ returns the list of (SVSHAPE, iterator) pairs for debug/reuse.
+ """
+ # go through all iterators in lock-step, advance to next remap_idx
+ srcstep, dststep = self.get_src_dststeps()
+ # get four SVSHAPEs. here we are hard-coding
+ SVSHAPE0 = self.spr['SVSHAPE0']
+ SVSHAPE1 = self.spr['SVSHAPE1']
+ SVSHAPE2 = self.spr['SVSHAPE2']
+ SVSHAPE3 = self.spr['SVSHAPE3']
+ # set up the iterators
+ remaps = [(SVSHAPE0, SVSHAPE0.get_iterator()),
+ (SVSHAPE1, SVSHAPE1.get_iterator()),
+ (SVSHAPE2, SVSHAPE2.get_iterator()),
+ (SVSHAPE3, SVSHAPE3.get_iterator()),
+ ]
+
+ # stored on the instance (see docstring): one entry per SVSHAPE
+ self.remap_loopends = [0] * 4
+ self.remap_idxs = [0, 1, 2, 3]
+ dbg = []
+ for i, (shape, remap) in enumerate(remaps):
+ # zero is "disabled"
+ if shape.value == 0x0:
+ self.remap_idxs[i] = 0
+ # NOTE(review): a disabled shape still falls through to the
+ # search loop below and overwrites remap_idxs[i] — confirm
+ # that is intended rather than a missing "continue"
+ # pick src or dststep depending on reg num (0-2=in, 3-4=out)
+ step = dststep if (i in [3, 4]) else srcstep
+ # this is terrible. O(N^2) looking for the match. but hey.
+ for idx, (remap_idx, loopends) in enumerate(remap):
+ if idx == step:
+ break
+ # remap_idx/loopends are whatever the iterator yielded at "step"
+ self.remap_idxs[i] = remap_idx
+ self.remap_loopends[i] = loopends
+ dbg.append((i, step, remap_idx, loopends))
+ for (i, step, remap_idx, loopends) in dbg:
+ log ("SVSHAPE %d idx, end" % i, step, remap_idx, bin(loopends))
+ return remaps
+
def get_spr_msb(self):
"""tests bit 20 of the current instruction and returns True if set.
presumably the top bit of the (split) SPR field — TODO confirm.
generator: must be invoked with "yield from".
"""
dec_insn = yield self.dec2.e.do.insn
return dec_insn & (1 << 20) != 0 # sigh - XFF.spr[-1]?
self.last_st_addr = None # reset the last known store address
self.last_ld_addr = None # etc.
- name = name.strip() # remove spaces if not already done so
+ ins_name = name.strip() # remove spaces if not already done so
if self.halted:
- log("halted - not executing", name)
+ log("halted - not executing", ins_name)
return
# TODO, asmregs is from the spec, e.g. add RT,RA,RB
# see http://bugs.libre-riscv.org/show_bug.cgi?id=282
asmop = yield from self.get_assembly_name()
- log("call", name, asmop)
+ log("call", ins_name, asmop)
# check privileged
int_op = yield self.dec2.dec.op.internal_op
return
# check halted condition
- if name == 'attn':
+ if ins_name == 'attn':
self.halted = True
return
# check illegal instruction
illegal = False
- if name not in ['mtcrf', 'mtocrf']:
- illegal = name != asmop
+ if ins_name not in ['mtcrf', 'mtocrf']:
+ illegal = ins_name != asmop
# sigh deal with setvl not being supported by binutils (.long)
if asmop.startswith('setvl'):
illegal = False
- name = 'setvl'
+ ins_name = 'setvl'
+
+ # and svstep not being supported by binutils (.long)
+ if asmop.startswith('svstep'):
+ illegal = False
+ ins_name = 'svstep'
# and svremap not being supported by binutils (.long)
if asmop.startswith('svremap'):
illegal = False
- name = 'svremap'
+ ins_name = 'svremap'
# and svshape not being supported by binutils (.long)
if asmop.startswith('svshape'):
illegal = False
- name = 'svshape'
+ ins_name = 'svshape'
+
+ # and fsin and fcos
+ if asmop == 'fsins':
+ illegal = False
+ ins_name = 'fsins'
+ if asmop == 'fcoss':
+ illegal = False
+ ins_name = 'fcoss'
# sigh also deal with ffmadds not being supported by binutils (.long)
if asmop == 'ffmadds':
illegal = False
- name = 'ffmadds'
+ ins_name = 'ffmadds'
+
+ # and fdmadds not being supported by binutils (.long)
+ if asmop == 'fdmadds':
+ illegal = False
+ ins_name = 'fdmadds'
# and ffadds not being supported by binutils (.long)
if asmop == 'ffadds':
illegal = False
- name = 'ffadds'
+ ins_name = 'ffadds'
if illegal:
- print("illegal", name, asmop)
+ print("illegal", ins_name, asmop)
self.call_trap(0x700, PIb.ILLEG)
print("name %s != %s - calling ILLEGAL trap, PC: %x" %
- (name, asmop, self.pc.CIA.value))
+ (ins_name, asmop, self.pc.CIA.value))
return
# this is for setvl "Vertical" mode: if set true,
- # srcstep/dststep is explicitly advanced
+ # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
+ # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
self.allow_next_step_inc = False
+ self.svstate_next_mode = 0
# nop has to be supported, we could let the actual op calculate
# but PowerDecoder has a pattern for nop
- if name is 'nop':
+ if ins_name is 'nop':
self.update_pc_next()
return
- info = self.instrs[name]
+ info = self.instrs[ins_name]
yield from self.prep_namespace(info.form, info.op_fields)
# preserve order of register names
log("input names", input_names)
# get SVP64 entry for the current instruction
- sv_rm = self.svp64rm.instrs.get(name)
+ sv_rm = self.svp64rm.instrs.get(ins_name)
if sv_rm is not None:
dest_cr, src_cr, src_byname, dest_byname = decode_extra(sv_rm)
else:
log ("sv rm", sv_rm, dest_cr, src_cr, src_byname, dest_byname)
# see if srcstep/dststep need skipping over masked-out predicate bits
- if self.is_svp64_mode:
+ if (self.is_svp64_mode or ins_name == 'setvl' or
+ ins_name.startswith("sv")):
yield from self.svstate_pre_inc()
+ if self.is_svp64_mode:
pre = yield from self.update_new_svstate_steps()
if pre:
self.svp64_reset_loop()
self.update_nia()
self.update_pc_next()
return
- srcstep, dststep = self.new_srcstep, self.new_dststep
+ srcstep, dststep = self.get_src_dststeps()
pred_dst_zero = self.pred_dst_zero
pred_src_zero = self.pred_src_zero
vl = self.svstate.vl
active = (persist or self.last_op_svshape) and remap_en != 0
yield self.dec2.remap_active.eq(remap_en if active else 0)
yield Settle()
+ if persist or self.last_op_svshape:
+ remaps = self.get_remap_indices()
if self.is_svp64_mode and (persist or self.last_op_svshape):
- # get four SVSHAPEs. here we are hard-coding
- SVSHAPE0 = self.spr['SVSHAPE0']
- SVSHAPE1 = self.spr['SVSHAPE1']
- SVSHAPE2 = self.spr['SVSHAPE2']
- SVSHAPE3 = self.spr['SVSHAPE3']
# just some convenient debug info
for i in range(4):
sname = 'SVSHAPE%d' % i
(self.dec2.o_step, mo0), # RT
(self.dec2.o2_step, mo1), # EA
]
- # set up the iterators
- remaps = [(SVSHAPE0, SVSHAPE0.get_iterator()),
- (SVSHAPE1, SVSHAPE1.get_iterator()),
- (SVSHAPE2, SVSHAPE2.get_iterator()),
- (SVSHAPE3, SVSHAPE3.get_iterator()),
- ]
- # go through all iterators in lock-step, advance to next remap_idx
- remap_idxs = []
- for i, (shape, remap) in enumerate(remaps):
- # zero is "disabled"
- if shape.value == 0x0:
- remap_idxs.append(0)
- # pick src or dststep depending on reg num (0-2=in, 3-4=out)
- step = dststep if (i in [3, 4]) else srcstep
- # this is terrible. O(N^2) looking for the match. but hey.
- for idx, remap_idx in enumerate(remap):
- if idx == step:
- break
- remap_idxs.append(remap_idx)
-
+ remap_idxs = self.remap_idxs
rremaps = []
# now cross-index the required SHAPE for each of 3-in 2-out regs
rnames = ['RA', 'RB', 'RC', 'RT', 'EA']
yield dstep.eq(remap_idx)
# debug printout info
- rremaps.append((shape.mode, i, rnames[i], step, shape_idx,
+ rremaps.append((shape.mode, i, rnames[i], shape_idx,
remap_idx))
for x in rremaps:
log ("shape remap", x)
reg_val = 0
inputs.append(reg_val)
# arrrrgh, awful hack, to get _RT into namespace
- if asmop == 'setvl':
+ if ins_name in ['setvl', 'svstep']:
regname = "_RT"
RT = yield self.dec2.dec.RT
self.namespace[regname] = SelectableInt(RT, 5)
+ if RT == 0:
+ self.namespace["RT"] = SelectableInt(0, 5)
+ regnum, is_vec = yield from get_pdecode_idx_out(self.dec2, "RT")
+ log('hack input reg %s %s' % (name, str(regnum)), is_vec)
# in SVP64 mode for LD/ST work out immediate
# XXX TODO: replace_ds for DS-Form rather than D-Form.
log ("bitrev SVD", imm)
replace_d = True
else:
- imm = yield self.dec2.dec.fields.FormD.D[0:16]
+ if info.form == 'DS':
+ # DS-Form, multiply by 4 then knock 2 bits off after
+ imm = yield self.dec2.dec.fields.FormDS.DS[0:14] * 4
+ else:
+ imm = yield self.dec2.dec.fields.FormD.D[0:16]
imm = exts(imm, 16) # sign-extend to integer
# get the right step. LD is from srcstep, ST is dststep
op = yield self.dec2.e.do.insn_type
offsmul = 0
if op == MicrOp.OP_LOAD.value:
- offsmul = srcstep
- log("D-field src", imm, offsmul)
+ if remap_active:
+ offsmul = yield self.dec2.in1_step
+ log("D-field REMAP src", imm, offsmul)
+ else:
+ offsmul = srcstep
+ log("D-field src", imm, offsmul)
elif op == MicrOp.OP_STORE.value:
+ # XXX NOTE! no bit-reversed STORE! this should not ever be used
offsmul = dststep
log("D-field dst", imm, offsmul)
- # bit-reverse mode
+ # bit-reverse mode, rev already done through get_src_dst_steps()
if ldstmode == SVP64LDSTmode.BITREVERSE.value:
# manually look up RC, sigh
RC = yield self.dec2.dec.RC[0:5]
RC = self.gpr(RC)
- log ("RC", RC.value, "imm", imm, "offs", bin(offsmul),
- "rev", bin(bitrev(offsmul, vl)))
- imm = SelectableInt((imm * bitrev(offsmul, vl)) << RC.value, 32)
+ log ("LD-BITREVERSE:", "VL", vl,
+ "RC", RC.value, "imm", imm,
+ "offs", bin(offsmul),
+ )
+ imm = SelectableInt((imm * offsmul) << RC.value, 32)
# Unit-Strided LD/ST adds offset*width to immediate
elif ldstmode == SVP64LDSTmode.UNITSTRIDE.value:
ldst_len = yield self.dec2.e.do.data_len
elif ldstmode == SVP64LDSTmode.ELSTRIDE.value:
imm = SelectableInt(imm * offsmul, 32)
replace_d = True
- ldst_ra_vec = yield self.dec2.rm_dec.ldst_ra_vec
- ldst_imz_in = yield self.dec2.rm_dec.ldst_imz_in
- log("LDSTmode", ldstmode, SVP64LDSTmode.BITREVERSE.value,
- offsmul, imm, ldst_ra_vec, ldst_imz_in)
- # new replacement D
+ if replace_d:
+ ldst_ra_vec = yield self.dec2.rm_dec.ldst_ra_vec
+ ldst_imz_in = yield self.dec2.rm_dec.ldst_imz_in
+ log("LDSTmode", SVP64LDSTmode(ldstmode),
+ offsmul, imm, ldst_ra_vec, ldst_imz_in)
+ # new replacement D... errr.. DS
if replace_d:
- self.namespace['D'] = imm
+ if info.form == 'DS':
+ # TODO: assert 2 LSBs are zero?
+ log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm.value))
+ imm.value = imm.value >> 2
+ self.namespace['DS'] = imm
+ else:
+ self.namespace['D'] = imm
# "special" registers
for special in info.special_regs:
if not self.is_svp64_mode or not pred_dst_zero:
if hasattr(self.dec2.e.do, "rc"):
rc_en = yield self.dec2.e.do.rc.rc
- if rc_en:
+ if rc_en and ins_name not in ['svstep']:
regnum, is_vec = yield from get_pdecode_cr_out(self.dec2, "CR0")
self.handle_comparison(results, regnum)
pre = False
post = False
if self.allow_next_step_inc:
- log("SVSTATE_NEXT: inc requested")
+ log("SVSTATE_NEXT: inc requested, mode",
+ self.svstate_next_mode, self.allow_next_step_inc)
yield from self.svstate_pre_inc()
pre = yield from self.update_new_svstate_steps()
if pre:
results = [SelectableInt(0, 64)]
self.handle_comparison(results) # CR0
else:
- log ("SVSTATE_NEXT: post-inc")
+ if self.allow_next_step_inc == 2:
+ log ("SVSTATE_NEXT: read")
+ yield from self.svstate_post_inc()
+ else:
+ log ("SVSTATE_NEXT: post-inc")
+ # use actual src/dst-step here to check end, do NOT
+ # use bit-reversed version
srcstep, dststep = self.new_srcstep, self.new_dststep
+ remaps = self.get_remap_indices()
+ remap_idxs = self.remap_idxs
vl = self.svstate.vl
end_src = srcstep == vl-1
end_dst = dststep == vl-1
- if not end_src:
- self.svstate.srcstep += SelectableInt(1, 7)
- if not end_dst:
- self.svstate.dststep += SelectableInt(1, 7)
+ if self.allow_next_step_inc != 2:
+ if not end_src:
+ self.svstate.srcstep += SelectableInt(1, 7)
+ if not end_dst:
+ self.svstate.dststep += SelectableInt(1, 7)
self.namespace['SVSTATE'] = self.svstate.spr
# set CR0 (if Rc=1) based on end
if rc_en:
srcstep = self.svstate.srcstep
dststep = self.svstate.srcstep
- endtest = 0 if (end_src or end_dst) else 1
- results = [SelectableInt(endtest, 64)]
- self.handle_comparison(results) # CR0
+ endtest = 1 if (end_src or end_dst) else 0
+ #results = [SelectableInt(endtest, 64)]
+ #self.handle_comparison(results) # CR0
+
+ # see if svstep was requested, if so, which SVSTATE
+ endings = 0b111
+ if self.svstate_next_mode > 0:
+ shape_idx = self.svstate_next_mode.value-1
+ endings = self.remap_loopends[shape_idx]
+ cr_field = SelectableInt((~endings)<<1 | endtest, 4)
+ print ("svstep Rc=1, CR0", cr_field)
+ self.crl[0].eq(cr_field) # CR0
if end_src or end_dst:
# reset at end of loop including exit Vertical Mode
log ("SVSTATE_NEXT: after increments, reset")
self.update_pc_next()
- def SVSTATE_NEXT(self):
+ def SVSTATE_NEXT(self, mode, submode):
"""explicitly moves srcstep/dststep on to next element, for
"Vertical-First" mode. this function is called from
setvl pseudo-code, as a pseudo-op "svstep"
+
+ WARNING: this function uses information that was created EARLIER
+ due to it being in the middle of a yield, but this function is
+ *NOT* called from yield (it's called from compiled pseudocode).
+
+ returns a 7-bit SelectableInt: the stored remap index for the
+ SVSHAPE selected by mode (mode-1), or zero when mode is 0.
"""
- log("SVSTATE_NEXT")
- self.allow_next_step_inc = True
+ # submode encodes the svstep sub-operation; +1 so 0 means "off"
+ self.allow_next_step_inc = submode.value + 1
+ log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
+ # remember which SVSTATE/SVSHAPE to test later in execute_one
+ self.svstate_next_mode = mode
+ if self.svstate_next_mode > 0:
+ shape_idx = self.svstate_next_mode.value-1
+ # remap_idxs was stored earlier by get_remap_indices()
+ return SelectableInt(self.remap_idxs[shape_idx], 7)
+ return SelectableInt(0, 7)
def svstate_pre_inc(self):
"""check if srcstep/dststep need to skip over masked-out predicate bits
sv_a_nz = yield self.dec2.sv_a_nz
fft_mode = yield self.dec2.use_svp64_fft
in1 = yield self.dec2.e.read_reg1.data
- log ("SVP64: VL, srcstep, dststep, sv_a_nz, in1 fft",
- vl, srcstep, dststep, sv_a_nz, in1, fft_mode)
+ log ("SVP64: VL, srcstep, dststep, sv_a_nz, in1 fft, svp64",
+ vl, srcstep, dststep, sv_a_nz, in1, fft_mode,
+ self.is_svp64_mode)
# get predicate mask
srcmask = dstmask = 0xffff_ffff_ffff_ffff
log (" new srcstep", srcstep)
log (" new dststep", dststep)
+ def get_src_dststeps(self):
+ """gets srcstep and dststep but performs bit-reversal on srcstep if
+ required. use this ONLY to perform calculations, do NOT update
+ SVSTATE with the bit-reversed value of srcstep
+
+ ARGH, had to store self.ldstmode and VL due to yield issues
+ (self.ldstmode is captured in set_pc, before this is called)
+
+ returns (srcstep, dststep) tuple
+ """
+ srcstep, dststep = self.new_srcstep, self.new_dststep
+ # bit-reverse applies only in SVP64 LD/ST bit-reverse mode
+ if self.is_svp64_mode:
+ if self.ldstmode == SVP64LDSTmode.BITREVERSE.value:
+ vl = self.svstate.vl
+ log ("SRCSTEP-BITREVERSE:", "VL", vl, "srcstep", srcstep,
+ "rev", bin(bitrev(srcstep, vl)))
+ srcstep = bitrev(srcstep, vl)
+
+ return (srcstep, dststep)
+
def update_new_svstate_steps(self):
+ # note, do not get the bit-reversed srcstep here!
srcstep, dststep = self.new_srcstep, self.new_dststep
# update SVSTATE with new srcstep