"""
import re
-from nmigen.back.pysim import Settle
+from nmigen.sim import Settle, Delay
from functools import wraps
from copy import copy, deepcopy
from openpower.decoder.orderedset import OrderedSet
from openpower.decoder.selectable_int import (
- SelectableIntMapping,
FieldSelectableInt,
SelectableInt,
selectconcat,
)
+from openpower.decoder.power_insn import SVP64Instruction
from openpower.decoder.power_enums import (spr_dict, spr_byname, XER_bits,
insns, MicrOp,
In1Sel, In2Sel, In3Sel,
OutSel, CRInSel, CROutSel, LDSTMode,
SVP64RMMode, SVP64PredMode,
SVP64PredInt, SVP64PredCR,
- SVP64LDSTmode)
+ SVP64LDSTmode, FPTRANS_INSNS)
from openpower.decoder.power_enums import SVPtype
from openpower.decoder.isa.svstate import SVP64State
-from openpower.util import log
+from openpower.util import LogKind, log
from collections import namedtuple
import math
"CA32": 0,
"overflow": 7, # should definitely be last
+ "CR0": 8, # likewise
}
fregs = ['FRA', 'FRB', 'FRC', 'FRS', 'FRT']
namespace['NIA'] = self.NIA
-# SVP64 ReMap field
-class SVP64RMFields(SelectableIntMapping):
- def __init__(self, value=0):
- self.spr = SelectableInt(value=value, bits=24)
- return super().__init__(si=self.spr, fields={
- "spr": range(24),
- # SVP64 RM fields: see https://libre-soc.org/openpower/sv/svp64/
- "mmode": (0,),
- "mask": range(1, 4),
- "elwidth": range(4, 6),
- "ewsrc": range(6, 8),
- "subvl": range(8, 10),
- "extra": range(10, 19),
- "mode": range(19, 24),
- # these cover the same extra field, split into parts as EXTRA2
- "extra2": dict(enumerate([
- range(10, 12),
- range(12, 14),
- range(14, 16),
- range(16, 18),
- ])),
- "smask": range(16, 19),
- # and here as well, but EXTRA3
- "extra3": dict(enumerate([
- range(10, 13),
- range(13, 16),
- range(16, 19),
- ])),
- })
-
-
-SVP64RM_MMODE_SIZE = len(SVP64RMFields().mmode.br)
-SVP64RM_MASK_SIZE = len(SVP64RMFields().mask.br)
-SVP64RM_ELWIDTH_SIZE = len(SVP64RMFields().elwidth.br)
-SVP64RM_EWSRC_SIZE = len(SVP64RMFields().ewsrc.br)
-SVP64RM_SUBVL_SIZE = len(SVP64RMFields().subvl.br)
-SVP64RM_EXTRA2_SPEC_SIZE = len(SVP64RMFields().extra2[0].br)
-SVP64RM_EXTRA3_SPEC_SIZE = len(SVP64RMFields().extra3[0].br)
-SVP64RM_SMASK_SIZE = len(SVP64RMFields().smask.br)
-SVP64RM_MODE_SIZE = len(SVP64RMFields().mode.br)
-
-
-# SVP64 Prefix fields: see https://libre-soc.org/openpower/sv/svp64/
-class SVP64PrefixFields(SelectableIntMapping):
- def __init__(self, value=0):
- self.insn = SelectableInt(value, 32)
- return super().__init__(si=self.insn, fields={
- "insn": range(32),
- # 6 bit major opcode EXT001, 2 bits "identifying" (7, 9), 24 SV ReMap
- "major": range(0, 6),
- "pid": (7, 9),
- # SVP64 24-bit RM (ReMap)
- "rm": ((6, 8) + tuple(range(10, 32))),
- })
-
-
-SV64P_MAJOR_SIZE = len(SVP64PrefixFields().major.br)
-SV64P_PID_SIZE = len(SVP64PrefixFields().pid.br)
-SV64P_RM_SIZE = len(SVP64PrefixFields().rm.br)
-
-
# CR register fields
# See PowerISA Version 3.0 B Book 1
# Section 2.3.1 Condition Register pages 30 - 31
r30 = gpr(30)
log("get_predint", mask, SVP64PredInt.ALWAYS.value)
if mask == SVP64PredInt.ALWAYS.value:
- return 0xffff_ffff_ffff_ffff # 64 bits of 1
+ return 0xffff_ffff_ffff_ffff # 64 bits of 1
if mask == SVP64PredInt.R3_UNARY.value:
return 1 << (gpr(3).value & 0b111111)
if mask == SVP64PredInt.R3.value:
log("get_pdecode_idx_in FRC in3", name, in3_sel, In3Sel.FRC.value,
in3, in3_isvec)
# identify which regnames map to in1/2/3
- if name == 'RA':
+ if name == 'RA' or name == 'RA_OR_ZERO':
if (in1_sel == In1Sel.RA.value or
(in1_sel == In1Sel.RA_OR_ZERO.value and in1 != 0)):
return in1, in1_isvec
return in3, in3_isvec
# XXX TODO, RC doesn't exist yet!
elif name == 'RC':
+ if in3_sel == In3Sel.RC.value:
+ return in3, in3_isvec
assert False, "RC does not exist yet"
elif name == 'RS':
if in1_sel == In1Sel.RS.value:
if name == 'CR0':
if out_sel == CROutSel.CR0.value:
return out, o_isvec
+ if name == 'CR1': # these are not actually calculated correctly
+ if out_sel == CROutSel.CR1.value:
+ return out, o_isvec
log("get_pdecode_cr_out not found", name)
return None, False
dec2.dec.RT)
if out_sel == OutSel.RT.value:
return out, o_isvec
+ if out_sel == OutSel.RT_OR_ZERO.value and out != 0:
+ return out, o_isvec
elif name == 'RT_OR_ZERO':
log("get_pdecode_idx_out", out_sel, OutSel.RT.value,
OutSel.RT_OR_ZERO.value, out, o_isvec,
out, o_isvec)
if upd == LDSTMode.update.value:
return out, o_isvec
+ if name == 'RS':
+ fft_en = yield dec2.implicit_rs
+ if fft_en:
+ log("get_pdecode_idx_out2", out_sel, OutSel.RS.value,
+ out, o_isvec)
+ return out, o_isvec
if name == 'FRS':
- int_op = yield dec2.dec.op.internal_op
- fft_en = yield dec2.use_svp64_fft
- # if int_op == MicrOp.OP_FP_MADD.value and fft_en:
+ fft_en = yield dec2.implicit_rs
if fft_en:
log("get_pdecode_idx_out2", out_sel, OutSel.FRS.value,
out, o_isvec)
return None, False
-class ISACaller(ISACallerHelper, ISAFPHelpers):
+class StepLoop:
+ """deals with svstate looping.
+ """
+
+ def __init__(self, svstate):
+ self.svstate = svstate
+
+ def get_iterators(self):
+ self.src_it = self.src_iterator()
+ self.dst_it = self.dst_iterator()
+
+ def src_iterator(self):
+ """source-stepping iterator
+ """
+ pack = self.svstate.pack
+
+ # source step
+ if pack:
+ # pack advances subvl in *outer* loop
+ if end_src:
+ if not end_ssub:
+ self.svstate.ssubstep += SelectableInt(1, 2)
+ self.svstate.srcstep = SelectableInt(0, 7) # reset
+ else:
+ self.svstate.srcstep += SelectableInt(1, 7) # advance srcstep
+ else:
+ # these cannot be done as for-loops because SVSTATE may change
+ # (srcstep/substep may be modified, interrupted, subvl/vl change)
+ # but they *can* be done as while-loops as long as every SVSTATE
+ # "thing" is re-read every single time a yield gives indices
+ while True: # outer vl loop
+ while True: # inner subvl loop
+ subvl = self.subvl
+ srcmask = self.srcmask
+ srcstep = self.svstate.srcstep
+ if self.pred_sz or ((1 << srcstep) & srcmask) != 0:
+ log(" advance src", srcstep, self.svstate.vl,
+ self.svstate.ssubstep, subvl)
+ # yield actual substep/srcstep
+ yield (self.svstate.ssubstep, srcstep)
+ if self.svstate.ssubstep == subvl: # end-point
+ self.svstate.ssubstep = SelectableInt(0, 2) # reset
+ break
+ self.svstate.ssubstep += SelectableInt(1, 2)
+ vl = self.svstate.vl
+ if srcstep == vl-1: # end-point
+ self.svstate.srcstep = SelectableInt(0, 7) # reset
+ break # trigger StopIteration
+ self.svstate.srcstep += SelectableInt(1, 7) # advance srcstep
+
+ def dst_iterator(self):
+ """dest-stepping iterator
+ """
+ unpack = self.svstate.unpack
+
+ # dest step
+ if unpack:
+            # unpack advances subvl in *outer* loop
+ pass # TODO
+ else:
+ # these cannot be done as for-loops because SVSTATE may change
+ # (dststep/substep may be modified, interrupted, subvl/vl change)
+ # but they *can* be done as while-loops as long as every SVSTATE
+ # "thing" is re-read every single time a yield gives indices
+ while True: # outer vl loop
+ while True: # inner subvl loop
+ subvl = self.subvl
+ dstmask = self.dstmask
+ dststep = self.svstate.dststep
+ if self.pred_dz or ((1 << dststep) & dstmask) != 0:
+ log(" advance dst", dststep, self.svstate.vl,
+ self.svstate.dsubstep, subvl)
+ # yield actual substep/dststep
+ yield (self.svstate.dsubstep, dststep)
+ if self.svstate.dsubstep == subvl: # end-point
+ self.svstate.dsubstep = SelectableInt(0, 2) # reset
+ break
+ self.svstate.dsubstep += SelectableInt(1, 2)
+ vl = self.svstate.vl
+ if dststep == vl-1: # end-point
+ self.svstate.dststep = SelectableInt(0, 7) # reset
+ break # trigger StopIteration
+ self.svstate.dststep += SelectableInt(1, 7) # advance dststep
+
+ def src_iterate(self):
+ """source-stepping iterator
+ """
+ end_src = self.end_src
+ subvl = self.subvl
+ pack = self.svstate.pack
+ unpack = self.svstate.unpack
+ ssubstep = self.svstate.ssubstep
+ end_ssub = ssubstep == subvl
+ log(" pack/unpack/subvl", pack, unpack, subvl,
+ "end", end_src,
+ "sub", end_ssub)
+ # first source step
+ srcstep = self.svstate.srcstep
+ if pack:
+ # pack advances subvl in *outer* loop
+ if end_src:
+ if not end_ssub:
+ self.svstate.ssubstep += SelectableInt(1, 2)
+ self.svstate.srcstep = SelectableInt(0, 7) # reset
+ else:
+ self.svstate.srcstep += SelectableInt(1, 7) # advance srcstep
+ else:
+ # advance subvl in *inner* loop
+ if end_ssub:
+ if not end_src:
+ self.svstate.srcstep += SelectableInt(1, 7)
+ self.svstate.ssubstep = SelectableInt(0, 2) # reset
+ else:
+ # advance ssubstep
+ self.svstate.ssubstep += SelectableInt(1, 2)
+
+ log(" advance src", self.svstate.srcstep, self.svstate.ssubstep)
+
+ def dst_iterate(self):
+ """dest step iterator
+ """
+ end_dst = self.end_dst
+ subvl = self.subvl
+ pack = self.svstate.pack
+ unpack = self.svstate.unpack
+ dsubstep = self.svstate.dsubstep
+ end_dsub = dsubstep == subvl
+ log(" pack/unpack/subvl", pack, unpack, subvl,
+ "end", end_dst,
+ "sub", end_dsub)
+ # now dest step
+ if unpack:
+ # unpack advances subvl in *outer* loop
+ if end_dst:
+ if not end_dsub:
+ self.svstate.dsubstep += SelectableInt(1, 2)
+ self.svstate.dststep = SelectableInt(0, 7) # reset
+ else:
+ self.svstate.dststep += SelectableInt(1, 7) # advance dststep
+ else:
+ # advance subvl in *inner* loop
+ if end_dsub:
+ if not end_dst:
+ self.svstate.dststep += SelectableInt(1, 7)
+ self.svstate.dsubstep = SelectableInt(0, 2) # reset
+ else:
+                # advance dsubstep
+ self.svstate.dsubstep += SelectableInt(1, 2)
+ log(" advance dst", self.svstate.dststep, self.svstate.dsubstep)
+
+ def advance_svstate_steps(self, end_src=False, end_dst=False):
+ """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
+ TODO when Pack/Unpack is set, substep becomes the *outer* loop
+ """
+ self.subvl = yield self.dec2.rm_dec.rm_in.subvl
+ self.end_src = end_src
+ self.end_dst = end_dst
+ self.src_iterate()
+ self.dst_iterate()
+
+ def read_src_mask(self):
+ """read/update pred_sz and src mask
+ """
+ # get SVSTATE VL (oh and print out some debug stuff)
+ vl = self.svstate.vl
+ srcstep = self.svstate.srcstep
+ ssubstep = self.svstate.ssubstep
+
+ # get predicate mask (all 64 bits)
+ srcmask = 0xffff_ffff_ffff_ffff
+
+ pmode = yield self.dec2.rm_dec.predmode
+ sv_ptype = yield self.dec2.dec.op.SV_Ptype
+ srcpred = yield self.dec2.rm_dec.srcpred
+ dstpred = yield self.dec2.rm_dec.dstpred
+ pred_sz = yield self.dec2.rm_dec.pred_sz
+ if pmode == SVP64PredMode.INT.value:
+ srcmask = dstmask = get_predint(self.gpr, dstpred)
+ if sv_ptype == SVPtype.P2.value:
+ srcmask = get_predint(self.gpr, srcpred)
+ elif pmode == SVP64PredMode.CR.value:
+ srcmask = dstmask = get_predcr(self.crl, dstpred, vl)
+ if sv_ptype == SVPtype.P2.value:
+ srcmask = get_predcr(self.crl, srcpred, vl)
+ # work out if the ssubsteps are completed
+ ssubstart = ssubstep == 0
+ log(" pmode", pmode)
+ log(" ptype", sv_ptype)
+ log(" srcpred", bin(srcpred))
+ log(" srcmask", bin(srcmask))
+ log(" pred_sz", bin(pred_sz))
+ log(" ssubstart", ssubstart)
+
+ # store all that above
+ self.srcstep_skip = False
+ self.srcmask = srcmask
+ self.pred_sz = pred_sz
+ self.new_ssubstep = ssubstep
+ log(" new ssubstep", ssubstep)
+ if ssubstart:
+ # until the predicate mask has a "1" bit... or we run out of VL
+ # let srcstep==VL be the indicator to move to next instruction
+ if not pred_sz:
+ self.srcstep_skip = True
+
+ def read_dst_mask(self):
+ """same as read_src_mask - check and record everything needed
+ """
+ # get SVSTATE VL (oh and print out some debug stuff)
+ # yield Delay(1e-10) # make changes visible
+ vl = self.svstate.vl
+ dststep = self.svstate.dststep
+ dsubstep = self.svstate.dsubstep
+
+ # get predicate mask (all 64 bits)
+ dstmask = 0xffff_ffff_ffff_ffff
+
+ pmode = yield self.dec2.rm_dec.predmode
+ reverse_gear = yield self.dec2.rm_dec.reverse_gear
+ sv_ptype = yield self.dec2.dec.op.SV_Ptype
+ dstpred = yield self.dec2.rm_dec.dstpred
+ pred_dz = yield self.dec2.rm_dec.pred_dz
+ if pmode == SVP64PredMode.INT.value:
+ dstmask = get_predint(self.gpr, dstpred)
+ elif pmode == SVP64PredMode.CR.value:
+ dstmask = get_predcr(self.crl, dstpred, vl)
+ # work out if the ssubsteps are completed
+ dsubstart = dsubstep == 0
+ log(" pmode", pmode)
+ log(" ptype", sv_ptype)
+ log(" dstpred", bin(dstpred))
+ log(" dstmask", bin(dstmask))
+ log(" pred_dz", bin(pred_dz))
+ log(" dsubstart", dsubstart)
+
+ self.dststep_skip = False
+ self.dstmask = dstmask
+ self.pred_dz = pred_dz
+ self.new_dsubstep = dsubstep
+ log(" new dsubstep", dsubstep)
+ if dsubstart:
+ if not pred_dz:
+ self.dststep_skip = True
+
+ def svstate_pre_inc(self):
+ """check if srcstep/dststep need to skip over masked-out predicate bits
+ note that this is not supposed to do anything to substep,
+ it is purely for skipping masked-out bits
+ """
+
+ yield from self.read_src_mask()
+ yield from self.read_dst_mask()
+
+ self.skip_src()
+ self.skip_dst()
+
+ def skip_src(self):
+
+ srcstep = self.svstate.srcstep
+ srcmask = self.srcmask
+ pred_src_zero = self.pred_sz
+ vl = self.svstate.vl
+ # srcstep-skipping opportunity identified
+ if self.srcstep_skip:
+ while (((1 << srcstep) & srcmask) == 0) and (srcstep != vl):
+ log(" sskip", bin(1 << srcstep))
+ srcstep += 1
+
+ # now work out if the relevant mask bits require zeroing
+ if pred_src_zero:
+ pred_src_zero = ((1 << srcstep) & srcmask) == 0
+
+        # store new srcstep
+ self.new_srcstep = srcstep
+ self.pred_src_zero = pred_src_zero
+ log(" new srcstep", srcstep)
+
+ def skip_dst(self):
+ # dststep-skipping opportunity identified
+ dststep = self.svstate.dststep
+ dstmask = self.dstmask
+ pred_dst_zero = self.pred_dz
+ vl = self.svstate.vl
+ if self.dststep_skip:
+ while (((1 << dststep) & dstmask) == 0) and (dststep != vl):
+ log(" dskip", bin(1 << dststep))
+ dststep += 1
+
+ # now work out if the relevant mask bits require zeroing
+ if pred_dst_zero:
+ pred_dst_zero = ((1 << dststep) & dstmask) == 0
+
+        # store new dststep
+ self.new_dststep = dststep
+ self.pred_dst_zero = pred_dst_zero
+ log(" new dststep", dststep)
+
+
+class ISACaller(ISACallerHelper, ISAFPHelpers, StepLoop):
# decoder2 - an instance of power_decoder2
# regfile - a list of initial values for the registers
# initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
if isinstance(initial_svstate, int):
initial_svstate = SVP64State(initial_svstate)
# SVSTATE, MSR and PC
- self.svstate = initial_svstate
+ StepLoop.__init__(self, initial_svstate)
self.msr = SelectableInt(initial_msr, 64) # underlying reg
self.pc = PC()
# GPR FPR SPR registers
for i in range(4):
sname = 'SVSHAPE%d' % i
if sname not in self.spr:
- self.spr[sname] = SVSHAPE(0)
+ val = 0
else:
- # make sure it's an SVSHAPE
val = self.spr[sname].value
- self.spr[sname] = SVSHAPE(val)
+ # make sure it's an SVSHAPE
+ self.spr[sname] = SVSHAPE(val, self.gpr)
self.last_op_svshape = False
# "raw" memory
self.mem = Mem(row_bytes=8, initial_mem=initial_mem)
+ self.mem.log_fancy(kind=LogKind.InstrInOuts)
self.imem = Mem(row_bytes=4, initial_mem=initial_insns)
# MMU mode, redirect underlying Mem through RADIX
if mmu:
hence the default arguments. when calling from inside ISACaller
it is best to use call_trap()
"""
- log("TRAP:", hex(trap_addr), hex(self.namespace['MSR'].value))
+ # https://bugs.libre-soc.org/show_bug.cgi?id=859
+ kaivb = self.spr['KAIVB'].value
+ msr = self.namespace['MSR'].value
+ log("TRAP:", hex(trap_addr), hex(msr), "kaivb", hex(kaivb))
# store CIA(+4?) in SRR0, set NIA to 0x700
# store MSR in SRR1, set MSR to um errr something, have to check spec
# store SVSTATE (if enabled) in SVSRR0
self.spr['SRR0'].value = self.pc.CIA.value
- self.spr['SRR1'].value = self.namespace['MSR'].value
+ self.spr['SRR1'].value = msr
if self.is_svp64_mode:
self.spr['SVSRR0'] = self.namespace['SVSTATE'].value
- self.trap_nia = SelectableInt(trap_addr, 64)
+ self.trap_nia = SelectableInt(trap_addr | (kaivb & ~0x1fff), 64)
self.spr['SRR1'][trap_bit] = 1 # change *copy* of MSR in SRR1
# set exception bits. TODO: this should, based on the address
# then "yield" fields only from op_fields rather than hard-coded
# list, here.
fields = self.decoder.sigforms[formname]
- log("prep_namespace", formname, op_fields)
+ log("prep_namespace", formname, op_fields, insn_name)
for name in op_fields:
# CR immediates. deal with separately. needs modifying
# pseudocode
assert regnum <= 7, "sigh, TODO, 128 CR fields"
val = (val & 0b11) | (regnum << 2)
else:
- if name == 'spr':
- sig = getattr(fields, name.upper())
- else:
- sig = getattr(fields, name)
+ sig = getattr(fields, name)
val = yield sig
# these are all opcode fields involved in index-selection of CR,
# and need to do "standard" arithmetic. CR[BA+32] for example
ov32 = 1 if input32_sgn[0] == input32_sgn[1] and \
output32_sgn != input32_sgn[0] else 0
+ # now update XER OV/OV32/SO
+ so = self.spr['XER'][XER_bits['SO']]
+ new_so = so | ov # sticky overflow ORs in old with new
self.spr['XER'][XER_bits['OV']] = ov
self.spr['XER'][XER_bits['OV32']] = ov32
- so = self.spr['XER'][XER_bits['SO']]
- so = so | ov
- self.spr['XER'][XER_bits['SO']] = so
+ self.spr['XER'][XER_bits['SO']] = new_so
+ log(" set overflow", ov, ov32, so, new_so)
- def handle_comparison(self, outputs, cr_idx=0):
+ def handle_comparison(self, outputs, cr_idx=0, overflow=None, no_so=False):
out = outputs[0]
assert isinstance(out, SelectableInt), \
"out zero not a SelectableInt %s" % repr(outputs)
# print ("handle_comparison exts 32 bit", hex(o32))
out = exts(out.value, out.bits)
log("handle_comparison exts", hex(out))
+ # create the three main CR flags, EQ GT LT
zero = SelectableInt(out == 0, 1)
positive = SelectableInt(out > 0, 1)
negative = SelectableInt(out < 0, 1)
- SO = self.spr['XER'][XER_bits['SO']]
- log("handle_comparison SO", SO)
+ # get (or not) XER.SO. for setvl this is important *not* to read SO
+ if no_so:
+ SO = SelectableInt(1, 0)
+ else:
+ SO = self.spr['XER'][XER_bits['SO']]
+ log("handle_comparison SO overflow", SO, overflow)
+ # alternative overflow checking (setvl mainly at the moment)
+ if overflow is not None and overflow == 1:
+ SO = SelectableInt(1, 1)
+ # create the four CR field values and set the required CR field
cr_field = selectconcat(negative, positive, zero, SO)
log("handle_comparison cr_field", self.cr, cr_idx, cr_field)
self.crl[cr_idx].eq(cr_field)
# SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
yield Settle()
opcode = yield self.dec2.dec.opcode_in
- pfx = SVP64PrefixFields() # TODO should probably use SVP64PrefixDecoder
- pfx.insn.value = opcode
- major = pfx.major.asint(msb0=True) # MSB0 inversion
- log("prefix test: opcode:", major, bin(major),
- pfx.insn[7] == 0b1, pfx.insn[9] == 0b1)
- self.is_svp64_mode = ((major == 0b000001) and
- pfx.insn[7].value == 0b1 and
- pfx.insn[9].value == 0b1)
+ opcode = SelectableInt(value=opcode, bits=32)
+ pfx = SVP64Instruction.Prefix(opcode)
+ log("prefix test: opcode:", pfx.po, bin(pfx.po), pfx.id)
+ self.is_svp64_mode = bool((pfx.po == 0b000001) and (pfx.id == 0b11))
self.pc.update_nia(self.is_svp64_mode)
# set SVP64 decode
yield self.dec2.is_svp64_mode.eq(self.is_svp64_mode)
return
# in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
- log("svp64.rm", bin(pfx.rm.asint(msb0=True)))
+ log("svp64.rm", bin(pfx.rm))
log(" svstate.vl", self.svstate.vl)
log(" svstate.mvl", self.svstate.maxvl)
- sv_rm = pfx.rm.asint(msb0=True)
ins = self.imem.ld(pc+4, 4, False, True, instr_fetch=True)
log(" svsetup: 0x%x 0x%x %s" % (pc+4, ins & 0xffffffff, bin(ins)))
yield self.dec2.dec.raw_opcode_in.eq(ins & 0xffffffff) # v3.0B suffix
- yield self.dec2.sv_rm.eq(sv_rm) # svp64 prefix
+ yield self.dec2.sv_rm.eq(int(pfx.rm)) # svp64 prefix
yield Settle()
def execute_one(self):
in the class for later use. this to avoid problems with yield
"""
# go through all iterators in lock-step, advance to next remap_idx
- srcstep, dststep = self.get_src_dststeps()
+ srcstep, dststep, ssubstep, dsubstep = self.get_src_dststeps()
# get four SVSHAPEs. here we are hard-coding
SVSHAPE0 = self.spr['SVSHAPE0']
SVSHAPE1 = self.spr['SVSHAPE1']
asmop = yield from self.get_assembly_name()
log("call", ins_name, asmop)
+ # sv.setvl is *not* a loop-function. sigh
+ log("is_svp64_mode", self.is_svp64_mode, asmop)
+
# check privileged
int_op = yield self.dec2.dec.op.internal_op
spr_msb = yield from self.get_spr_msb()
if ins_name not in ['mtcrf', 'mtocrf']:
illegal = ins_name != asmop
- # sigh deal with setvl not being supported by binutils (.long)
- if asmop.startswith('setvl'):
- illegal = False
- ins_name = 'setvl'
-
- # and svstep not being supported by binutils (.long)
- if asmop.startswith('svstep'):
+ # list of instructions not being supported by binutils (.long)
+ dotstrp = asmop[:-1] if asmop[-1] == '.' else asmop
+ if dotstrp in [*FPTRANS_INSNS,
+ 'ffmadds', 'fdmadds', 'ffadds',
+ 'mins', 'maxs', 'minu', 'maxu',
+ 'setvl', 'svindex', 'svremap', 'svstep',
+ 'svshape', 'svshape2',
+ 'grev', 'ternlogi', 'bmask', 'cprop',
+ 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
+ 'fmvis', 'fishmv', 'pcdec'
+ ]:
illegal = False
- ins_name = 'svstep'
-
- # and svremap not being supported by binutils (.long)
- if asmop.startswith('svremap'):
- illegal = False
- ins_name = 'svremap'
-
- # and svshape not being supported by binutils (.long)
- if asmop.startswith('svshape'):
- illegal = False
- ins_name = 'svshape'
-
- # and fsin and fcos
- if asmop == 'fsins':
- illegal = False
- ins_name = 'fsins'
- if asmop == 'fcoss':
- illegal = False
- ins_name = 'fcoss'
-
- # sigh also deal with ffmadds not being supported by binutils (.long)
- if asmop == 'ffmadds':
- illegal = False
- ins_name = 'ffmadds'
-
- # and fdmadds not being supported by binutils (.long)
- if asmop == 'fdmadds':
- illegal = False
- ins_name = 'fdmadds'
-
- # and ffadds not being supported by binutils (.long)
- if asmop == 'ffadds':
- illegal = False
- ins_name = 'ffadds'
-
- if asmop == 'ternlogi' \
- or re.fullmatch(r'grevw?i?\.?', asmop or ''):
- illegal = False
- ins_name = asmop
+ ins_name = dotstrp
# branch-conditional redirects to sv.bc
if asmop.startswith('bc') and self.is_svp64_mode:
ins_name = 'sv.%s' % ins_name
- log(" post-processed name", ins_name, asmop)
+ log(" post-processed name", dotstrp, ins_name, asmop)
# illegal instructions call TRAP at 0x700
if illegal:
# nop has to be supported, we could let the actual op calculate
# but PowerDecoder has a pattern for nop
- if ins_name is 'nop':
+ if ins_name == 'nop':
self.update_pc_next()
return
# look up instruction in ISA.instrs, prepare namespace
- info = self.instrs[ins_name]
+ if ins_name == 'pcdec': # grrrr yes there are others ("stbcx." etc.)
+ info = self.instrs[ins_name+"."]
+ else:
+ info = self.instrs[ins_name]
yield from self.prep_namespace(ins_name, info.form, info.op_fields)
# preserve order of register names
log("sv rm", sv_rm, dest_cr, src_cr, src_byname, dest_byname)
# see if srcstep/dststep need skipping over masked-out predicate bits
- if (self.is_svp64_mode or ins_name == 'setvl' or
- ins_name in ['svremap', 'svstate']):
+ if (self.is_svp64_mode or ins_name in ['setvl', 'svremap', 'svstate']):
yield from self.svstate_pre_inc()
if self.is_svp64_mode:
pre = yield from self.update_new_svstate_steps()
self.update_nia()
self.update_pc_next()
return
- srcstep, dststep = self.get_src_dststeps()
+ srcstep, dststep, ssubstep, dsubstep = self.get_src_dststeps()
pred_dst_zero = self.pred_dst_zero
pred_src_zero = self.pred_src_zero
vl = self.svstate.vl
+ subvl = yield self.dec2.rm_dec.rm_in.subvl
# VL=0 in SVP64 mode means "do nothing: skip instruction"
if self.is_svp64_mode and vl == 0:
self.pc.update(self.namespace, self.is_svp64_mode)
log("SVP64: VL=0, end of call", self.namespace['CIA'],
- self.namespace['NIA'])
+ self.namespace['NIA'], kind=LogKind.InstrInOuts)
return
# for when SVREMAP is active, using pre-arranged schedule.
if persist or self.last_op_svshape:
remaps = self.get_remap_indices()
if self.is_svp64_mode and (persist or self.last_op_svshape):
- # just some convenient debug info
- for i in range(4):
- sname = 'SVSHAPE%d' % i
- shape = self.spr[sname]
- log(sname, bin(shape.value))
- log(" lims", shape.lims)
- log(" mode", shape.mode)
- log(" skip", shape.skip)
-
- # set up the list of steps to remap
- mi0 = self.svstate.mi0
- mi1 = self.svstate.mi1
- mi2 = self.svstate.mi2
- mo0 = self.svstate.mo0
- mo1 = self.svstate.mo1
- steps = [(self.dec2.in1_step, mi0), # RA
- (self.dec2.in2_step, mi1), # RB
- (self.dec2.in3_step, mi2), # RC
- (self.dec2.o_step, mo0), # RT
- (self.dec2.o2_step, mo1), # EA
- ]
- remap_idxs = self.remap_idxs
- rremaps = []
- # now cross-index the required SHAPE for each of 3-in 2-out regs
- rnames = ['RA', 'RB', 'RC', 'RT', 'EA']
- for i, (dstep, shape_idx) in enumerate(steps):
- (shape, remap) = remaps[shape_idx]
- remap_idx = remap_idxs[shape_idx]
- # zero is "disabled"
- if shape.value == 0x0:
- continue
- # now set the actual requested step to the current index
- yield dstep.eq(remap_idx)
-
- # debug printout info
- rremaps.append((shape.mode, i, rnames[i], shape_idx,
- remap_idx))
- for x in rremaps:
- log("shape remap", x)
+ yield from self.remap_set_steps(remaps)
# after that, settle down (combinatorial) to let Vector reg numbers
# work themselves out
yield Settle()
# main input registers (RT, RA ...)
inputs = []
for name in input_names:
- # using PowerDecoder2, first, find the decoder index.
- # (mapping name RA RB RC RS to in1, in2, in3)
- regnum, is_vec = yield from get_pdecode_idx_in(self.dec2, name)
- if regnum is None:
- # doing this is not part of svp64, it's because output
- # registers, to be modified, need to be in the namespace.
- regnum, is_vec = yield from get_pdecode_idx_out(self.dec2, name)
- if regnum is None:
- regnum, is_vec = yield from get_pdecode_idx_out2(self.dec2,
- name)
-
- # in case getting the register number is needed, _RA, _RB
- regname = "_" + name
- self.namespace[regname] = regnum
- if not self.is_svp64_mode or not pred_src_zero:
- log('reading reg %s %s' % (name, str(regnum)), is_vec)
- if name in fregs:
- reg_val = SelectableInt(self.fpr(regnum))
- elif name is not None:
- reg_val = SelectableInt(self.gpr(regnum))
- else:
- log('zero input reg %s %s' % (name, str(regnum)), is_vec)
- reg_val = 0
- inputs.append(reg_val)
+ log("name", name)
+ regval = (yield from self.get_input(name))
+ log("regval", regval)
+ inputs.append(regval)
+
# arrrrgh, awful hack, to get _RT into namespace
if ins_name in ['setvl', 'svstep']:
regname = "_RT"
# in SVP64 mode for LD/ST work out immediate
# XXX TODO: replace_ds for DS-Form rather than D-Form.
# use info.form to detect
- replace_d = False # update / replace constant in pseudocode
if self.is_svp64_mode:
- ldstmode = yield self.dec2.rm_dec.ldstmode
- # shift mode reads SVD (or SVDS - TODO)
- # *BUT*... because this is "overloading" of LD operations,
- # it gets *STORED* into D (or DS, TODO)
- if ldstmode == SVP64LDSTmode.SHIFT.value:
- imm = yield self.dec2.dec.fields.FormSVD.SVD[0:11]
- imm = exts(imm, 11) # sign-extend to integer
- log("shift SVD", imm)
- replace_d = True
- else:
- if info.form == 'DS':
- # DS-Form, multiply by 4 then knock 2 bits off after
- imm = yield self.dec2.dec.fields.FormDS.DS[0:14] * 4
- else:
- imm = yield self.dec2.dec.fields.FormD.D[0:16]
- imm = exts(imm, 16) # sign-extend to integer
- # get the right step. LD is from srcstep, ST is dststep
- op = yield self.dec2.e.do.insn_type
- offsmul = 0
- if op == MicrOp.OP_LOAD.value:
- if remap_active:
- offsmul = yield self.dec2.in1_step
- log("D-field REMAP src", imm, offsmul)
- else:
- offsmul = srcstep
- log("D-field src", imm, offsmul)
- elif op == MicrOp.OP_STORE.value:
- # XXX NOTE! no bit-reversed STORE! this should not ever be used
- offsmul = dststep
- log("D-field dst", imm, offsmul)
- # bit-reverse mode, rev already done through get_src_dst_steps()
- if ldstmode == SVP64LDSTmode.SHIFT.value:
- # manually look up RC, sigh
- RC = yield self.dec2.dec.RC[0:5]
- RC = self.gpr(RC)
- log("LD-SHIFT:", "VL", vl,
- "RC", RC.value, "imm", imm,
- "offs", bin(offsmul),
- )
- imm = SelectableInt((imm * offsmul) << RC.value, 32)
- # Unit-Strided LD/ST adds offset*width to immediate
- elif ldstmode == SVP64LDSTmode.UNITSTRIDE.value:
- ldst_len = yield self.dec2.e.do.data_len
- imm = SelectableInt(imm + offsmul * ldst_len, 32)
- replace_d = True
- # Element-strided multiplies the immediate by element step
- elif ldstmode == SVP64LDSTmode.ELSTRIDE.value:
- imm = SelectableInt(imm * offsmul, 32)
- replace_d = True
- if replace_d:
- ldst_ra_vec = yield self.dec2.rm_dec.ldst_ra_vec
- ldst_imz_in = yield self.dec2.rm_dec.ldst_imz_in
- log("LDSTmode", SVP64LDSTmode(ldstmode),
- offsmul, imm, ldst_ra_vec, ldst_imz_in)
- # new replacement D... errr.. DS
- if replace_d:
- if info.form == 'DS':
- # TODO: assert 2 LSBs are zero?
- log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm.value))
- imm.value = imm.value >> 2
- self.namespace['DS'] = imm
- else:
- self.namespace['D'] = imm
+ yield from self.check_replace_d(info, remap_active)
# "special" registers
for special in info.special_regs:
# the ALL/ANY mode we can early-exit
if self.is_svp64_mode and ins_name.startswith("sv.bc"):
no_in_vec = yield self.dec2.no_in_vec # BI is scalar
+ # XXX TODO - pack/unpack here
end_loop = no_in_vec or srcstep == vl-1 or dststep == vl-1
self.namespace['end_loop'] = SelectableInt(end_loop, 1)
# detect if CA/CA32 already in outputs (sra*, basically)
already_done = 0
+ output_names = []
if info.write_regs:
output_names = create_args(info.write_regs)
for name in output_names:
if name == 'CA32':
already_done |= 2
- log("carry already done?", bin(already_done))
- if hasattr(self.dec2.e.do, "output_carry"):
- carry_en = yield self.dec2.e.do.output_carry
- else:
- carry_en = False
+ log("carry already done?", bin(already_done), output_names)
+ carry_en = yield self.dec2.e.do.output_carry
if carry_en:
yield from self.handle_carry_(inputs, results, already_done)
+ # check if one of the regs was named "overflow"
+ overflow = None
+ if info.write_regs:
+ for name, output in zip(output_names, results):
+ if name == 'overflow':
+ overflow = output
+
+ # and one called CR0
+ cr0 = None
+ if info.write_regs:
+ for name, output in zip(output_names, results):
+ if name == 'CR0':
+ cr0 = output
+
if not self.is_svp64_mode: # yeah just no. not in parallel processing
# detect if overflow was in return result
- overflow = None
- if info.write_regs:
- for name, output in zip(output_names, results):
- if name == 'overflow':
- overflow = output
-
- if hasattr(self.dec2.e.do, "oe"):
- ov_en = yield self.dec2.e.do.oe.oe
- ov_ok = yield self.dec2.e.do.oe.ok
- else:
- ov_en = False
- ov_ok = False
- log("internal overflow", overflow, ov_en, ov_ok)
+ ov_en = yield self.dec2.e.do.oe.oe
+ ov_ok = yield self.dec2.e.do.oe.ok
+ log("internal overflow", ins_name, overflow, "en?", ov_en, ov_ok)
if ov_en & ov_ok:
yield from self.handle_overflow(inputs, results, overflow)
if not self.is_svp64_mode or not pred_dst_zero:
if hasattr(self.dec2.e.do, "rc"):
rc_en = yield self.dec2.e.do.rc.rc
+ # don't do Rc=1 for svstep it is handled explicitly.
+ # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
+ # to write directly to CR0 instead of in ISACaller. hooyahh.
if rc_en and ins_name not in ['svstep']:
+ yield from self.do_rc_ov(ins_name, results, overflow, cr0)
+
+ # check failfirst
+ rm_mode = yield self.dec2.rm_dec.mode
+ ff_inv = yield self.dec2.rm_dec.inv
+ cr_bit = yield self.dec2.rm_dec.cr_sel
+ log(" ff rm_mode", rc_en, rm_mode, SVP64RMMode.FFIRST.value)
+ log(" inv", ff_inv)
+ log(" cr_bit", cr_bit)
+ ffirst_hit = False
+ if rc_en and rm_mode == SVP64RMMode.FFIRST.value:
regnum, is_vec = yield from get_pdecode_cr_out(self.dec2, "CR0")
- self.handle_comparison(results, regnum)
+ crtest = self.crl[regnum]
+ ffirst_hit = crtest[cr_bit] != ff_inv
+ log("cr test", regnum, int(crtest), crtest, cr_bit, ff_inv)
+ log("cr test?", ffirst_hit)
+ if ffirst_hit:
+ self.svstate.vl = srcstep
+ yield self.dec2.state.svstate.eq(self.svstate.value)
+ yield Settle() # let decoder update
# any modified return results?
+ yield from self.do_outregs_nia(asmop, ins_name, info,
+ output_names, results,
+ carry_en, rc_en, ffirst_hit)
+
+ def do_rc_ov(self, ins_name, results, overflow, cr0):
+ if ins_name.startswith("f"):
+ rc_reg = "CR1" # not calculated correctly yet (not FP compares)
+ else:
+ rc_reg = "CR0"
+ regnum, is_vec = yield from get_pdecode_cr_out(self.dec2, rc_reg)
+ cmps = results
+ # hang on... for `setvl` actually you want to test SVSTATE.VL
+ is_setvl = ins_name == 'setvl'
+ if is_setvl:
+ vl = results[0].vl
+ cmps = (SelectableInt(vl, 64), overflow,)
+ else:
+ overflow = None # do not override overflow except in setvl
+
+ # if there was not an explicit CR0 in the pseudocode, do implicit Rc=1
+ if cr0 is None:
+ self.handle_comparison(cmps, regnum, overflow, no_so=is_setvl)
+ else:
+ # otherwise we just blat CR0 into the required regnum
+ log("explicit rc0", cr0)
+ self.crl[regnum].eq(cr0)
+
+ def do_outregs_nia(self, asmop, ins_name, info, output_names, results,
+ carry_en, rc_en, ffirst_hit):
+ # write out any regs for this instruction
if info.write_regs:
for name, output in zip(output_names, results):
- if name == 'overflow': # ignore, done already (above)
- continue
- if isinstance(output, int):
- output = SelectableInt(output, 256)
- if name in ['CA', 'CA32']:
- if carry_en:
- log("writing %s to XER" % name, output)
- self.spr['XER'][XER_bits[name]] = output.value
- else:
- log("NOT writing %s to XER" % name, output)
- elif name in info.special_regs:
- log('writing special %s' % name, output, special_sprs)
- if name in special_sprs:
- self.spr[name] = output
- else:
- self.namespace[name].eq(output)
- if name == 'MSR':
- log('msr written', hex(self.msr.value))
- else:
- regnum, is_vec = yield from get_pdecode_idx_out(self.dec2,
- name)
- if regnum is None:
- regnum, is_vec = yield from get_pdecode_idx_out2(
- self.dec2, name)
- if regnum is None:
- # temporary hack for not having 2nd output
- regnum = yield getattr(self.decoder, name)
- is_vec = False
- if self.is_svp64_mode and pred_dst_zero:
- log('zeroing reg %d %s' % (regnum, str(output)),
- is_vec)
- output = SelectableInt(0, 256)
- else:
- if name in fregs:
- ftype = 'fpr'
- else:
- ftype = 'gpr'
- log('writing %s %s %s' % (ftype, regnum, str(output)),
- is_vec)
- if output.bits > 64:
- output = SelectableInt(output.value, 64)
- if name in fregs:
- self.fpr[regnum] = output
- else:
- self.gpr[regnum] = output
+ yield from self.check_write(info, name, output, carry_en)
- # check if it is the SVSTATE.src/dest step that needs incrementing
- # this is our Sub-Program-Counter loop from 0 to VL-1
- pre = False
- post = False
- nia_update = True
- if self.allow_next_step_inc:
- log("SVSTATE_NEXT: inc requested, mode",
- self.svstate_next_mode, self.allow_next_step_inc)
- yield from self.svstate_pre_inc()
- pre = yield from self.update_new_svstate_steps()
- if pre:
- # reset at end of loop including exit Vertical Mode
- log("SVSTATE_NEXT: end of loop, reset")
- self.svp64_reset_loop()
- self.svstate.vfirst = 0
- self.update_nia()
- if rc_en:
- results = [SelectableInt(0, 64)]
- self.handle_comparison(results) # CR0
+ if ffirst_hit:
+ self.svp64_reset_loop()
+ nia_update = True
+ else:
+ # check advancement of src/dst/sub-steps and if PC needs updating
+ nia_update = (yield from self.check_step_increment(results, rc_en,
+ asmop, ins_name))
+ if nia_update:
+ self.update_pc_next()
+
+ def check_replace_d(self, info, remap_active):
+ replace_d = False # update / replace constant in pseudocode
+ ldstmode = yield self.dec2.rm_dec.ldstmode
+ vl = self.svstate.vl
+ subvl = yield self.dec2.rm_dec.rm_in.subvl
+ srcstep, dststep = self.new_srcstep, self.new_dststep
+ ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep
+ if info.form == 'DS':
+ # DS-Form, multiply by 4 then knock 2 bits off after
+ imm = yield self.dec2.dec.fields.FormDS.DS[0:14] * 4
+ else:
+ imm = yield self.dec2.dec.fields.FormD.D[0:16]
+ imm = exts(imm, 16) # sign-extend to integer
+ # get the right step. LD is from srcstep, ST is dststep
+ op = yield self.dec2.e.do.insn_type
+ offsmul = 0
+ if op == MicrOp.OP_LOAD.value:
+ if remap_active:
+ offsmul = yield self.dec2.in1_step
+ log("D-field REMAP src", imm, offsmul)
else:
- if self.allow_next_step_inc == 2:
- log("SVSTATE_NEXT: read")
- nia_update = (yield from self.svstate_post_inc(ins_name))
- else:
- log("SVSTATE_NEXT: post-inc")
- # use actual src/dst-step here to check end, do NOT
- # use bit-reversed version
- srcstep, dststep = self.new_srcstep, self.new_dststep
- remaps = self.get_remap_indices()
- remap_idxs = self.remap_idxs
- vl = self.svstate.vl
- end_src = srcstep == vl-1
- end_dst = dststep == vl-1
- if self.allow_next_step_inc != 2:
- if not end_src:
- self.svstate.srcstep += SelectableInt(1, 7)
- if not end_dst:
- self.svstate.dststep += SelectableInt(1, 7)
- self.namespace['SVSTATE'] = self.svstate.spr
- # set CR0 (if Rc=1) based on end
- if rc_en:
- srcstep = self.svstate.srcstep
- dststep = self.svstate.srcstep
- endtest = 1 if (end_src or end_dst) else 0
- #results = [SelectableInt(endtest, 64)]
- # self.handle_comparison(results) # CR0
-
- # see if svstep was requested, if so, which SVSTATE
- endings = 0b111
- if self.svstate_next_mode > 0:
- shape_idx = self.svstate_next_mode.value-1
- endings = self.remap_loopends[shape_idx]
- cr_field = SelectableInt((~endings) << 1 | endtest, 4)
- print("svstep Rc=1, CR0", cr_field)
- self.crl[0].eq(cr_field) # CR0
- if end_src or end_dst:
- # reset at end of loop including exit Vertical Mode
- log("SVSTATE_NEXT: after increments, reset")
- self.svp64_reset_loop()
- self.svstate.vfirst = 0
-
- elif self.is_svp64_mode:
- nia_update = (yield from self.svstate_post_inc(ins_name))
+ offsmul = (srcstep * (subvl+1)) + ssubstep
+ log("D-field src", imm, offsmul)
+ elif op == MicrOp.OP_STORE.value:
+ # XXX NOTE! no bit-reversed STORE! this should not ever be used
+ offsmul = (dststep * (subvl+1)) + dsubstep
+ log("D-field dst", imm, offsmul)
+ # Unit-Strided LD/ST adds offset*width to immediate
+ if ldstmode == SVP64LDSTmode.UNITSTRIDE.value:
+ ldst_len = yield self.dec2.e.do.data_len
+ imm = SelectableInt(imm + offsmul * ldst_len, 32)
+ replace_d = True
+ # Element-strided multiplies the immediate by element step
+ elif ldstmode == SVP64LDSTmode.ELSTRIDE.value:
+ imm = SelectableInt(imm * offsmul, 32)
+ replace_d = True
+ if replace_d:
+ ldst_ra_vec = yield self.dec2.rm_dec.ldst_ra_vec
+ ldst_imz_in = yield self.dec2.rm_dec.ldst_imz_in
+ log("LDSTmode", SVP64LDSTmode(ldstmode),
+ offsmul, imm, ldst_ra_vec, ldst_imz_in)
+ # new replacement D... errr.. DS
+ if replace_d:
+ if info.form == 'DS':
+ # TODO: assert 2 LSBs are zero?
+ log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm.value))
+ imm.value = imm.value >> 2
+ self.namespace['DS'] = imm
+ else:
+ self.namespace['D'] = imm
+
+ def get_input(self, name):
+ # using PowerDecoder2, first, find the decoder index.
+ # (mapping name RA RB RC RS to in1, in2, in3)
+ regnum, is_vec = yield from get_pdecode_idx_in(self.dec2, name)
+ if regnum is None:
+ # doing this is not part of svp64, it's because output
+ # registers, to be modified, need to be in the namespace.
+ regnum, is_vec = yield from get_pdecode_idx_out(self.dec2, name)
+ if regnum is None:
+ regnum, is_vec = yield from get_pdecode_idx_out2(self.dec2, name)
+
+ # in case getting the register number is needed, _RA, _RB
+ regname = "_" + name
+ self.namespace[regname] = regnum
+ if not self.is_svp64_mode or not self.pred_src_zero:
+ log('reading reg %s %s' % (name, str(regnum)), is_vec)
+ if name in fregs:
+ reg_val = SelectableInt(self.fpr(regnum))
+ log("read reg %d: 0x%x" % (regnum, reg_val.value))
+ elif name is not None:
+ reg_val = SelectableInt(self.gpr(regnum))
+ log("read reg %d: 0x%x" % (regnum, reg_val.value))
+ else:
+ log('zero input reg %s %s' % (name, str(regnum)), is_vec)
+ reg_val = 0
+ return reg_val
+
+ def remap_set_steps(self, remaps):
+ """remap_set_steps sets up the in1/2/3 and out1/2 steps.
+ they work in concert with PowerDecoder2 at the moment,
+ there is no HDL implementation of REMAP. therefore this
+ function, because ISACaller still uses PowerDecoder2,
+ will *explicitly* write the dec2.XX_step values. this has
+ to get sorted out.
+ """
+ # just some convenient debug info
+ for i in range(4):
+ sname = 'SVSHAPE%d' % i
+ shape = self.spr[sname]
+ log(sname, bin(shape.value))
+ log(" lims", shape.lims)
+ log(" mode", shape.mode)
+ log(" skip", shape.skip)
+
+ # set up the list of steps to remap
+ mi0 = self.svstate.mi0
+ mi1 = self.svstate.mi1
+ mi2 = self.svstate.mi2
+ mo0 = self.svstate.mo0
+ mo1 = self.svstate.mo1
+ steps = [(self.dec2.in1_step, mi0), # RA
+ (self.dec2.in2_step, mi1), # RB
+ (self.dec2.in3_step, mi2), # RC
+ (self.dec2.o_step, mo0), # RT
+ (self.dec2.o2_step, mo1), # EA
+ ]
+ remap_idxs = self.remap_idxs
+ rremaps = []
+ # now cross-index the required SHAPE for each of 3-in 2-out regs
+ rnames = ['RA', 'RB', 'RC', 'RT', 'EA']
+ for i, (dstep, shape_idx) in enumerate(steps):
+ (shape, remap) = remaps[shape_idx]
+ remap_idx = remap_idxs[shape_idx]
+ # zero is "disabled"
+ if shape.value == 0x0:
+ continue
+ # now set the actual requested step to the current index
+ yield dstep.eq(remap_idx)
+
+ # debug printout info
+ rremaps.append((shape.mode, i, rnames[i], shape_idx, remap_idx))
+ for x in rremaps:
+ log("shape remap", x)
+
+ def check_write(self, info, name, output, carry_en):
+ if name == 'overflow': # ignore, done already (above)
+ return
+ if name == 'CR0': # ignore, done already (above)
+ return
+ if isinstance(output, int):
+ output = SelectableInt(output, 256)
+        # write carry flags
+ if name in ['CA', 'CA32']:
+ if carry_en:
+ log("writing %s to XER" % name, output)
+ log("write XER %s 0x%x" % (name, output.value))
+ self.spr['XER'][XER_bits[name]] = output.value
+ else:
+ log("NOT writing %s to XER" % name, output)
+ return
+ # write special SPRs
+ if name in info.special_regs:
+ log('writing special %s' % name, output, special_sprs)
+ log("write reg %s 0x%x" % (name, output.value))
+ if name in special_sprs:
+ self.spr[name] = output
+ else:
+ self.namespace[name].eq(output)
+ if name == 'MSR':
+ log('msr written', hex(self.msr.value))
+ return
+ # find out1/out2 PR/FPR
+ regnum, is_vec = yield from get_pdecode_idx_out(self.dec2, name)
+ if regnum is None:
+ regnum, is_vec = yield from get_pdecode_idx_out2(self.dec2, name)
+ if regnum is None:
+ # temporary hack for not having 2nd output
+ regnum = yield getattr(self.decoder, name)
+ is_vec = False
+ # convenient debug prefix
+ if name in fregs:
+ reg_prefix = 'f'
+ else:
+ reg_prefix = 'r'
+ # check zeroing due to predicate bit being zero
+ if self.is_svp64_mode and self.pred_dst_zero:
+ log('zeroing reg %d %s' % (regnum, str(output)), is_vec)
+ output = SelectableInt(0, 256)
+ log("write reg %s%d 0x%x" % (reg_prefix, regnum, output.value),
+ kind=LogKind.InstrInOuts)
+        # zero-extend to 64 bit before storing (should use EXT oh well)
+ if output.bits > 64:
+ output = SelectableInt(output.value, 64)
+ if name in fregs:
+ self.fpr[regnum] = output
else:
+ self.gpr[regnum] = output
+
+ def check_step_increment(self, results, rc_en, asmop, ins_name):
+ # check if it is the SVSTATE.src/dest step that needs incrementing
+ # this is our Sub-Program-Counter loop from 0 to VL-1
+ if not self.allow_next_step_inc:
+ if self.is_svp64_mode:
+ return (yield from self.svstate_post_inc(ins_name))
+
# XXX only in non-SVP64 mode!
# record state of whether the current operation was an svshape,
+ # OR svindex!
# to be able to know if it should apply in the next instruction.
# also (if going to use this instruction) should disable ability
# to interrupt in between. sigh.
- self.last_op_svshape = asmop == 'svremap'
+ self.last_op_svshape = asmop in ['svremap', 'svindex',
+ 'svshape2']
+ return True
- if nia_update:
- self.update_pc_next()
+ pre = False
+ post = False
+ nia_update = True
+ log("SVSTATE_NEXT: inc requested, mode",
+ self.svstate_next_mode, self.allow_next_step_inc)
+ yield from self.svstate_pre_inc()
+ pre = yield from self.update_new_svstate_steps()
+ if pre:
+ # reset at end of loop including exit Vertical Mode
+ log("SVSTATE_NEXT: end of loop, reset")
+ self.svp64_reset_loop()
+ self.svstate.vfirst = 0
+ self.update_nia()
+ if not rc_en:
+ return True
+ results = [SelectableInt(0, 64)]
+ self.handle_comparison(results) # CR0
+ return True
+ if self.allow_next_step_inc == 2:
+ log("SVSTATE_NEXT: read")
+ nia_update = (yield from self.svstate_post_inc(ins_name))
+ else:
+ log("SVSTATE_NEXT: post-inc")
+ # use actual src/dst-step here to check end, do NOT
+ # use bit-reversed version
+ srcstep, dststep = self.new_srcstep, self.new_dststep
+ ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep
+ remaps = self.get_remap_indices()
+ remap_idxs = self.remap_idxs
+ vl = self.svstate.vl
+ subvl = yield self.dec2.rm_dec.rm_in.subvl
+ end_src = srcstep == vl-1
+ end_dst = dststep == vl-1
+ if self.allow_next_step_inc != 2:
+ yield from self.advance_svstate_steps(end_src, end_dst)
+ #self.namespace['SVSTATE'] = self.svstate.spr
+ # set CR0 (if Rc=1) based on end
+ if rc_en:
+ endtest = 1 if (end_src or end_dst) else 0
+ #results = [SelectableInt(endtest, 64)]
+ # self.handle_comparison(results) # CR0
+
+ # see if svstep was requested, if so, which SVSTATE
+ endings = 0b111
+ if self.svstate_next_mode > 0:
+ shape_idx = self.svstate_next_mode.value-1
+ endings = self.remap_loopends[shape_idx]
+ cr_field = SelectableInt((~endings) << 1 | endtest, 4)
+ log("svstep Rc=1, CR0", cr_field)
+ self.crl[0].eq(cr_field) # CR0
+ if end_src or end_dst:
+ # reset at end of loop including exit Vertical Mode
+ log("SVSTATE_NEXT: after increments, reset")
+ self.svp64_reset_loop()
+ self.svstate.vfirst = 0
+ return nia_update
def SVSTATE_NEXT(self, mode, submode):
"""explicitly moves srcstep/dststep on to next element, for
self.allow_next_step_inc = submode.value + 1
log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
self.svstate_next_mode = mode
- if self.svstate_next_mode > 0:
+ if self.svstate_next_mode > 0 and self.svstate_next_mode < 5:
shape_idx = self.svstate_next_mode.value-1
return SelectableInt(self.remap_idxs[shape_idx], 7)
+ if self.svstate_next_mode == 5:
+ self.svstate_next_mode = 0
+ return SelectableInt(self.svstate.srcstep, 7)
+ if self.svstate_next_mode == 6:
+ self.svstate_next_mode = 0
+ return SelectableInt(self.svstate.dststep, 7)
return SelectableInt(0, 7)
- def svstate_pre_inc(self):
- """check if srcstep/dststep need to skip over masked-out predicate bits
- """
- # get SVSTATE VL (oh and print out some debug stuff)
- vl = self.svstate.vl
- srcstep = self.svstate.srcstep
- dststep = self.svstate.dststep
- sv_a_nz = yield self.dec2.sv_a_nz
- fft_mode = yield self.dec2.use_svp64_fft
- in1 = yield self.dec2.e.read_reg1.data
- log("SVP64: VL, srcstep, dststep, sv_a_nz, in1 fft, svp64",
- vl, srcstep, dststep, sv_a_nz, in1, fft_mode,
- self.is_svp64_mode)
-
- # get predicate mask (all 64 bits)
- srcmask = dstmask = 0xffff_ffff_ffff_ffff
-
- pmode = yield self.dec2.rm_dec.predmode
- reverse_gear = yield self.dec2.rm_dec.reverse_gear
- sv_ptype = yield self.dec2.dec.op.SV_Ptype
- srcpred = yield self.dec2.rm_dec.srcpred
- dstpred = yield self.dec2.rm_dec.dstpred
- pred_src_zero = yield self.dec2.rm_dec.pred_sz
- pred_dst_zero = yield self.dec2.rm_dec.pred_dz
- if pmode == SVP64PredMode.INT.value:
- srcmask = dstmask = get_predint(self.gpr, dstpred)
- if sv_ptype == SVPtype.P2.value:
- srcmask = get_predint(self.gpr, srcpred)
- elif pmode == SVP64PredMode.CR.value:
- srcmask = dstmask = get_predcr(self.crl, dstpred, vl)
- if sv_ptype == SVPtype.P2.value:
- srcmask = get_predcr(self.crl, srcpred, vl)
- log(" pmode", pmode)
- log(" reverse", reverse_gear)
- log(" ptype", sv_ptype)
- log(" srcpred", bin(srcpred))
- log(" dstpred", bin(dstpred))
- log(" srcmask", bin(srcmask))
- log(" dstmask", bin(dstmask))
- log(" pred_sz", bin(pred_src_zero))
- log(" pred_dz", bin(pred_dst_zero))
-
- # okaaay, so here we simply advance srcstep (TODO dststep)
- # until the predicate mask has a "1" bit... or we run out of VL
- # let srcstep==VL be the indicator to move to next instruction
- if not pred_src_zero:
- while (((1 << srcstep) & srcmask) == 0) and (srcstep != vl):
- log(" skip", bin(1 << srcstep))
- srcstep += 1
- # same for dststep
- if not pred_dst_zero:
- while (((1 << dststep) & dstmask) == 0) and (dststep != vl):
- log(" skip", bin(1 << dststep))
- dststep += 1
-
- # now work out if the relevant mask bits require zeroing
- if pred_dst_zero:
- pred_dst_zero = ((1 << dststep) & dstmask) == 0
- if pred_src_zero:
- pred_src_zero = ((1 << srcstep) & srcmask) == 0
-
- # store new srcstep / dststep
- self.new_srcstep, self.new_dststep = srcstep, dststep
- self.pred_dst_zero, self.pred_src_zero = pred_dst_zero, pred_src_zero
- log(" new srcstep", srcstep)
- log(" new dststep", dststep)
-
def get_src_dststeps(self):
- """gets srcstep and dststep
+ """gets srcstep, dststep, and ssubstep, dsubstep
"""
- return self.new_srcstep, self.new_dststep
+ return (self.new_srcstep, self.new_dststep,
+ self.new_ssubstep, self.new_dsubstep)
def update_new_svstate_steps(self):
# note, do not get the bit-reversed srcstep here!
srcstep, dststep = self.new_srcstep, self.new_dststep
+ ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep
# update SVSTATE with new srcstep
self.svstate.srcstep = srcstep
self.svstate.dststep = dststep
+ self.svstate.ssubstep = ssubstep
+ self.svstate.dsubstep = dsubstep
self.namespace['SVSTATE'] = self.svstate
yield self.dec2.state.svstate.eq(self.svstate.value)
yield Settle() # let decoder update
srcstep = self.svstate.srcstep
dststep = self.svstate.dststep
+ ssubstep = self.svstate.ssubstep
+ dsubstep = self.svstate.dsubstep
+ pack = self.svstate.pack
+ unpack = self.svstate.unpack
vl = self.svstate.vl
+ subvl = yield self.dec2.rm_dec.rm_in.subvl
+ rm_mode = yield self.dec2.rm_dec.mode
+ ff_inv = yield self.dec2.rm_dec.inv
+ cr_bit = yield self.dec2.rm_dec.cr_sel
log(" srcstep", srcstep)
log(" dststep", dststep)
+ log(" pack", pack)
+ log(" unpack", unpack)
+ log(" ssubstep", ssubstep)
+ log(" dsubstep", dsubstep)
log(" vl", vl)
+ log(" subvl", subvl)
+ log(" rm_mode", rm_mode)
+ log(" inv", ff_inv)
+ log(" cr_bit", cr_bit)
# check if end reached (we let srcstep overrun, above)
# nothing needs doing (TODO zeroing): just do next instruction
- return srcstep == vl or dststep == vl
+ return ((ssubstep == subvl and srcstep == vl) or
+ (dsubstep == subvl and dststep == vl))
def svstate_post_inc(self, insn_name, vf=0):
# check if SV "Vertical First" mode is enabled
# this is our Sub-Program-Counter loop from 0 to VL-1
# XXX twin predication TODO
vl = self.svstate.vl
+ subvl = yield self.dec2.rm_dec.rm_in.subvl
mvl = self.svstate.maxvl
srcstep = self.svstate.srcstep
dststep = self.svstate.dststep
+ ssubstep = self.svstate.ssubstep
+ dsubstep = self.svstate.dsubstep
+ pack = self.svstate.pack
+ unpack = self.svstate.unpack
rm_mode = yield self.dec2.rm_dec.mode
reverse_gear = yield self.dec2.rm_dec.reverse_gear
sv_ptype = yield self.dec2.dec.op.SV_Ptype
in_vec = not (yield self.dec2.no_in_vec)
log(" svstate.vl", vl)
log(" svstate.mvl", mvl)
+ log(" rm.subvl", subvl)
log(" svstate.srcstep", srcstep)
log(" svstate.dststep", dststep)
+ log(" svstate.ssubstep", ssubstep)
+ log(" svstate.dsubstep", dsubstep)
+ log(" svstate.pack", pack)
+ log(" svstate.unpack", unpack)
log(" mode", rm_mode)
log(" reverse", reverse_gear)
log(" out_vec", out_vec)
log(" in_vec", in_vec)
log(" sv_ptype", sv_ptype, sv_ptype == SVPtype.P2.value)
- # check if srcstep needs incrementing by one, stop PC advancing
- # svp64 loop can end early if the dest is scalar for single-pred
- # but for 2-pred both src/dest have to be checked.
- # XXX this might not be true! it may just be LD/ST
- if sv_ptype == SVPtype.P2.value:
- svp64_is_vector = (out_vec or in_vec)
- else:
- svp64_is_vector = out_vec
# check if this was an sv.bc* and if so did it succeed
if self.is_svp64_mode and insn_name.startswith("sv.bc"):
end_loop = self.namespace['end_loop']
self.svp64_reset_loop()
self.update_pc_next()
return False
- if svp64_is_vector and srcstep != vl-1 and dststep != vl-1:
- self.svstate.srcstep += SelectableInt(1, 7)
- self.svstate.dststep += SelectableInt(1, 7)
- self.namespace['SVSTATE'] = self.svstate
- # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
- # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
- # this way we keep repeating the same instruction (with new steps)
- self.pc.NIA.value = self.pc.CIA.value
- self.namespace['NIA'] = self.pc.NIA
- log("end of sub-pc call", self.namespace['CIA'],
- self.namespace['NIA'])
- return False # DO NOT allow PC update whilst Sub-PC loop running
-
- # reset loop to zero and update NIA
- self.svp64_reset_loop()
- self.update_nia()
-
- return True
+ # check if srcstep needs incrementing by one, stop PC advancing
+ # but for 2-pred both src/dest have to be checked.
+ # XXX this might not be true! it may just be LD/ST
+ if sv_ptype == SVPtype.P2.value:
+ svp64_is_vector = (out_vec or in_vec)
+ else:
+ svp64_is_vector = out_vec
+ # loops end at the first "hit" (source or dest)
+ end_src = srcstep == vl-1
+ end_dst = dststep == vl-1
+ loopend = ((end_src and ssubstep == subvl) or
+ (end_dst and dsubstep == subvl))
+ log("loopend", svp64_is_vector, loopend, end_src, end_dst,
+ ssubstep == subvl, dsubstep == subvl)
+ if not svp64_is_vector or loopend:
+ # reset loop to zero and update NIA
+ self.svp64_reset_loop()
+ self.update_nia()
+
+ return True
+
+ # still looping, advance and update NIA
+ yield from self.advance_svstate_steps(end_src, end_dst)
+ self.namespace['SVSTATE'] = self.svstate
+
+ # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
+ # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
+ # this way we keep repeating the same instruction (with new steps)
+ self.pc.NIA.value = self.pc.CIA.value
+ self.namespace['NIA'] = self.pc.NIA
+ log("end of sub-pc call", self.namespace['CIA'], self.namespace['NIA'])
+ return False # DO NOT allow PC update whilst Sub-PC loop running
def update_pc_next(self):
# UPDATE program counter
self.pc.update(self.namespace, self.is_svp64_mode)
- self.svstate.spr = self.namespace['SVSTATE']
+ #self.svstate.spr = self.namespace['SVSTATE']
log("end of call", self.namespace['CIA'],
self.namespace['NIA'],
self.namespace['SVSTATE'])
def svp64_reset_loop(self):
self.svstate.srcstep = 0
self.svstate.dststep = 0
+ self.svstate.ssubstep = 0
+ self.svstate.dsubstep = 0
log(" svstate.srcstep loop end (PC to update)")
self.namespace['SVSTATE'] = self.svstate