# LD not VLD! (ldbrx if brev=True)
# this covers unit stride mode and a type of vector offset
function op_ld(RT, RA, brev, op_width, imm_offs, svctx)
- for (int i = 0, int j = 0; i < svctx.VL && j < svctx.VL;):
-
+ for (int i = 0, int j = 0; i < svctx.VL && j < svctx.VL):
if not svctx.unit/el-strided:
# strange vector mode, compute 64 bit address which is
# not polymorphic! elwidth hardcoded to 64 here
bytereverse = brev XNOR MSR.LE
# read the underlying memory
- memread <= mem[srcbase + imm_offs];
+ memread <= MEM(srcbase + imm_offs, op_width)
# optionally performs byteswap at op width
if (bytereverse):
i++;
j++;
+For LD/Indexed, the key point is that, in the calculation of the Effective Address,
+RA has no elwidth override but RB does.
+
+ # LD not VLD!
+ function op_ld(RT, RA, RB, op_width, svctx)
+ for (int i = 0, int j = 0; i < svctx.VL && j < svctx.VL):
+ if not svctx.el-strided:
+ # RA not polymorphic! elwidth hardcoded to 64 here
+ srcbase = get_polymorphed_reg(RA, 64, i)
+ else:
+ # element stride mode, again RA not polymorphic
+ srcbase = get_polymorphed_reg(RA, 64, 0)
+ # RB *is* polymorphic
+ offs = get_polymorphed_reg(RB, svctx.src_elwidth, i)
+ # sign-extend
+ if svctx.SEA: offs = sext(offs, svctx.src_elwidth, 64)
+ # read the underlying memory
+ memread <= MEM(srcbase + offs, op_width)
+ # proceed to check saturation
+ ...
+ ...
+
# Remapped LD/ST
The [[sv/remap]] page describes the concept of "Remapping".