print ("err", i, err)
self.assertTrue(err < 1e-5)
+ def test_sv_remap_fpmadds_ldbrev_dct_8_mode_4(self):
+ """>>> lst = [# LOAD bit-reversed with half-swap
+ "svshape 8, 1, 1, 6, 0",
+ "svremap 1, 0, 0, 0, 0, 0, 0, 1",
+ "sv.lfsbr 0.v, 4(1), 2",
+ # Inner butterfly, twin +/- MUL-ADD-SUB
+ "svremap 31, 1, 0, 2, 0, 1, 1",
+ "svshape 8, 1, 1, 4, 0",
+ "sv.fdmadds 0.v, 0.v, 0.v, 8.v"
+ # Outer butterfly, iterative sum
+ "svshape 8, 1, 1, 3, 0",
+ "sv.fadds 0.v, 0.v, 0.v"
+ ]
+ runs a full in-place 8-long O(N log2 N) DCT, using both the
+ inner and outer butterfly "REMAP" schedules as well as
+ bit-reversed half-swapped LDs.
+ uses the shorter pre-loaded COS tables: FRC also needs to be on a
+ REMAP schedule
+ """
+ lst = SVP64Asm( ["addi 1, 0, 0x000",
+ "svshape 8, 1, 1, 6, 0",
+ "svremap 1, 0, 0, 0, 0, 0, 0, 1",
+ "sv.lfsbr 0.v, 4(1), 2",
+ "svremap 31, 1, 0, 2, 0, 1, 1",
+ "svshape 8, 1, 1, 4, 0",
+ "sv.fdmadds 0.v, 0.v, 0.v, 8.v",
+ "svshape 8, 1, 1, 3, 0",
+ "sv.fadds 0.v, 0.v, 0.v"
+ ])
+ lst = list(lst)
+
+ # array and coefficients to test
+ avi = [7.0, -9.8, 3.0, -32.3, 2.1, 3.6, 0.7, -0.2]
+
+ # store in memory, in standard (expected) order, FP32s (2 per 8-bytes)
+ # LD will bring them in, in the correct order.
+ mem = {}
+ val = 0
+ for i, a in enumerate(avi):
+ a = SINGLE(fp64toselectable(a)).value
+ if (i % 2) == 0:
+ val = a # even element: hold until its odd partner arrives
+ else:
+ mem[(i//2)*8] = val | (a << 32) # pack even (lo) and odd (hi) FP32 into one 8-byte word
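+ # e.g. avi[0] and avi[1] land together in the doubleword at mem[0]:
+ # low 32 bits hold the FP32 of avi[0], high 32 bits the FP32 of avi[1]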
+
+ # calculate the (shortened) COS tables: 4+2+1 entries, not 4 + 2+2 + 1+1+1+1
+ n = len(avi)
+ ctable = []
+ size = n
+ while size >= 2:
+ halfsize = size // 2
+ for ci in range(halfsize):
+ ctable.append(math.cos((ci + 0.5) * math.pi / size) * 2.0)
+ size //= 2
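+ # for n=8 this gives 4 + 2 + 1 = 7 coefficients in total; the loop below
+ # places them (inverted) into FPRs 8..14, the 8.v operand of sv.fdmadds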
+
+ # store in regfile
+ fprs = [0] * 32
+ for i, c in enumerate(ctable):
+ fprs[i+8] = fp64toselectable(1.0 / c) # invert
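+ # note (assumption for clarity, not taken from the test itself): in the
+ # Lee-style DCT butterfly that the transform2 reference computes, the
+ # difference path is divided by the COS coefficient, roughly:
+ #     lo, hi = lo + hi, (lo - hi) / c
+ # so pre-storing 1.0/c lets the twin mul-add/sub instruction multiply
+ # rather than divide ("lo"/"hi" are illustrative names only)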
+
+ with Program(lst, bigendian=False) as program:
+ sim = self.run_tst_program(program, initial_fprs=fprs,
+ initial_mem=mem)
+ print ("spr svshape0", sim.spr['SVSHAPE0'])
+ print (" xdimsz", sim.spr['SVSHAPE0'].xdimsz)
+ print (" ydimsz", sim.spr['SVSHAPE0'].ydimsz)
+ print (" zdimsz", sim.spr['SVSHAPE0'].zdimsz)
+ print ("spr svshape1", sim.spr['SVSHAPE1'])
+ print ("spr svshape2", sim.spr['SVSHAPE2'])
+ print ("spr svshape3", sim.spr['SVSHAPE3'])
+
+ # expected results from the transform2 reference (full DCT, including the outer iterative sum)
+ res = transform2(avi)
+
+ for i, expected in enumerate(res):
+ print ("i", i, float(sim.fpr(i)), "expected", expected)
+
+ for i, expected in enumerate(res):
+ # convert to Power single
+ expected = DOUBLE2SINGLE(fp64toselectable(expected))
+ expected = float(expected)
+ actual = float(sim.fpr(i))
+ # approximate relative-error check is good enough for this test:
+ # reason: we are comparing FMAC against FMUL-plus-FADD-or-FSUB
+ # and the rounding is different
+ err = abs((actual - expected) / expected)
+ print ("err", i, err)
+ self.assertTrue(err < 1e-5)
+
def run_tst_program(self, prog, initial_regs=None,
svstate=None,
initial_mem=None,
self.assertEqual(sim.gpr(14), SelectableInt(0x202, 64))
self.assertEqual(sim.gpr(15), SelectableInt(0x404, 64))
+ def test_sv_load_store_bitreverse_fp(self):
+ """>>> lst = ["addi 1, 0, 0x0010",
+ "addi 2, 0, 0x0004",
+ "addi 3, 0, 0x0002",
+ "addi 5, 0, 0x101",
+ "addi 6, 0, 0x202",
+ "addi 7, 0, 0x303",
+ "addi 8, 0, 0x404",
+ "sv.std 5.v, 0(1)",
+ "sv.lfdbr 12.v, 4(1), 2"]
+
+ note: bitreverse mode is... odd. it's the butterfly generator
+ from Cooley-Tukey FFT:
+ https://en.wikipedia.org/wiki/Cooley%E2%80%93Tukey_FFT_algorithm#Data_reordering,_bit_reversal,_and_in-place_algorithms
+
+ bitreverse LD is computed as:
+ for i in range(VL):
+ EA = (RA|0) + (EXTS(D) * LDSTsize * bitreverse(i, VL)) << RC
+
+ bitreversal of 0 1 2 3 in binary 0b00 0b01 0b10 0b11
+ produces 0 2 1 3 in binary 0b00 0b10 0b01 0b11
+
+ and thus creates the butterfly needed for one iteration of FFT.
+ the RC (shift) is to be able to offset the LDs by Radix-2 spans
+ """
+ lst = SVP64Asm(["addi 1, 0, 0x0010",
+ "addi 2, 0, 0x0000",
+ "addi 5, 0, 0x101",
+ "addi 6, 0, 0x202",
+ "addi 7, 0, 0x303",
+ "addi 8, 0, 0x404",
+ "sv.std 5.v, 0(1)", # scalar r1 + 0 + wordlen*offs
+ "sv.lfdbr 12.v, 8(1), 2"]) # bit-reversed
+ lst = list(lst)
+
+ # SVSTATE (in this case, VL=4)
+ svstate = SVP64State()
+ svstate.vl = 4 # VL
+ svstate.maxvl = 4 # MAXVL
+ print ("SVSTATE", bin(svstate.asint()))
+
+ fprs = [0] * 32
+
+ with Program(lst, bigendian=False) as program:
+ sim = self.run_tst_program(program, svstate=svstate,
+ initial_fprs=fprs)
+ mem = sim.mem.dump(printout=False)
+ print (mem)
+
+ self.assertEqual(mem, [(16, 0x101),
+ (24, 0x202),
+ (32, 0x303),
+ (40, 0x404),
+ ])
+ print(sim.gpr(1))
+ # from STs
+ self.assertEqual(sim.gpr(5), SelectableInt(0x101, 64))
+ self.assertEqual(sim.gpr(6), SelectableInt(0x202, 64))
+ self.assertEqual(sim.gpr(7), SelectableInt(0x303, 64))
+ self.assertEqual(sim.gpr(8), SelectableInt(0x404, 64))
+ # r1=0x10, doubleword elements: with bit-reversed indexing the
+ # contents of memory are expected at:
+ # element 0: EA = 0x10 + bitrev(0b00)*8 => 0x10
+ # element 1: EA = 0x10 + bitrev(0b01)*8 => 0x10 + 0b10*8 => 0x20
+ # element 2: EA = 0x10 + bitrev(0b10)*8 => 0x10 + 0b01*8 => 0x18
+ # element 3: EA = 0x10 + bitrev(0b11)*8 => 0x10 + 0b11*8 => 0x28
+ # therefore loaded (bit-reversed indexing) into:
+ # f12 => mem[0x10] which was stored from r5
+ # f13 => mem[0x20] which was stored from r7
+ # f14 => mem[0x18] which was stored from r6
+ # f15 => mem[0x28] which was stored from r8
+ self.assertEqual(sim.fpr(12), SelectableInt(0x101, 64))
+ self.assertEqual(sim.fpr(13), SelectableInt(0x303, 64))
+ self.assertEqual(sim.fpr(14), SelectableInt(0x202, 64))
+ self.assertEqual(sim.fpr(15), SelectableInt(0x404, 64))
+
def test_sv_load_store_bitreverse2(self):
""">>> lst = ["addi 1, 0, 0x0010",
"addi 2, 0, 0x0004",
initial_fprs = [0] * 32
simulator = run_tst(prog, initial_regs, svstate=svstate,
initial_fprs=initial_fprs)
+ print ("GPRs")
simulator.gpr.dump()
+ print ("FPRs")
+ simulator.fpr.dump()
return simulator