647e793
2 from copy
import deepcopy
4 from nmutil
.formaltest
import FHDLTestCase
5 from openpower
.decoder
.helpers
import fp64toselectable
6 from openpower
.decoder
.isa
.caller
import SVP64State
7 from openpower
.decoder
.isa
.remap_dct_yield
import halfrev2
, reverse_bits
8 from openpower
.decoder
.isa
.test_caller
import run_tst
9 from openpower
.decoder
.selectable_int
import SelectableInt
10 from openpower
.simulator
.program
import Program
11 from openpower
.sv
.trans
.svp64
import SVP64Asm
def write_byte(mem, addr, val):
    """Write one byte *val* into *mem* at byte-address *addr*.

    *mem* is a dict mapping 8-byte-aligned addresses to 64-bit
    little-endian words (the simulator's memory representation).
    The containing word is fetched (defaulting to zero), the byte
    lane at the requested offset is cleared and replaced with *val*,
    and the result is stored back masked to 64 bits.
    """
    # split into the aligned word address and the bit offset of the
    # byte lane within that word
    addr, offs = (addr // 8) * 8, (addr % 8) * 8
    # NOTE(review): the visible fragment was missing this mask
    # definition (it is referenced below); restored as the byte-lane
    # mask implied by the surrounding logic.
    mask = 0xff << offs
    value = mem.get(addr, 0) & ~mask
    value = value | (val << offs)
    mem[addr] = value & 0xffff_ffff_ffff_ffff
22 class DecoderTestCase(FHDLTestCase
):
24 def _check_regs(self
, sim
, expected
):
26 self
.assertEqual(sim
.gpr(i
), SelectableInt(expected
[i
], 64))
28 def _check_fpregs(self
, sim
, expected
):
30 self
.assertEqual(sim
.fpr(i
), SelectableInt(expected
[i
], 64))
32 def test_sv_load_store_strncpy(self
):
36 strncpy using post-increment ld/st, sv.bc, and data-dependent ffirst
41 "mtspr 9, 3", # move r3 to CTR
42 "addi 0,0,0", # initialise r0 to zero
43 # chr-copy loop starts here:
44 # for (i = 0; i < n && src[i] != '\0'; i++)
46 # VL (and r1) = MIN(CTR,MAXVL=4)
47 "setvl 1,0,%d,0,1,1" % maxvl
,
48 # load VL bytes (update r10 addr)
49 "sv.lbzu/pi *16, 1(10)",
50 "sv.cmpi/ff=eq/vli *0,1,*16,0", # compare against zero, truncate VL
51 # store VL bytes (update r12 addr)
52 "sv.stbu/pi *16, 1(12)",
53 "sv.bc/all 0, *2, -0x1c", # test CTR, stop if cmpi failed
54 # zeroing loop starts here:
57 # VL (and r1) = MIN(CTR,MAXVL=4)
58 "setvl 1,0,%d,0,1,1" % maxvl
,
59 # store VL zeros (update r12 addr)
60 "sv.stbu/pi 0, 1(12)",
61 "sv.bc 16, *0, -0xc", # decrement CTR by VL, stop at zero
66 tst_string
= "hello\x00bye\x00"
67 initial_regs
= [0] * 32
68 initial_regs
[3] = len(tst_string
) # including the zero
69 initial_regs
[10] = 16 # load address
70 initial_regs
[12] = 40 # store address
72 # some memory with identifying garbage in it
73 initial_mem
= {16: 0xf0f1_f2f3_f4f5_f6f7,
74 24: 0x4041_4243_4445_4647,
75 40: 0x8081_8283_8485_8687,
76 48: 0x9091_9293_9495_9697,
79 for i
, c
in enumerate(tst_string
):
80 write_byte(initial_mem
, 16+i
, ord(c
))
82 # now get the expected results: copy the string to the other address,
83 # but terminate at first zero (strncpy, duh)
84 expected_mem
= deepcopy(initial_mem
)
87 for i
, c
in enumerate(tst_string
):
90 write_byte(expected_mem
, 40+i
, c
)
93 write_byte(expected_mem
, 40+i
, 0)
97 with
Program(lst
, bigendian
=False) as program
:
98 sim
= self
.run_tst_program(program
, initial_mem
=initial_mem
,
99 initial_regs
=initial_regs
)
100 mem
= sim
.mem
.dump(printout
=True, asciidump
=True)
102 # contents of memory expected at:
103 # element 0: r1=0x10, D=24, => EA = 0x10+24*0 = 16 (0x10)
104 # element 1: r1=0x10, D=24, => EA = 0x10+24*1 = 40 (0x28)
105 # therefore, at address 0x10 ==> 0x1234
106 # therefore, at address 0x28 ==> 0x1235
107 for (k
, val
) in expected_mem
.items():
108 print("mem, val", k
, hex(val
))
109 self
.assertEqual(mem
, list(expected_mem
.items()))
111 # reg 10 (the LD EA) is expected to be nearest
112 # 16 + strlen, rounded up
113 rounded
= ((strlen
+maxvl
-1) // maxvl
) * maxvl
114 self
.assertEqual(sim
.gpr(10), SelectableInt(16+rounded
, 64))
115 # whereas reg 10 (the ST EA) is expected to be 40+strlen
116 self
.assertEqual(sim
.gpr(12), SelectableInt(
117 40+len(tst_string
), 64))
119 def test_sv_load_store_postinc(self
):
120 """>>> lst = ["addi 20, 0, 0x0010",
124 "sv.stwu/pi *4, 24(20)",
125 "sv.lwu/pi *8, 24(20)"]
127 element stride is computed as:
129 EA = (RA|0) + EXTS(D) * i
131 load-update with post-increment will do this however:
134 EA = (RA|0) + EXTS(D)
135 RA = EA # update RA *after*
137 whereas without post-increment it would be:
139 EA = (RA|0) + EXTS(D) # EA calculated (and used) *BEFORE* load
141 RA = EA # still updated after but it's used before
143 lst
= SVP64Asm(["addi 20, 0, 0x0010",
144 "addi 22, 0, 0x0010",
148 "sv.stwu/pi *4, 24(22)", # scalar r22 += 24 on update
149 "sv.lwzu/pi *8, 24(20)" # scalar r20 += 24 on update
153 # SVSTATE (in this case, VL=2)
154 svstate
= SVP64State()
156 svstate
.maxvl
= 2 # MAXVL
157 print("SVSTATE", bin(svstate
.asint()))
159 with
Program(lst
, bigendian
=False) as program
:
160 sim
= self
.run_tst_program(program
, svstate
=svstate
)
161 mem
= sim
.mem
.dump(printout
=False)
163 # contents of memory expected at:
164 # element 0: r1=0x10, D=24, => EA = 0x10+24*0 = 16 (0x10)
165 # element 1: r1=0x10, D=24, => EA = 0x10+24*1 = 40 (0x28)
166 # therefore, at address 0x10 ==> 0x1234
167 # therefore, at address 0x28 ==> 0x1235
168 expected_mem
= [(16, 0x1234),
170 self
.assertEqual(mem
, expected_mem
)
172 self
.assertEqual(sim
.gpr(8), SelectableInt(0x1234, 64))
173 self
.assertEqual(sim
.gpr(9), SelectableInt(0x1235, 64))
174 # reg 20 (the EA) is expected to be the initial 16,
175 # plus 2x24 (2 lots of immediates). 16+2*24=64
176 self
.assertEqual(sim
.gpr(20), SelectableInt(64, 64))
177 # likewise, reg 22 - for the store - also 16+2*24.
178 self
.assertEqual(sim
.gpr(22), SelectableInt(64, 64))
180 def test_sv_load_store_elementstride(self
):
181 """>>> lst = ["addi 2, 0, 0x0010",
185 "sv.stw/els *4, 16(2)",
186 "sv.lwz/els *8, 16(2)"]
188 note: element stride mode is only enabled when RA is a scalar
189 and when the immediate is non-zero
191 element stride is computed as:
193 EA = (RA|0) + EXTS(D) * i
195 lst
= SVP64Asm(["addi 2, 0, 0x0010",
199 "sv.stw/els *4, 24(2)", # scalar r1 + 16 + 24*offs
200 "sv.lwz/els *8, 24(2)"]) # scalar r1 + 16 + 24*offs
203 # SVSTATE (in this case, VL=2)
204 svstate
= SVP64State()
206 svstate
.maxvl
= 2 # MAXVL
207 print("SVSTATE", bin(svstate
.asint()))
209 with
Program(lst
, bigendian
=False) as program
:
210 sim
= self
.run_tst_program(program
, svstate
=svstate
)
211 mem
= sim
.mem
.dump(printout
=False)
213 # contents of memory expected at:
214 # element 0: r1=0x10, D=24, => EA = 0x10+24*0 = 16 (0x10)
215 # element 1: r1=0x10, D=24, => EA = 0x10+24*1 = 40 (0x28)
216 # therefore, at address 0x10 ==> 0x1234
217 # therefore, at address 0x28 ==> 0x1235
218 expected_mem
= [(16, 0x1234),
220 self
.assertEqual(mem
, expected_mem
)
222 self
.assertEqual(sim
.gpr(8), SelectableInt(0x1234, 64))
223 self
.assertEqual(sim
.gpr(9), SelectableInt(0x1235, 64))
225 def test_sv_load_store_unitstride(self
):
226 """>>> lst = ["addi 1, 0, 0x0010",
233 note: unit stride mode is only enabled when RA is a scalar.
235 unit stride is computed as:
237 EA = (RA|0) + EXTS(D) + LDSTsize * i
238 where for stw and lwz, LDSTsize is 4 because it is 32-bit words
240 lst
= SVP64Asm(["addi 1, 0, 0x0010",
244 "sv.stw *8, 8(1)", # scalar r1 + 8 + wordlen*offs
245 "sv.lwz *12, 8(1)"]) # scalar r1 + 8 + wordlen*offs
248 # SVSTATE (in this case, VL=2)
249 svstate
= SVP64State()
251 svstate
.maxvl
= 2 # MAXVL
252 print("SVSTATE", bin(svstate
.asint()))
254 with
Program(lst
, bigendian
=False) as program
:
255 sim
= self
.run_tst_program(program
, svstate
=svstate
)
256 mem
= sim
.mem
.dump(printout
=False)
259 # contents of memory expected at:
260 # element 0: r1=0x10, D=8, wordlen=4 => EA = 0x10+8+4*0 = 0x24
261 # element 1: r1=0x10, D=8, wordlen=4 => EA = 0x10+8+4*8 = 0x28
262 # therefore, at address 0x24 ==> 0x1234
263 # therefore, at address 0x28 ==> 0x1235
264 self
.assertEqual(mem
, [(24, 0x123500001234)])
266 self
.assertEqual(sim
.gpr(12), SelectableInt(0x1234, 64))
267 self
.assertEqual(sim
.gpr(13), SelectableInt(0x1235, 64))
269 @unittest.skip("deprecated, needs Scalar LDST-shifted")
270 def test_sv_load_store_shifted(self
):
271 """>>> lst = ["addi 1, 0, 0x0010",
279 "sv.lwzsh *12, 4(1), 2"]
281 shifted LD is computed as:
283 EA = (RA|0) + (EXTS(D) * LDSTsize * i) << RC
285 lst
= SVP64Asm(["addi 1, 0, 0x0010",
291 "sv.stw *4, 0(1)", # scalar r1 + 0 + wordlen*offs
292 "sv.lwzsh *12, 4(1), 2"]) # bit-reversed
295 # SVSTATE (in this case, VL=4)
296 svstate
= SVP64State()
298 svstate
.maxvl
= 4 # MAXVL
299 print("SVSTATE", bin(svstate
.asint()))
301 with
Program(lst
, bigendian
=False) as program
:
302 sim
= self
.run_tst_program(program
, svstate
=svstate
)
303 mem
= sim
.mem
.dump(printout
=False)
306 self
.assertEqual(mem
, [(16, 0x020200000101),
307 (24, 0x040400000303)])
310 self
.assertEqual(sim
.gpr(4), SelectableInt(0x101, 64))
311 self
.assertEqual(sim
.gpr(5), SelectableInt(0x202, 64))
312 self
.assertEqual(sim
.gpr(6), SelectableInt(0x303, 64))
313 self
.assertEqual(sim
.gpr(7), SelectableInt(0x404, 64))
314 # r1=0x10, RC=0, offs=4: contents of memory expected at:
315 # element 0: EA = r1 + 0b00*4 => 0x10 + 0b00*4 => 0x10
316 # element 1: EA = r1 + 0b01*4 => 0x10 + 0b01*4 => 0x18
317 # element 2: EA = r1 + 0b10*4 => 0x10 + 0b10*4 => 0x14
318 # element 3: EA = r1 + 0b11*4 => 0x10 + 0b11*4 => 0x1c
319 # therefore loaded from (bit-reversed indexing):
320 # r9 => mem[0x10] which was stored from r5
321 # r10 => mem[0x18] which was stored from r6
322 # r11 => mem[0x18] which was stored from r7
323 # r12 => mem[0x1c] which was stored from r8
324 self
.assertEqual(sim
.gpr(12), SelectableInt(0x101, 64))
325 self
.assertEqual(sim
.gpr(13), SelectableInt(0x202, 64))
326 self
.assertEqual(sim
.gpr(14), SelectableInt(0x303, 64))
327 self
.assertEqual(sim
.gpr(15), SelectableInt(0x404, 64))
329 @unittest.skip("deprecated, needs Scalar LDST-shifted")
330 def test_sv_load_store_shifted_fp(self
):
331 """>>> lst = ["addi 1, 0, 0x0010",
339 "sv.lfdbr *12, 4(1), 2"]
341 shifted LD is computed as:
343 EA = (RA|0) + (EXTS(D) * LDSTsize * i) << RC
345 lst
= SVP64Asm(["addi 1, 0, 0x0010",
351 "sv.std *4, 0(1)", # scalar r1 + 0 + wordlen*offs
352 "sv.lfdsh *12, 8(1), 2"]) # shifted
355 # SVSTATE (in this case, VL=4)
356 svstate
= SVP64State()
358 svstate
.maxvl
= 4 # MAXVL
359 print("SVSTATE", bin(svstate
.asint()))
363 with
Program(lst
, bigendian
=False) as program
:
364 sim
= self
.run_tst_program(program
, svstate
=svstate
,
366 mem
= sim
.mem
.dump(printout
=False)
369 self
.assertEqual(mem
, [(16, 0x101),
376 self
.assertEqual(sim
.gpr(4), SelectableInt(0x101, 64))
377 self
.assertEqual(sim
.gpr(5), SelectableInt(0x202, 64))
378 self
.assertEqual(sim
.gpr(6), SelectableInt(0x303, 64))
379 self
.assertEqual(sim
.gpr(7), SelectableInt(0x404, 64))
380 # r1=0x10, RC=0, offs=4: contents of memory expected at:
381 # element 0: EA = r1 + bitrev(0b00)*4 => 0x10 + 0b00*4 => 0x10
382 # element 1: EA = r1 + bitrev(0b01)*4 => 0x10 + 0b10*4 => 0x18
383 # element 2: EA = r1 + bitrev(0b10)*4 => 0x10 + 0b01*4 => 0x14
384 # element 3: EA = r1 + bitrev(0b11)*4 => 0x10 + 0b10*4 => 0x1c
385 # therefore loaded from (bit-reversed indexing):
386 # r9 => mem[0x10] which was stored from r5
387 # r10 => mem[0x18] which was stored from r6
388 # r11 => mem[0x18] which was stored from r7
389 # r12 => mem[0x1c] which was stored from r8
390 self
.assertEqual(sim
.fpr(12), SelectableInt(0x101, 64))
391 self
.assertEqual(sim
.fpr(13), SelectableInt(0x202, 64))
392 self
.assertEqual(sim
.fpr(14), SelectableInt(0x303, 64))
393 self
.assertEqual(sim
.fpr(15), SelectableInt(0x404, 64))
395 @unittest.skip("deprecated, needs Scalar LDST-shifted")
396 def test_sv_load_store_shifted2(self
):
397 """>>> lst = ["addi 1, 0, 0x0010",
401 "sv.lfssh *12, 4(1), 2"]
403 shifted LD is computed as:
405 EA = (RA|0) + (EXTS(D) * LDSTsize * i) << RC
408 lst
= SVP64Asm(["addi 1, 0, 0x0010",
410 "sv.stfs *4, 0(1)", # scalar r1 + 0 + wordlen*offs
411 "sv.lfssh *12, 4(1), 2"]) # shifted (by zero, but hey)
414 # SVSTATE (in this case, VL=4)
415 svstate
= SVP64State()
417 svstate
.maxvl
= 4 # MAXVL
418 print("SVSTATE", bin(svstate
.asint()))
423 fprs
[4] = fp64toselectable(1.0)
424 fprs
[5] = fp64toselectable(2.0)
425 fprs
[6] = fp64toselectable(3.0)
426 fprs
[7] = fp64toselectable(4.0)
428 # expected results, remember that bit-reversed load has been done
429 expected_fprs
= deepcopy(fprs
)
430 expected_fprs
[12] = fprs
[4] # 0b00 -> 0b00
431 expected_fprs
[13] = fprs
[5] # 0b10 -> 0b01
432 expected_fprs
[14] = fprs
[6] # 0b01 -> 0b10
433 expected_fprs
[15] = fprs
[7] # 0b11 -> 0b11
435 with
Program(lst
, bigendian
=False) as program
:
436 sim
= self
.run_tst_program(program
, svstate
=svstate
,
438 mem
= sim
.mem
.dump(printout
=False)
445 # self.assertEqual(mem, [(16, 0x020200000101),
446 # (24, 0x040400000303)])
447 self
._check
_fpregs
(sim
, expected_fprs
)
449 def test_sv_load_store_remap_matrix(self
):
450 """>>> lst = ["addi 1, 0, 0x0010",
457 "sv.stw *4, 0(1)", # scalar r1 + 0 + wordlen*offs
458 "svshape 3, 3, 4, 0, 0",
459 "svremap 1, 1, 2, 0, 0, 0, 0",
463 REMAPed a LD operation via a Matrix Multiply Schedule,
464 which is set up as 3x4 result
466 lst
= SVP64Asm(["addi 1, 0, 0x0010",
483 "sv.stw *4, 0(1)", # scalar r1 + 0 + wordlen*offs
484 "svshape 3, 3, 4, 0, 0",
485 "svremap 1, 1, 2, 0, 0, 0, 0",
490 # SVSTATE (in this case, VL=4)
491 svstate
= SVP64State()
493 svstate
.maxvl
= 12 # MAXVL
494 print("SVSTATE", bin(svstate
.asint()))
498 with
Program(lst
, bigendian
=False) as program
:
499 sim
= self
.run_tst_program(program
, svstate
=svstate
,
501 mem
= sim
.mem
.dump(printout
=False)
505 self
.assertEqual(mem
, [(16, 0x020200000101),
506 (24, 0x040400000303),
507 (32, 0x060600000505),
508 (40, 0x080800000707),
509 (48, 0x0a0a00000909),
510 (56, 0x0c0c00000b0b)])
513 self
.assertEqual(sim
.gpr(4), SelectableInt(0x101, 64))
514 self
.assertEqual(sim
.gpr(5), SelectableInt(0x202, 64))
515 self
.assertEqual(sim
.gpr(6), SelectableInt(0x303, 64))
516 self
.assertEqual(sim
.gpr(7), SelectableInt(0x404, 64))
517 self
.assertEqual(sim
.gpr(8), SelectableInt(0x505, 64))
518 self
.assertEqual(sim
.gpr(9), SelectableInt(0x606, 64))
519 self
.assertEqual(sim
.gpr(10), SelectableInt(0x707, 64))
520 self
.assertEqual(sim
.gpr(11), SelectableInt(0x808, 64))
521 # combination of bit-reversed load with a Matrix REMAP
524 self
.assertEqual(sim
.gpr(20+i
), SelectableInt(0x101, 64))
525 self
.assertEqual(sim
.gpr(23+i
), SelectableInt(0x505, 64))
526 self
.assertEqual(sim
.gpr(26+i
), SelectableInt(0x909, 64))
527 self
.assertEqual(sim
.gpr(29+i
), SelectableInt(0x202, 64))
529 def test_sv_load_store_bitreverse_remap_halfswap(self
):
530 """>>> lst = ["addi 1, 0, 0x0010",
541 "svshape 8, 1, 1, 6, 0",
542 "svremap 31, 1, 2, 3, 0, 0, 0",
543 "sv.lwz/els *12, 4(1)"]
545 shifted LD is computed as:
547 EA = (RA|0) + (EXTS(D) * LDSTsize * i) << RC
549 bitreversal of 0 1 2 3 in binary 0b00 0b01 0b10 0b11
550 produces 0 2 1 3 in binary 0b00 0b10 0b01 0b11
552 and thus creates the butterfly needed for one iteration of FFT.
553 the RC (shift) is to be able to offset the LDs by Radix-2 spans
555 on top of the bit-reversal is a REMAP for half-swaps for DCT
558 lst
= SVP64Asm(["addi 1, 0, 0x0010",
568 "sv.stw *4, 0(1)", # scalar r1 + 0 + wordlen*offs
569 "svshape 8, 1, 1, 6, 0",
570 "svremap 1, 0, 0, 0, 0, 0, 0",
571 #"setvl 0, 0, 8, 0, 1, 1",
572 "sv.lwz/els *12, 4(1)",
577 # SVSTATE (in this case, VL=4)
578 svstate
= SVP64State()
580 svstate
.maxvl
= 8 # MAXVL
581 print("SVSTATE", bin(svstate
.asint()))
585 avi
= [0x001, 0x102, 0x203, 0x304, 0x405, 0x506, 0x607, 0x708]
587 levels
= n
.bit_length() - 1
589 ri
= [ri
[reverse_bits(i
, levels
)] for i
in range(n
)]
590 av
= halfrev2(avi
, False)
591 av
= [av
[ri
[i
]] for i
in range(n
)]
593 with
Program(lst
, bigendian
=False) as program
:
594 sim
= self
.run_tst_program(program
, svstate
=svstate
,
596 mem
= sim
.mem
.dump(printout
=False)
600 self
.assertEqual(mem
, [(16, 0x010200000001),
601 (24, 0x030400000203),
602 (32, 0x050600000405),
603 (40, 0x070800000607)])
605 for i
in range(len(avi
)):
606 print("st gpr", i
, sim
.gpr(i
+4), hex(avi
[i
]))
607 for i
in range(len(avi
)):
608 self
.assertEqual(sim
.gpr(i
+4), avi
[i
])
609 # combination of bit-reversed load with a DCT half-swap REMAP
611 for i
in range(len(avi
)):
612 print("ld gpr", i
, sim
.gpr(i
+12), hex(av
[i
]))
613 for i
in range(len(avi
)):
614 self
.assertEqual(sim
.gpr(i
+12), av
[i
])
616 def test_sv_load_store_bitreverse_remap_halfswap_idct(self
):
617 """>>> lst = ["addi 1, 0, 0x0010",
628 "svshape 8, 1, 1, 6, 0",
629 "svremap 31, 1, 2, 3, 0, 0, 0",
630 "sv.lwz/els *12, 4(1)"]
632 bitreverse LD is computed as:
634 EA = (RA|0) + (EXTS(D) * LDSTsize * i) << RC
636 bitreversal of 0 1 2 3 in binary 0b00 0b01 0b10 0b11
637 produces 0 2 1 3 in binary 0b00 0b10 0b01 0b11
639 and thus creates the butterfly needed for one iteration of FFT.
640 the RC (shift) is to be able to offset the LDs by Radix-2 spans
642 on top of the bit-reversal is a REMAP for half-swaps for DCT
645 lst
= SVP64Asm(["addi 1, 0, 0x0010",
655 "sv.stw *4, 0(1)", # scalar r1 + 0 + wordlen*offs
656 "svshape 8, 1, 1, 14, 0",
657 "svremap 16, 0, 0, 0, 0, 0, 0",
658 #"setvl 0, 0, 8, 0, 1, 1",
659 "sv.lwz/els *12, 4(1)",
664 # SVSTATE (in this case, VL=4)
665 svstate
= SVP64State()
667 svstate
.maxvl
= 8 # MAXVL
668 print("SVSTATE", bin(svstate
.asint()))
672 avi
= [0x001, 0x102, 0x203, 0x304, 0x405, 0x506, 0x607, 0x708]
674 levels
= n
.bit_length() - 1
676 ri
= [ri
[reverse_bits(i
, levels
)] for i
in range(n
)]
677 av
= [avi
[ri
[i
]] for i
in range(n
)]
678 av
= halfrev2(av
, True)
680 with
Program(lst
, bigendian
=False) as program
:
681 sim
= self
.run_tst_program(program
, svstate
=svstate
,
683 mem
= sim
.mem
.dump(printout
=False)
687 self
.assertEqual(mem
, [(16, 0x010200000001),
688 (24, 0x030400000203),
689 (32, 0x050600000405),
690 (40, 0x070800000607)])
692 for i
in range(len(avi
)):
693 print("st gpr", i
, sim
.gpr(i
+4), hex(avi
[i
]))
694 for i
in range(len(avi
)):
695 self
.assertEqual(sim
.gpr(i
+4), avi
[i
])
696 # combination of bit-reversed load with a DCT half-swap REMAP
698 for i
in range(len(avi
)):
699 print("ld gpr", i
, sim
.gpr(i
+12), hex(av
[i
]))
700 for i
in range(len(avi
)):
701 self
.assertEqual(sim
.gpr(i
+12), av
[i
])
703 def run_tst_program(self
, prog
, initial_regs
=None,
704 svstate
=None, initial_fprs
=None,
706 if initial_regs
is None:
707 initial_regs
= [0] * 32
708 if initial_fprs
is None:
709 initial_fprs
= [0] * 32
710 simulator
= run_tst(prog
, initial_regs
, svstate
=svstate
,
711 initial_fprs
=initial_fprs
,
720 if __name__
== "__main__":