1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Copyright (C) 2020 Michael Nolan
4 # Funded by NLnet http://nlnet.nl
5 """core of the python-based POWER9 simulator
7 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
8 not speed, it is for both learning and educational purposes, as well as
9 a method of verifying the HDL.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=424
16 from collections
import namedtuple
17 from copy
import deepcopy
18 from functools
import wraps
20 from nmigen
.sim
import Settle
21 from openpower
.consts
import (MSRb
, PIb
, # big-endian (PowerISA versions)
22 SVP64CROffs
, SVP64MODEb
)
23 from openpower
.decoder
.helpers
import (ISACallerHelper
, ISAFPHelpers
, exts
,
25 from openpower
.decoder
.isa
.mem
import Mem
, MemException
26 from openpower
.decoder
.isa
.radixmmu
import RADIX
27 from openpower
.decoder
.isa
.svshape
import SVSHAPE
28 from openpower
.decoder
.isa
.svstate
import SVP64State
29 from openpower
.decoder
.orderedset
import OrderedSet
30 from openpower
.decoder
.power_enums
import (FPTRANS_INSNS
, CRInSel
, CROutSel
,
31 In1Sel
, In2Sel
, In3Sel
, LDSTMode
,
32 MicrOp
, OutSel
, SVMode
,
33 SVP64LDSTmode
, SVP64PredCR
,
34 SVP64PredInt
, SVP64PredMode
,
35 SVP64RMMode
, SVPtype
, XER_bits
,
36 insns
, spr_byname
, spr_dict
)
37 from openpower
.decoder
.power_insn
import SVP64Instruction
38 from openpower
.decoder
.power_svp64
import SVP64RM
, decode_extra
39 from openpower
.decoder
.selectable_int
import (FieldSelectableInt
,
40 SelectableInt
, selectconcat
)
41 from openpower
.util
import LogKind
, log
# record of everything the pywriter compiler reports about one decoded
# instruction: the python function implementing its pseudocode, plus the
# registers it reads/writes and its encoding metadata (form, asm regs)
instruction_info = namedtuple('instruction_info',
                              'func read_regs uninit_regs write_regs ' +
                              'special_regs op_fields form asmregs')
55 # rrright. this is here basically because the compiler pywriter returns
56 # results in a specific priority order. to make sure regs match up they
57 # need partial sorting. sigh.
59 # TODO (lkcl): adjust other registers that should be in a particular order
60 # probably CA, CA32, and CR
86 "overflow": 7, # should definitely be last
90 fregs
= ['FRA', 'FRB', 'FRC', 'FRS', 'FRT']
def get_masked_reg(regs, base, offs, ew_bits):
    """Extract one ew_bits-wide element from a bank of 64-bit registers.

    regs    -- indexable register file holding plain 64-bit int values
    base    -- index of the first 64-bit register of the group
    offs    -- element offset, counted in ew_bits-wide elements
    ew_bits -- element width in bits (divides 64)

    Returns the selected element as a plain int.
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64//ew_bits)
    gpr_col = offs % (64//ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits)-1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base+gpr_offs]
    # shift the wanted element down to bit 0 and mask it to ew_bits.
    # BUGFIX: previous code did val & ~(mask << col) before shifting,
    # which *cleared* the wanted element and returned the other bits.
    return (val >> (gpr_col*ew_bits)) & mask
def set_masked_reg(regs, base, offs, ew_bits, value):
    """Insert one ew_bits-wide element into a bank of 64-bit registers,
    leaving the neighbouring elements of the same 64-bit register intact.

    regs    -- indexable register file holding plain 64-bit int values
    base    -- index of the first 64-bit register of the group
    offs    -- element offset, counted in ew_bits-wide elements
    ew_bits -- element width in bits (divides 64)
    value   -- new element value (truncated to ew_bits)
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64//ew_bits)
    gpr_col = offs % (64//ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits)-1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base+gpr_offs]
    # now mask out the bits we are about to overwrite
    val = val & ~(mask << (gpr_col*ew_bits))
    # then wipe the bits we don't want from the value.  BUGFIX: this
    # statement was missing, so an oversized value would corrupt the
    # neighbouring elements when OR'd in below.
    value = value & mask
    # OR the new value in, shifted up
    val |= value << (gpr_col*ew_bits)
    regs[base+gpr_offs] = val
def create_args(reglist, extra=None):
    """Return reglist de-duplicated (order-preserving via OrderedSet) and
    stable-sorted by REG_SORT_ORDER priority (unknown names sort as 0).

    extra -- optional value prepended to the result (e.g. a namespace arg)

    BUGFIX: the visible code fell off the end when extra is None,
    implicitly returning None; the sorted list is now always returned.
    """
    retval = list(OrderedSet(reglist))
    retval.sort(key=lambda reg: REG_SORT_ORDER.get(reg, 0))
    if extra is not None:
        return [extra] + retval
    return retval
133 def __init__(self
, decoder
, isacaller
, svstate
, regfile
):
136 self
.isacaller
= isacaller
137 self
.svstate
= svstate
138 for i
in range(len(regfile
)):
139 self
[i
] = SelectableInt(regfile
[i
], 64)
141 def __call__(self
, ridx
, is_vec
=False, offs
=0, elwidth
=64):
142 if isinstance(ridx
, SelectableInt
):
145 return self
[ridx
+offs
]
146 # rrrright. start by breaking down into row/col, based on elwidth
147 gpr_offs
= offs
// (64//elwidth
)
148 gpr_col
= offs
% (64//elwidth
)
149 # now select the 64-bit register, but get its value (easier)
150 val
= self
[ridx
+gpr_offs
].value
151 # now shift down and mask out
152 val
= val
>> (gpr_col
*elwidth
) & ((1 << elwidth
)-1)
153 # finally, return a SelectableInt at the required elwidth
154 log("GPR call", ridx
, "isvec", is_vec
, "offs", offs
,
155 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
))
156 return SelectableInt(val
, elwidth
)
158 def set_form(self
, form
):
161 def write(self
, rnum
, value
, is_vec
=False, elwidth
=64):
163 if isinstance(rnum
, SelectableInt
):
165 if isinstance(value
, SelectableInt
):
168 if isinstance(rnum
, tuple):
169 rnum
, base
, offs
= rnum
172 # rrrright. start by breaking down into row/col, based on elwidth
173 gpr_offs
= offs
// (64//elwidth
)
174 gpr_col
= offs
% (64//elwidth
)
175 # compute the mask based on elwidth
176 mask
= (1 << elwidth
)-1
177 # now select the 64-bit register, but get its value (easier)
178 val
= self
[base
+gpr_offs
].value
179 # now mask out the bit we don't want
180 val
= val
& ~
(mask
<< (gpr_col
*elwidth
))
181 # then wipe the bit we don't want from the value
183 # OR the new value in, shifted up
184 val |
= value
<< (gpr_col
*elwidth
)
185 # finally put the damn value into the regfile
186 log("GPR write", base
, "isvec", is_vec
, "offs", offs
,
187 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
),
189 dict.__setitem
__(self
, base
+gpr_offs
, SelectableInt(val
, 64))
191 def __setitem__(self
, rnum
, value
):
192 # rnum = rnum.value # only SelectableInt allowed
193 log("GPR setitem", rnum
, value
)
194 if isinstance(rnum
, SelectableInt
):
196 dict.__setitem
__(self
, rnum
, value
)
198 def getz(self
, rnum
):
199 # rnum = rnum.value # only SelectableInt allowed
200 log("GPR getzero?", rnum
)
202 return SelectableInt(0, 64)
205 def _get_regnum(self
, attr
):
206 getform
= self
.sd
.sigforms
[self
.form
]
207 rnum
= getattr(getform
, attr
)
210 def ___getitem__(self
, attr
):
211 """ XXX currently not used
213 rnum
= self
._get
_regnum
(attr
)
214 log("GPR getitem", attr
, rnum
)
215 return self
.regfile
[rnum
]
217 def dump(self
, printout
=True):
219 for i
in range(len(self
)):
220 res
.append(self
[i
].value
)
222 for i
in range(0, len(res
), 8):
225 s
.append("%08x" % res
[i
+j
])
227 print("reg", "%2d" % i
, s
)
232 def __init__(self
, dec2
, initial_sprs
={}):
235 for key
, v
in initial_sprs
.items():
236 if isinstance(key
, SelectableInt
):
238 key
= special_sprs
.get(key
, key
)
239 if isinstance(key
, int):
242 info
= spr_byname
[key
]
243 if not isinstance(v
, SelectableInt
):
244 v
= SelectableInt(v
, info
.length
)
247 def __getitem__(self
, key
):
249 log("dict", self
.items())
250 # if key in special_sprs get the special spr, otherwise return key
251 if isinstance(key
, SelectableInt
):
253 if isinstance(key
, int):
254 key
= spr_dict
[key
].SPR
255 key
= special_sprs
.get(key
, key
)
256 if key
== 'HSRR0': # HACK!
258 if key
== 'HSRR1': # HACK!
261 res
= dict.__getitem
__(self
, key
)
263 if isinstance(key
, int):
266 info
= spr_byname
[key
]
267 dict.__setitem
__(self
, key
, SelectableInt(0, info
.length
))
268 res
= dict.__getitem
__(self
, key
)
269 log("spr returning", key
, res
)
272 def __setitem__(self
, key
, value
):
273 if isinstance(key
, SelectableInt
):
275 if isinstance(key
, int):
276 key
= spr_dict
[key
].SPR
278 key
= special_sprs
.get(key
, key
)
279 if key
== 'HSRR0': # HACK!
280 self
.__setitem
__('SRR0', value
)
281 if key
== 'HSRR1': # HACK!
282 self
.__setitem
__('SRR1', value
)
283 log("setting spr", key
, value
)
284 dict.__setitem
__(self
, key
, value
)
286 def __call__(self
, ridx
):
289 def dump(self
, printout
=True):
291 keys
= list(self
.keys())
294 sprname
= spr_dict
.get(k
, None)
298 sprname
= sprname
.SPR
299 res
.append((sprname
, self
[k
].value
))
301 for sprname
, value
in res
:
302 print(" ", sprname
, hex(value
))
307 def __init__(self
, pc_init
=0):
308 self
.CIA
= SelectableInt(pc_init
, 64)
309 self
.NIA
= self
.CIA
+ SelectableInt(4, 64) # only true for v3.0B!
311 def update_nia(self
, is_svp64
):
312 increment
= 8 if is_svp64
else 4
313 self
.NIA
= self
.CIA
+ SelectableInt(increment
, 64)
315 def update(self
, namespace
, is_svp64
):
316 """updates the program counter (PC) by 4 if v3.0B mode or 8 if SVP64
318 self
.CIA
= namespace
['NIA'].narrow(64)
319 self
.update_nia(is_svp64
)
320 namespace
['CIA'] = self
.CIA
321 namespace
['NIA'] = self
.NIA
325 # See PowerISA Version 3.0 B Book 1
326 # Section 2.3.1 Condition Register pages 30 - 31
    # bit positions within one 4-bit CR field.
    # See PowerISA Version 3.0 B Book 1
    # Section 2.3.1 Condition Register pages 30 - 31
    # each index is shared between the integer-compare name (LT/GT/EQ/SO)
    # and the floating-point-compare name (FL/FG/FE/FU)
    LT = FL = 0  # negative, less than, floating-point less than
    GT = FG = 1  # positive, greater than, floating-point greater than
    EQ = FE = 2  # equal, floating-point equal
    SO = FU = 3  # summary overflow, floating-point unordered
333 def __init__(self
, init
=0):
334 # rev_cr = int('{:016b}'.format(initial_cr)[::-1], 2)
335 # self.cr = FieldSelectableInt(self._cr, list(range(32, 64)))
336 self
.cr
= SelectableInt(init
, 64) # underlying reg
337 # field-selectable versions of Condition Register TODO check bitranges?
340 bits
= tuple(range(i
*4+32, (i
+1)*4+32))
341 _cr
= FieldSelectableInt(self
.cr
, bits
)
345 # decode SVP64 predicate integer to reg number and invert
346 def get_predint(gpr
, mask
):
350 log("get_predint", mask
, SVP64PredInt
.ALWAYS
.value
)
351 if mask
== SVP64PredInt
.ALWAYS
.value
:
352 return 0xffff_ffff_ffff_ffff # 64 bits of 1
353 if mask
== SVP64PredInt
.R3_UNARY
.value
:
354 return 1 << (r3
.value
& 0b111111)
355 if mask
== SVP64PredInt
.R3
.value
:
357 if mask
== SVP64PredInt
.R3_N
.value
:
359 if mask
== SVP64PredInt
.R10
.value
:
361 if mask
== SVP64PredInt
.R10_N
.value
:
363 if mask
== SVP64PredInt
.R30
.value
:
365 if mask
== SVP64PredInt
.R30_N
.value
:
369 # decode SVP64 predicate CR to reg number and invert status
370 def _get_predcr(mask
):
371 if mask
== SVP64PredCR
.LT
.value
:
373 if mask
== SVP64PredCR
.GE
.value
:
375 if mask
== SVP64PredCR
.GT
.value
:
377 if mask
== SVP64PredCR
.LE
.value
:
379 if mask
== SVP64PredCR
.EQ
.value
:
381 if mask
== SVP64PredCR
.NE
.value
:
383 if mask
== SVP64PredCR
.SO
.value
:
385 if mask
== SVP64PredCR
.NS
.value
:
389 # read individual CR fields (0..VL-1), extract the required bit
390 # and construct the mask
391 def get_predcr(crl
, mask
, vl
):
392 idx
, noninv
= _get_predcr(mask
)
395 cr
= crl
[i
+SVP64CROffs
.CRPred
]
396 if cr
[idx
].value
== noninv
:
401 # TODO, really should just be using PowerDecoder2
402 def get_idx_map(dec2
, name
):
404 in1_sel
= yield op
.in1_sel
405 in2_sel
= yield op
.in2_sel
406 in3_sel
= yield op
.in3_sel
407 in1
= yield dec2
.e
.read_reg1
.data
408 # identify which regnames map to in1/2/3
409 if name
== 'RA' or name
== 'RA_OR_ZERO':
410 if (in1_sel
== In1Sel
.RA
.value
or
411 (in1_sel
== In1Sel
.RA_OR_ZERO
.value
and in1
!= 0)):
413 if in1_sel
== In1Sel
.RA_OR_ZERO
.value
:
416 if in2_sel
== In2Sel
.RB
.value
:
418 if in3_sel
== In3Sel
.RB
.value
:
420 # XXX TODO, RC doesn't exist yet!
422 if in3_sel
== In3Sel
.RC
.value
:
424 elif name
in ['EA', 'RS']:
425 if in1_sel
== In1Sel
.RS
.value
:
427 if in2_sel
== In2Sel
.RS
.value
:
429 if in3_sel
== In3Sel
.RS
.value
:
432 if in1_sel
== In1Sel
.FRA
.value
:
435 if in2_sel
== In2Sel
.FRB
.value
:
438 if in3_sel
== In3Sel
.FRC
.value
:
441 if in1_sel
== In1Sel
.FRS
.value
:
443 if in3_sel
== In3Sel
.FRS
.value
:
448 # TODO, really should just be using PowerDecoder2
449 def get_idx_in(dec2
, name
, ewmode
=False):
450 idx
= yield from get_idx_map(dec2
, name
)
454 in1_sel
= yield op
.in1_sel
455 in2_sel
= yield op
.in2_sel
456 in3_sel
= yield op
.in3_sel
457 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
458 in1
= yield dec2
.e
.read_reg1
.data
459 in2
= yield dec2
.e
.read_reg2
.data
460 in3
= yield dec2
.e
.read_reg3
.data
462 in1_base
= yield dec2
.e
.read_reg1
.base
463 in2_base
= yield dec2
.e
.read_reg2
.base
464 in3_base
= yield dec2
.e
.read_reg3
.base
465 in1_offs
= yield dec2
.e
.read_reg1
.offs
466 in2_offs
= yield dec2
.e
.read_reg2
.offs
467 in3_offs
= yield dec2
.e
.read_reg3
.offs
468 in1
= (in1
, in1_base
, in1_offs
)
469 in2
= (in2
, in2_base
, in2_offs
)
470 in3
= (in3
, in3_base
, in3_offs
)
472 in1_isvec
= yield dec2
.in1_isvec
473 in2_isvec
= yield dec2
.in2_isvec
474 in3_isvec
= yield dec2
.in3_isvec
475 log("get_idx_in in1", name
, in1_sel
, In1Sel
.RA
.value
,
477 log("get_idx_in in2", name
, in2_sel
, In2Sel
.RB
.value
,
479 log("get_idx_in in3", name
, in3_sel
, In3Sel
.RS
.value
,
481 log("get_idx_in FRS in3", name
, in3_sel
, In3Sel
.FRS
.value
,
483 log("get_idx_in FRB in2", name
, in2_sel
, In2Sel
.FRB
.value
,
485 log("get_idx_in FRC in3", name
, in3_sel
, In3Sel
.FRC
.value
,
488 return in1
, in1_isvec
490 return in2
, in2_isvec
492 return in3
, in3_isvec
496 # TODO, really should just be using PowerDecoder2
497 def get_cr_in(dec2
, name
):
499 in_sel
= yield op
.cr_in
500 in_bitfield
= yield dec2
.dec_cr_in
.cr_bitfield
.data
501 sv_cr_in
= yield op
.sv_cr_in
502 spec
= yield dec2
.crin_svdec
.spec
503 sv_override
= yield dec2
.dec_cr_in
.sv_override
504 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
505 in1
= yield dec2
.e
.read_cr1
.data
506 cr_isvec
= yield dec2
.cr_in_isvec
507 log("get_cr_in", in_sel
, CROutSel
.CR0
.value
, in1
, cr_isvec
)
508 log(" sv_cr_in", sv_cr_in
)
509 log(" cr_bf", in_bitfield
)
511 log(" override", sv_override
)
512 # identify which regnames map to in / o2
514 if in_sel
== CRInSel
.BI
.value
:
516 log("get_cr_in not found", name
)
520 # TODO, really should just be using PowerDecoder2
521 def get_cr_out(dec2
, name
):
523 out_sel
= yield op
.cr_out
524 out_bitfield
= yield dec2
.dec_cr_out
.cr_bitfield
.data
525 sv_cr_out
= yield op
.sv_cr_out
526 spec
= yield dec2
.crout_svdec
.spec
527 sv_override
= yield dec2
.dec_cr_out
.sv_override
528 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
529 out
= yield dec2
.e
.write_cr
.data
530 o_isvec
= yield dec2
.cr_out_isvec
531 log("get_cr_out", out_sel
, CROutSel
.CR0
.value
, out
, o_isvec
)
532 log(" sv_cr_out", sv_cr_out
)
533 log(" cr_bf", out_bitfield
)
535 log(" override", sv_override
)
536 # identify which regnames map to out / o2
538 if out_sel
== CROutSel
.BF
.value
:
541 if out_sel
== CROutSel
.CR0
.value
:
543 if name
== 'CR1': # these are not actually calculated correctly
544 if out_sel
== CROutSel
.CR1
.value
:
546 log("get_cr_out not found", name
)
550 # TODO, really should just be using PowerDecoder2
551 def get_out_map(dec2
, name
):
553 out_sel
= yield op
.out_sel
554 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
555 out
= yield dec2
.e
.write_reg
.data
556 # identify which regnames map to out / o2
558 if out_sel
== OutSel
.RA
.value
:
561 if out_sel
== OutSel
.RT
.value
:
563 if out_sel
== OutSel
.RT_OR_ZERO
.value
and out
!= 0:
565 elif name
== 'RT_OR_ZERO':
566 if out_sel
== OutSel
.RT_OR_ZERO
.value
:
569 if out_sel
== OutSel
.FRA
.value
:
572 if out_sel
== OutSel
.FRT
.value
:
577 # TODO, really should just be using PowerDecoder2
578 def get_idx_out(dec2
, name
, ewmode
=False):
580 out_sel
= yield op
.out_sel
581 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
582 out
= yield dec2
.e
.write_reg
.data
583 o_isvec
= yield dec2
.o_isvec
585 offs
= yield dec2
.e
.write_reg
.offs
586 base
= yield dec2
.e
.write_reg
.base
587 out
= (out
, base
, offs
)
588 # identify which regnames map to out / o2
589 ismap
= yield from get_out_map(dec2
, name
)
591 log("get_idx_out", name
, out_sel
, out
, o_isvec
)
593 log("get_idx_out not found", name
, out_sel
, out
, o_isvec
)
597 # TODO, really should just be using PowerDecoder2
598 def get_out2_map(dec2
, name
):
599 # check first if register is activated for write
601 out_sel
= yield op
.out_sel
602 out
= yield dec2
.e
.write_ea
.data
603 out_ok
= yield dec2
.e
.write_ea
.ok
607 if name
in ['EA', 'RA']:
608 if hasattr(op
, "upd"):
609 # update mode LD/ST uses read-reg A also as an output
611 log("get_idx_out2", upd
, LDSTMode
.update
.value
,
612 out_sel
, OutSel
.RA
.value
,
614 if upd
== LDSTMode
.update
.value
:
617 fft_en
= yield dec2
.implicit_rs
619 log("get_idx_out2", out_sel
, OutSel
.RS
.value
,
623 fft_en
= yield dec2
.implicit_rs
625 log("get_idx_out2", out_sel
, OutSel
.FRS
.value
,
631 # TODO, really should just be using PowerDecoder2
632 def get_idx_out2(dec2
, name
, ewmode
=False):
633 # check first if register is activated for write
635 out_sel
= yield op
.out_sel
636 out
= yield dec2
.e
.write_ea
.data
638 offs
= yield dec2
.e
.write_ea
.offs
639 base
= yield dec2
.e
.write_ea
.base
640 out
= (out
, base
, offs
)
641 o_isvec
= yield dec2
.o2_isvec
642 ismap
= yield from get_out2_map(dec2
, name
)
644 log("get_idx_out2", name
, out_sel
, out
, o_isvec
)
650 """deals with svstate looping.
653 def __init__(self
, svstate
):
654 self
.svstate
= svstate
657 def new_iterators(self
):
658 self
.src_it
= self
.src_iterator()
659 self
.dst_it
= self
.dst_iterator()
663 self
.new_ssubstep
= 0
664 self
.new_dsubstep
= 0
665 self
.pred_dst_zero
= 0
666 self
.pred_src_zero
= 0
668 def src_iterator(self
):
669 """source-stepping iterator
671 pack
= self
.svstate
.pack
675 # pack advances subvl in *outer* loop
676 while True: # outer subvl loop
677 while True: # inner vl loop
680 srcmask
= self
.srcmask
681 srcstep
= self
.svstate
.srcstep
682 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
683 if self
.pred_sz
or pred_src_zero
:
684 self
.pred_src_zero
= not pred_src_zero
685 log(" advance src", srcstep
, vl
,
686 self
.svstate
.ssubstep
, subvl
)
687 # yield actual substep/srcstep
688 yield (self
.svstate
.ssubstep
, srcstep
)
689 # the way yield works these could have been modified.
692 srcstep
= self
.svstate
.srcstep
693 log(" advance src check", srcstep
, vl
,
694 self
.svstate
.ssubstep
, subvl
, srcstep
== vl
-1,
695 self
.svstate
.ssubstep
== subvl
)
696 if srcstep
== vl
-1: # end-point
697 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
698 if self
.svstate
.ssubstep
== subvl
: # end-point
699 log(" advance pack stop")
701 break # exit inner loop
702 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance ss
704 if self
.svstate
.ssubstep
== subvl
: # end-point
705 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
706 log(" advance pack stop")
708 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
711 # these cannot be done as for-loops because SVSTATE may change
712 # (srcstep/substep may be modified, interrupted, subvl/vl change)
713 # but they *can* be done as while-loops as long as every SVSTATE
714 # "thing" is re-read every single time a yield gives indices
715 while True: # outer vl loop
716 while True: # inner subvl loop
719 srcmask
= self
.srcmask
720 srcstep
= self
.svstate
.srcstep
721 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
722 if self
.pred_sz
or pred_src_zero
:
723 self
.pred_src_zero
= not pred_src_zero
724 log(" advance src", srcstep
, vl
,
725 self
.svstate
.ssubstep
, subvl
)
726 # yield actual substep/srcstep
727 yield (self
.svstate
.ssubstep
, srcstep
)
728 if self
.svstate
.ssubstep
== subvl
: # end-point
729 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
730 break # exit inner loop
731 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
733 if srcstep
== vl
-1: # end-point
734 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
737 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance srcstep
739 def dst_iterator(self
):
740 """dest-stepping iterator
742 unpack
= self
.svstate
.unpack
746 # pack advances subvl in *outer* loop
747 while True: # outer subvl loop
748 while True: # inner vl loop
751 dstmask
= self
.dstmask
752 dststep
= self
.svstate
.dststep
753 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
754 if self
.pred_dz
or pred_dst_zero
:
755 self
.pred_dst_zero
= not pred_dst_zero
756 log(" advance dst", dststep
, vl
,
757 self
.svstate
.dsubstep
, subvl
)
758 # yield actual substep/dststep
759 yield (self
.svstate
.dsubstep
, dststep
)
760 # the way yield works these could have been modified.
762 dststep
= self
.svstate
.dststep
763 log(" advance dst check", dststep
, vl
,
764 self
.svstate
.ssubstep
, subvl
)
765 if dststep
== vl
-1: # end-point
766 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
767 if self
.svstate
.dsubstep
== subvl
: # end-point
768 log(" advance unpack stop")
771 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance ds
773 if self
.svstate
.dsubstep
== subvl
: # end-point
774 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
775 log(" advance unpack stop")
777 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
779 # these cannot be done as for-loops because SVSTATE may change
780 # (dststep/substep may be modified, interrupted, subvl/vl change)
781 # but they *can* be done as while-loops as long as every SVSTATE
782 # "thing" is re-read every single time a yield gives indices
783 while True: # outer vl loop
784 while True: # inner subvl loop
786 dstmask
= self
.dstmask
787 dststep
= self
.svstate
.dststep
788 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
789 if self
.pred_dz
or pred_dst_zero
:
790 self
.pred_dst_zero
= not pred_dst_zero
791 log(" advance dst", dststep
, self
.svstate
.vl
,
792 self
.svstate
.dsubstep
, subvl
)
793 # yield actual substep/dststep
794 yield (self
.svstate
.dsubstep
, dststep
)
795 if self
.svstate
.dsubstep
== subvl
: # end-point
796 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
798 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
801 if dststep
== vl
-1: # end-point
802 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
804 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance dststep
806 def src_iterate(self
):
807 """source-stepping iterator
811 pack
= self
.svstate
.pack
812 unpack
= self
.svstate
.unpack
813 ssubstep
= self
.svstate
.ssubstep
814 end_ssub
= ssubstep
== subvl
815 end_src
= self
.svstate
.srcstep
== vl
-1
816 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
820 srcstep
= self
.svstate
.srcstep
821 srcmask
= self
.srcmask
823 # pack advances subvl in *outer* loop
825 assert srcstep
<= vl
-1
826 end_src
= srcstep
== vl
-1
831 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
835 srcstep
+= 1 # advance srcstep
836 if not self
.srcstep_skip
:
838 if ((1 << srcstep
) & srcmask
) != 0:
841 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
843 # advance subvl in *inner* loop
846 assert srcstep
<= vl
-1
847 end_src
= srcstep
== vl
-1
848 if end_src
: # end-point
854 if not self
.srcstep_skip
:
856 if ((1 << srcstep
) & srcmask
) != 0:
859 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
860 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
863 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
865 self
.svstate
.srcstep
= SelectableInt(srcstep
, 7)
866 log(" advance src", self
.svstate
.srcstep
, self
.svstate
.ssubstep
,
869 def dst_iterate(self
):
870 """dest step iterator
874 pack
= self
.svstate
.pack
875 unpack
= self
.svstate
.unpack
876 dsubstep
= self
.svstate
.dsubstep
877 end_dsub
= dsubstep
== subvl
878 dststep
= self
.svstate
.dststep
879 end_dst
= dststep
== vl
-1
880 dstmask
= self
.dstmask
881 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
886 # unpack advances subvl in *outer* loop
888 assert dststep
<= vl
-1
889 end_dst
= dststep
== vl
-1
894 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
898 dststep
+= 1 # advance dststep
899 if not self
.dststep_skip
:
901 if ((1 << dststep
) & dstmask
) != 0:
904 log(" dskip", bin(dstmask
), bin(1 << dststep
))
906 # advance subvl in *inner* loop
909 assert dststep
<= vl
-1
910 end_dst
= dststep
== vl
-1
911 if end_dst
: # end-point
917 if not self
.dststep_skip
:
919 if ((1 << dststep
) & dstmask
) != 0:
922 log(" dskip", bin(dstmask
), bin(1 << dststep
))
923 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
926 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
928 self
.svstate
.dststep
= SelectableInt(dststep
, 7)
929 log(" advance dst", self
.svstate
.dststep
, self
.svstate
.dsubstep
,
932 def at_loopend(self
):
933 """tells if this is the last possible element. uses the cached values
934 for src/dst-step and sub-steps
938 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
939 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
940 end_ssub
= ssubstep
== subvl
941 end_dsub
= dsubstep
== subvl
942 if srcstep
== vl
-1 and end_ssub
:
944 if dststep
== vl
-1 and end_dsub
:
948 def advance_svstate_steps(self
):
949 """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
950 TODO when Pack/Unpack is set, substep becomes the *outer* loop
952 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
953 if self
.loopend
: # huhn??
958 def read_src_mask(self
):
959 """read/update pred_sz and src mask
961 # get SVSTATE VL (oh and print out some debug stuff)
963 srcstep
= self
.svstate
.srcstep
964 ssubstep
= self
.svstate
.ssubstep
966 # get predicate mask (all 64 bits)
967 srcmask
= 0xffff_ffff_ffff_ffff
969 pmode
= yield self
.dec2
.rm_dec
.predmode
970 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
971 srcpred
= yield self
.dec2
.rm_dec
.srcpred
972 dstpred
= yield self
.dec2
.rm_dec
.dstpred
973 pred_sz
= yield self
.dec2
.rm_dec
.pred_sz
974 if pmode
== SVP64PredMode
.INT
.value
:
975 srcmask
= dstmask
= get_predint(self
.gpr
, dstpred
)
976 if sv_ptype
== SVPtype
.P2
.value
:
977 srcmask
= get_predint(self
.gpr
, srcpred
)
978 elif pmode
== SVP64PredMode
.CR
.value
:
979 srcmask
= dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
980 if sv_ptype
== SVPtype
.P2
.value
:
981 srcmask
= get_predcr(self
.crl
, srcpred
, vl
)
982 # work out if the ssubsteps are completed
983 ssubstart
= ssubstep
== 0
985 log(" ptype", sv_ptype
)
986 log(" srcpred", bin(srcpred
))
987 log(" srcmask", bin(srcmask
))
988 log(" pred_sz", bin(pred_sz
))
989 log(" ssubstart", ssubstart
)
991 # store all that above
992 self
.srcstep_skip
= False
993 self
.srcmask
= srcmask
994 self
.pred_sz
= pred_sz
995 self
.new_ssubstep
= ssubstep
996 log(" new ssubstep", ssubstep
)
997 # until the predicate mask has a "1" bit... or we run out of VL
998 # let srcstep==VL be the indicator to move to next instruction
1000 self
.srcstep_skip
= True
1002 def read_dst_mask(self
):
1003 """same as read_src_mask - check and record everything needed
1005 # get SVSTATE VL (oh and print out some debug stuff)
1006 # yield Delay(1e-10) # make changes visible
1007 vl
= self
.svstate
.vl
1008 dststep
= self
.svstate
.dststep
1009 dsubstep
= self
.svstate
.dsubstep
1011 # get predicate mask (all 64 bits)
1012 dstmask
= 0xffff_ffff_ffff_ffff
1014 pmode
= yield self
.dec2
.rm_dec
.predmode
1015 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
1016 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1017 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1018 pred_dz
= yield self
.dec2
.rm_dec
.pred_dz
1019 if pmode
== SVP64PredMode
.INT
.value
:
1020 dstmask
= get_predint(self
.gpr
, dstpred
)
1021 elif pmode
== SVP64PredMode
.CR
.value
:
1022 dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1023 # work out if the ssubsteps are completed
1024 dsubstart
= dsubstep
== 0
1025 log(" pmode", pmode
)
1026 log(" ptype", sv_ptype
)
1027 log(" dstpred", bin(dstpred
))
1028 log(" dstmask", bin(dstmask
))
1029 log(" pred_dz", bin(pred_dz
))
1030 log(" dsubstart", dsubstart
)
1032 self
.dststep_skip
= False
1033 self
.dstmask
= dstmask
1034 self
.pred_dz
= pred_dz
1035 self
.new_dsubstep
= dsubstep
1036 log(" new dsubstep", dsubstep
)
1038 self
.dststep_skip
= True
1040 def svstate_pre_inc(self
):
1041 """check if srcstep/dststep need to skip over masked-out predicate bits
1042 note that this is not supposed to do anything to substep,
1043 it is purely for skipping masked-out bits
1046 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1047 yield from self
.read_src_mask()
1048 yield from self
.read_dst_mask()
1055 srcstep
= self
.svstate
.srcstep
1056 srcmask
= self
.srcmask
1057 pred_src_zero
= self
.pred_sz
1058 vl
= self
.svstate
.vl
1059 # srcstep-skipping opportunity identified
1060 if self
.srcstep_skip
:
1061 # cannot do this with sv.bc - XXX TODO
1064 while (((1 << srcstep
) & srcmask
) == 0) and (srcstep
!= vl
):
1065 log(" sskip", bin(1 << srcstep
))
1068 # now work out if the relevant mask bits require zeroing
1070 pred_src_zero
= ((1 << srcstep
) & srcmask
) == 0
1072 # store new srcstep / dststep
1073 self
.new_srcstep
= srcstep
1074 self
.pred_src_zero
= pred_src_zero
1075 log(" new srcstep", srcstep
)
1078 # dststep-skipping opportunity identified
1079 dststep
= self
.svstate
.dststep
1080 dstmask
= self
.dstmask
1081 pred_dst_zero
= self
.pred_dz
1082 vl
= self
.svstate
.vl
1083 if self
.dststep_skip
:
1084 # cannot do this with sv.bc - XXX TODO
1087 while (((1 << dststep
) & dstmask
) == 0) and (dststep
!= vl
):
1088 log(" dskip", bin(1 << dststep
))
1091 # now work out if the relevant mask bits require zeroing
1093 pred_dst_zero
= ((1 << dststep
) & dstmask
) == 0
1095 # store new srcstep / dststep
1096 self
.new_dststep
= dststep
1097 self
.pred_dst_zero
= pred_dst_zero
1098 log(" new dststep", dststep
)
1101 class ISACaller(ISACallerHelper
, ISAFPHelpers
, StepLoop
):
1102 # decoder2 - an instance of power_decoder2
1103 # regfile - a list of initial values for the registers
1104 # initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
1105 # respect_pc - tracks the program counter. requires initial_insns
1106 def __init__(self
, decoder2
, regfile
, initial_sprs
=None, initial_cr
=0,
1107 initial_mem
=None, initial_msr
=0,
1118 self
.bigendian
= bigendian
1120 self
.is_svp64_mode
= False
1121 self
.respect_pc
= respect_pc
1122 if initial_sprs
is None:
1124 if initial_mem
is None:
1126 if fpregfile
is None:
1127 fpregfile
= [0] * 32
1128 if initial_insns
is None:
1130 assert self
.respect_pc
== False, "instructions required to honor pc"
1132 log("ISACaller insns", respect_pc
, initial_insns
, disassembly
)
1133 log("ISACaller initial_msr", initial_msr
)
1135 # "fake program counter" mode (for unit testing)
1139 if isinstance(initial_mem
, tuple):
1140 self
.fake_pc
= initial_mem
[0]
1141 disasm_start
= self
.fake_pc
1143 disasm_start
= initial_pc
1145 # disassembly: we need this for now (not given from the decoder)
1146 self
.disassembly
= {}
1148 for i
, code
in enumerate(disassembly
):
1149 self
.disassembly
[i
*4 + disasm_start
] = code
1151 # set up registers, instruction memory, data memory, PC, SPRs, MSR, CR
1152 self
.svp64rm
= SVP64RM()
1153 if initial_svstate
is None:
1155 if isinstance(initial_svstate
, int):
1156 initial_svstate
= SVP64State(initial_svstate
)
1157 # SVSTATE, MSR and PC
1158 StepLoop
.__init
__(self
, initial_svstate
)
1159 self
.msr
= SelectableInt(initial_msr
, 64) # underlying reg
1161 # GPR FPR SPR registers
1162 initial_sprs
= deepcopy(initial_sprs
) # so as not to get modified
1163 self
.gpr
= GPR(decoder2
, self
, self
.svstate
, regfile
)
1164 self
.fpr
= GPR(decoder2
, self
, self
.svstate
, fpregfile
)
1165 self
.spr
= SPR(decoder2
, initial_sprs
) # initialise SPRs before MMU
1167 # set up 4 dummy SVSHAPEs if they aren't already set up
1169 sname
= 'SVSHAPE%d' % i
1170 val
= self
.spr
.get(sname
, 0)
1171 # make sure it's an SVSHAPE
1172 self
.spr
[sname
] = SVSHAPE(val
, self
.gpr
)
1173 self
.last_op_svshape
= False
1176 self
.mem
= Mem(row_bytes
=8, initial_mem
=initial_mem
)
1177 self
.mem
.log_fancy(kind
=LogKind
.InstrInOuts
)
1178 self
.imem
= Mem(row_bytes
=4, initial_mem
=initial_insns
)
1179 # MMU mode, redirect underlying Mem through RADIX
1181 self
.mem
= RADIX(self
.mem
, self
)
1183 self
.imem
= RADIX(self
.imem
, self
)
1185 # TODO, needed here:
1186 # FPR (same as GPR except for FP nums)
1187 # 4.2.2 p124 FPSCR (definitely "separate" - not in SPR)
1188 # note that mffs, mcrfs, mtfsf "manage" this FPSCR
1189 # 2.3.1 CR (and sub-fields CR0..CR6 - CR0 SO comes from XER.SO)
1190 # note that mfocrf, mfcr, mtcr, mtocrf, mcrxrx "manage" CRs
1192 # 2.3.2 LR (actually SPR #8) -- Done
1193 # 2.3.3 CTR (actually SPR #9) -- Done
1194 # 2.3.4 TAR (actually SPR #815)
1195 # 3.2.2 p45 XER (actually SPR #1) -- Done
1196 # 3.2.3 p46 p232 VRSAVE (actually SPR #256)
1198 # create CR then allow portions of it to be "selectable" (below)
1199 self
.cr_fields
= CRFields(initial_cr
)
1200 self
.cr
= self
.cr_fields
.cr
1201 self
.cr_backup
= 0 # sigh, dreadful hack: for fail-first (VLi)
1203 # "undefined", just set to variable-bit-width int (use exts "max")
1204 # self.undefined = SelectableInt(0, 256) # TODO, not hard-code 256!
1207 self
.namespace
.update(self
.spr
)
1208 self
.namespace
.update({'GPR': self
.gpr
,
1212 'memassign': self
.memassign
,
1215 'SVSTATE': self
.svstate
,
1216 'SVSHAPE0': self
.spr
['SVSHAPE0'],
1217 'SVSHAPE1': self
.spr
['SVSHAPE1'],
1218 'SVSHAPE2': self
.spr
['SVSHAPE2'],
1219 'SVSHAPE3': self
.spr
['SVSHAPE3'],
1222 'undefined': undefined
,
1223 'mode_is_64bit': True,
1224 'SO': XER_bits
['SO'],
1225 'XLEN': 64 # elwidth overrides
1228 # update pc to requested start point
1229 self
.set_pc(initial_pc
)
1231 # field-selectable versions of Condition Register
1232 self
.crl
= self
.cr_fields
.crl
1234 self
.namespace
["CR%d" % i
] = self
.crl
[i
]
1236 self
.decoder
= decoder2
.dec
1237 self
.dec2
= decoder2
1239 super().__init
__(XLEN
=self
.namespace
["XLEN"])
1243 return self
.namespace
["XLEN"]
    def call_trap(self, trap_addr, trap_bit):
        """calls TRAP and sets up NIA to the new execution location.
        next instruction will begin at trap_addr.

        trap_addr: vector address where execution resumes (e.g. 0x700)
        trap_bit:  bit index recorded in the SRR1 copy of the MSR
                   (see PIb constants)
        """
        self.TRAP(trap_addr, trap_bit)
        # TRAP() computed the new NIA into self.trap_nia: publish it to
        # the pseudocode namespace, then latch it into the PC state
        self.namespace['NIA'] = self.trap_nia
        self.pc.update(self.namespace, self.is_svp64_mode)
    def TRAP(self, trap_addr=0x700, trap_bit=PIb.TRAP):
        """TRAP> saves PC, MSR (and TODO SVSTATE), and updates MSR

        TRAP function is callable from inside the pseudocode itself,
        hence the default arguments.  when calling from inside ISACaller
        it is best to use call_trap()

        trap_addr: interrupt vector offset (default 0x700)
        trap_bit:  MSR bit index set in the SRR1 copy (default PIb.TRAP)
        """
        # https://bugs.libre-soc.org/show_bug.cgi?id=859
        kaivb = self.spr['KAIVB'].value
        msr = self.namespace['MSR'].value
        log("TRAP:", hex(trap_addr), hex(msr), "kaivb", hex(kaivb))
        # store CIA(+4?) in SRR0, set NIA to 0x700
        # store MSR in SRR1, set MSR to um errr something, have to check spec
        # store SVSTATE (if enabled) in SVSRR0
        self.spr['SRR0'].value = self.pc.CIA.value
        self.spr['SRR1'].value = msr
        if self.is_svp64_mode:
            self.spr['SVSRR0'] = self.namespace['SVSTATE'].value
        # new NIA: vector offset OR'd with the KAIVB base, low 13 bits of
        # KAIVB masked off so the vector offset slots in cleanly
        self.trap_nia = SelectableInt(trap_addr | (kaivb & ~0x1fff), 64)
        self.spr['SRR1'][trap_bit] = 1  # change *copy* of MSR in SRR1

        # set exception bits. TODO: this should, based on the address
        # in figure 66 p1065 V3.0B and the table figure 65 p1063 set these
        # bits appropriately. however it turns out that *for now* in all
        # cases (all trap_addrs) the exact same thing is needed.
        # bit names below follow the PowerISA MSR bit definitions (MSRb,
        # big-endian numbering, imported from openpower.consts)
        self.msr[MSRb.IR] = 0       # instruction relocate off
        self.msr[MSRb.DR] = 0       # data relocate off
        self.msr[MSRb.FE0] = 0      # FP exception modes disabled
        self.msr[MSRb.FE1] = 0
        self.msr[MSRb.EE] = 0       # external interrupts disabled
        self.msr[MSRb.RI] = 0       # not recoverable while in handler
        self.msr[MSRb.SF] = 1       # stay in 64-bit mode
        self.msr[MSRb.TM] = 0
        self.msr[MSRb.VEC] = 0
        self.msr[MSRb.VSX] = 0
        self.msr[MSRb.PR] = 0       # supervisor (privileged) state
        self.msr[MSRb.FP] = 0
        self.msr[MSRb.PMM] = 0
        self.msr[MSRb.TEs] = 0
        self.msr[MSRb.TEe] = 0
        self.msr[MSRb.UND] = 0
        self.msr[MSRb.LE] = 1       # little-endian
1296 def memassign(self
, ea
, sz
, val
):
1297 self
.mem
.memassign(ea
, sz
, val
)
1299 def prep_namespace(self
, insn_name
, formname
, op_fields
, xlen
):
1300 # TODO: get field names from form in decoder*1* (not decoder2)
1301 # decoder2 is hand-created, and decoder1.sigform is auto-generated
1303 # then "yield" fields only from op_fields rather than hard-coded
1305 fields
= self
.decoder
.sigforms
[formname
]
1306 log("prep_namespace", formname
, op_fields
, insn_name
)
1307 for name
in op_fields
:
1308 # CR immediates. deal with separately. needs modifying
1310 if self
.is_svp64_mode
and name
in ['BI']: # TODO, more CRs
1311 # BI is a 5-bit, must reconstruct the value
1312 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1313 sig
= getattr(fields
, name
)
1315 # low 2 LSBs (CR field selector) remain same, CR num extended
1316 assert regnum
<= 7, "sigh, TODO, 128 CR fields"
1317 val
= (val
& 0b11) |
(regnum
<< 2)
1318 elif self
.is_svp64_mode
and name
in ['BF']: # TODO, more CRs
1319 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, "BF")
1320 log('hack %s' % name
, regnum
, is_vec
)
1323 sig
= getattr(fields
, name
)
1325 # these are all opcode fields involved in index-selection of CR,
1326 # and need to do "standard" arithmetic. CR[BA+32] for example
1327 # would, if using SelectableInt, only be 5-bit.
1328 if name
in ['BF', 'BFA', 'BC', 'BA', 'BB', 'BT', 'BI']:
1329 self
.namespace
[name
] = val
1331 self
.namespace
[name
] = SelectableInt(val
, sig
.width
)
1333 self
.namespace
['XER'] = self
.spr
['XER']
1334 self
.namespace
['CA'] = self
.spr
['XER'][XER_bits
['CA']].value
1335 self
.namespace
['CA32'] = self
.spr
['XER'][XER_bits
['CA32']].value
1336 self
.namespace
['XLEN'] = xlen
1338 # add some SVSTATE convenience variables
1339 vl
= self
.svstate
.vl
1340 srcstep
= self
.svstate
.srcstep
1341 self
.namespace
['VL'] = vl
1342 self
.namespace
['srcstep'] = srcstep
1344 # take a copy of the CR field value: if non-VLi fail-first fails
1345 # this is because the pseudocode writes *directly* to CR. sigh
1346 self
.cr_backup
= self
.cr
.value
1348 # sv.bc* need some extra fields
1349 if self
.is_svp64_mode
and insn_name
.startswith("sv.bc"):
1350 # blegh grab bits manually
1351 mode
= yield self
.dec2
.rm_dec
.rm_in
.mode
1352 # convert to SelectableInt before test
1353 mode
= SelectableInt(mode
, 5)
1354 bc_vlset
= mode
[SVP64MODEb
.BC_VLSET
] != 0
1355 bc_vli
= mode
[SVP64MODEb
.BC_VLI
] != 0
1356 bc_snz
= mode
[SVP64MODEb
.BC_SNZ
] != 0
1357 bc_vsb
= yield self
.dec2
.rm_dec
.bc_vsb
1358 bc_lru
= yield self
.dec2
.rm_dec
.bc_lru
1359 bc_gate
= yield self
.dec2
.rm_dec
.bc_gate
1360 sz
= yield self
.dec2
.rm_dec
.pred_sz
1361 self
.namespace
['mode'] = SelectableInt(mode
, 5)
1362 self
.namespace
['ALL'] = SelectableInt(bc_gate
, 1)
1363 self
.namespace
['VSb'] = SelectableInt(bc_vsb
, 1)
1364 self
.namespace
['LRu'] = SelectableInt(bc_lru
, 1)
1365 self
.namespace
['VLSET'] = SelectableInt(bc_vlset
, 1)
1366 self
.namespace
['VLI'] = SelectableInt(bc_vli
, 1)
1367 self
.namespace
['sz'] = SelectableInt(sz
, 1)
1368 self
.namespace
['SNZ'] = SelectableInt(bc_snz
, 1)
1370 def handle_carry_(self
, inputs
, output
, ca
, ca32
):
1371 inv_a
= yield self
.dec2
.e
.do
.invert_in
1373 inputs
[0] = ~inputs
[0]
1375 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1377 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1378 inputs
.append(SelectableInt(imm
, 64))
1381 log("gt input", x
, output
)
1382 gt
= (gtu(x
, output
))
1385 cy
= 1 if any(gts
) else 0
1387 if ca
is None: # already written
1388 self
.spr
['XER'][XER_bits
['CA']] = cy
1391 # ARGH... different for OP_ADD... *sigh*...
1392 op
= yield self
.dec2
.e
.do
.insn_type
1393 if op
== MicrOp
.OP_ADD
.value
:
1394 res32
= (output
.value
& (1 << 32)) != 0
1395 a32
= (inputs
[0].value
& (1 << 32)) != 0
1396 if len(inputs
) >= 2:
1397 b32
= (inputs
[1].value
& (1 << 32)) != 0
1400 cy32
= res32 ^ a32 ^ b32
1401 log("CA32 ADD", cy32
)
1405 log("input", x
, output
)
1406 log(" x[32:64]", x
, x
[32:64])
1407 log(" o[32:64]", output
, output
[32:64])
1408 gt
= (gtu(x
[32:64], output
[32:64])) == SelectableInt(1, 1)
1410 cy32
= 1 if any(gts
) else 0
1411 log("CA32", cy32
, gts
)
1412 if ca32
is None: # already written
1413 self
.spr
['XER'][XER_bits
['CA32']] = cy32
1415 def handle_overflow(self
, inputs
, output
, div_overflow
):
1416 if hasattr(self
.dec2
.e
.do
, "invert_in"):
1417 inv_a
= yield self
.dec2
.e
.do
.invert_in
1419 inputs
[0] = ~inputs
[0]
1421 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1423 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1424 inputs
.append(SelectableInt(imm
, 64))
1425 log("handle_overflow", inputs
, output
, div_overflow
)
1426 if len(inputs
) < 2 and div_overflow
is None:
1429 # div overflow is different: it's returned by the pseudo-code
1430 # because it's more complex than can be done by analysing the output
1431 if div_overflow
is not None:
1432 ov
, ov32
= div_overflow
, div_overflow
1433 # arithmetic overflow can be done by analysing the input and output
1434 elif len(inputs
) >= 2:
1436 input_sgn
= [exts(x
.value
, x
.bits
) < 0 for x
in inputs
]
1437 output_sgn
= exts(output
.value
, output
.bits
) < 0
1438 ov
= 1 if input_sgn
[0] == input_sgn
[1] and \
1439 output_sgn
!= input_sgn
[0] else 0
1442 input32_sgn
= [exts(x
.value
, 32) < 0 for x
in inputs
]
1443 output32_sgn
= exts(output
.value
, 32) < 0
1444 ov32
= 1 if input32_sgn
[0] == input32_sgn
[1] and \
1445 output32_sgn
!= input32_sgn
[0] else 0
1447 # now update XER OV/OV32/SO
1448 so
= self
.spr
['XER'][XER_bits
['SO']]
1449 new_so
= so | ov
# sticky overflow ORs in old with new
1450 self
.spr
['XER'][XER_bits
['OV']] = ov
1451 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1452 self
.spr
['XER'][XER_bits
['SO']] = new_so
1453 log(" set overflow", ov
, ov32
, so
, new_so
)
1455 def handle_comparison(self
, out
, cr_idx
=0, overflow
=None, no_so
=False):
1456 assert isinstance(out
, SelectableInt
), \
1457 "out zero not a SelectableInt %s" % repr(outputs
)
1458 log("handle_comparison", out
.bits
, hex(out
.value
))
1459 # TODO - XXX *processor* in 32-bit mode
1460 # https://bugs.libre-soc.org/show_bug.cgi?id=424
1462 # o32 = exts(out.value, 32)
1463 # print ("handle_comparison exts 32 bit", hex(o32))
1464 out
= exts(out
.value
, out
.bits
)
1465 log("handle_comparison exts", hex(out
))
1466 # create the three main CR flags, EQ GT LT
1467 zero
= SelectableInt(out
== 0, 1)
1468 positive
= SelectableInt(out
> 0, 1)
1469 negative
= SelectableInt(out
< 0, 1)
1470 # get (or not) XER.SO. for setvl this is important *not* to read SO
1472 SO
= SelectableInt(1, 0)
1474 SO
= self
.spr
['XER'][XER_bits
['SO']]
1475 log("handle_comparison SO overflow", SO
, overflow
)
1476 # alternative overflow checking (setvl mainly at the moment)
1477 if overflow
is not None and overflow
== 1:
1478 SO
= SelectableInt(1, 1)
1479 # create the four CR field values and set the required CR field
1480 cr_field
= selectconcat(negative
, positive
, zero
, SO
)
1481 log("handle_comparison cr_field", self
.cr
, cr_idx
, cr_field
)
1482 self
.crl
[cr_idx
].eq(cr_field
)
1484 def set_pc(self
, pc_val
):
1485 self
.namespace
['NIA'] = SelectableInt(pc_val
, 64)
1486 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
1488 def get_next_insn(self
):
1489 """check instruction
1492 pc
= self
.pc
.CIA
.value
1495 ins
= self
.imem
.ld(pc
, 4, False, True, instr_fetch
=True)
1497 raise KeyError("no instruction at 0x%x" % pc
)
    def setup_one(self):
        """set up one instruction: fetch from instruction memory at the
        current PC, then hand the raw word to the decoder.

        Generator: setup_next_insn drives nmigen decoder signals, so this
        must be stepped with yield (called via "yield from").
        """
        pc, insn = self.get_next_insn()
        yield from self.setup_next_insn(pc, insn)
1506 def setup_next_insn(self
, pc
, ins
):
1507 """set up next instruction
1510 log("setup: 0x%x 0x%x %s" % (pc
, ins
& 0xffffffff, bin(ins
)))
1511 log("CIA NIA", self
.respect_pc
, self
.pc
.CIA
.value
, self
.pc
.NIA
.value
)
1513 yield self
.dec2
.sv_rm
.eq(0)
1514 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff)
1515 yield self
.dec2
.dec
.bigendian
.eq(self
.bigendian
)
1516 yield self
.dec2
.state
.msr
.eq(self
.msr
.value
)
1517 yield self
.dec2
.state
.pc
.eq(pc
)
1518 if self
.svstate
is not None:
1519 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
1521 # SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
1523 opcode
= yield self
.dec2
.dec
.opcode_in
1524 opcode
= SelectableInt(value
=opcode
, bits
=32)
1525 pfx
= SVP64Instruction
.Prefix(opcode
)
1526 log("prefix test: opcode:", pfx
.po
, bin(pfx
.po
), pfx
.id)
1527 self
.is_svp64_mode
= bool((pfx
.po
== 0b000001) and (pfx
.id == 0b11))
1528 self
.pc
.update_nia(self
.is_svp64_mode
)
1530 yield self
.dec2
.is_svp64_mode
.eq(self
.is_svp64_mode
)
1531 self
.namespace
['NIA'] = self
.pc
.NIA
1532 self
.namespace
['SVSTATE'] = self
.svstate
1533 if not self
.is_svp64_mode
:
1536 # in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
1537 log("svp64.rm", bin(pfx
.rm
))
1538 log(" svstate.vl", self
.svstate
.vl
)
1539 log(" svstate.mvl", self
.svstate
.maxvl
)
1540 ins
= self
.imem
.ld(pc
+4, 4, False, True, instr_fetch
=True)
1541 log(" svsetup: 0x%x 0x%x %s" % (pc
+4, ins
& 0xffffffff, bin(ins
)))
1542 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff) # v3.0B suffix
1543 yield self
.dec2
.sv_rm
.eq(int(pfx
.rm
)) # svp64 prefix
1546 def execute_one(self
):
1547 """execute one instruction
1549 # get the disassembly code for this instruction
1550 if not self
.disassembly
:
1551 code
= yield from self
.get_assembly_name()
1554 if self
.is_svp64_mode
:
1555 offs
, dbg
= 4, "svp64 "
1556 code
= self
.disassembly
[self
._pc
+offs
]
1557 log(" %s sim-execute" % dbg
, hex(self
._pc
), code
)
1558 opname
= code
.split(' ')[0]
1560 yield from self
.call(opname
) # execute the instruction
1561 except MemException
as e
: # check for memory errors
1562 if e
.args
[0] == 'unaligned': # alignment error
1563 # run a Trap but set DAR first
1564 print("memory unaligned exception, DAR", e
.dar
)
1565 self
.spr
['DAR'] = SelectableInt(e
.dar
, 64)
1566 self
.call_trap(0x600, PIb
.PRIV
) # 0x600, privileged
1568 elif e
.args
[0] == 'invalid': # invalid
1569 # run a Trap but set DAR first
1570 log("RADIX MMU memory invalid error, mode %s" % e
.mode
)
1571 if e
.mode
== 'EXECUTE':
1572 # XXX TODO: must set a few bits in SRR1,
1573 # see microwatt loadstore1.vhdl
1574 # if m_in.segerr = '0' then
1575 # v.srr1(47 - 33) := m_in.invalid;
1576 # v.srr1(47 - 35) := m_in.perm_error; -- noexec fault
1577 # v.srr1(47 - 44) := m_in.badtree;
1578 # v.srr1(47 - 45) := m_in.rc_error;
1579 # v.intr_vec := 16#400#;
1581 # v.intr_vec := 16#480#;
1582 self
.call_trap(0x400, PIb
.PRIV
) # 0x400, privileged
1584 self
.call_trap(0x300, PIb
.PRIV
) # 0x300, privileged
1586 # not supported yet:
1587 raise e
# ... re-raise
1589 log("gprs after code", code
)
1592 for i
in range(len(self
.crl
)):
1593 crs
.append(bin(self
.crl
[i
].asint()))
1594 log("crs", " ".join(crs
))
1595 log("vl,maxvl", self
.svstate
.vl
, self
.svstate
.maxvl
)
1597 # don't use this except in special circumstances
1598 if not self
.respect_pc
:
1601 log("execute one, CIA NIA", hex(self
.pc
.CIA
.value
),
1602 hex(self
.pc
.NIA
.value
))
1604 def get_assembly_name(self
):
1605 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1606 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1607 dec_insn
= yield self
.dec2
.e
.do
.insn
1608 insn_1_11
= yield self
.dec2
.e
.do
.insn
[1:11]
1609 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1610 int_op
= yield self
.dec2
.dec
.op
.internal_op
1611 log("get assembly name asmcode", asmcode
, int_op
,
1612 hex(dec_insn
), bin(insn_1_11
))
1613 asmop
= insns
.get(asmcode
, None)
1615 # sigh reconstruct the assembly instruction name
1616 if hasattr(self
.dec2
.e
.do
, "oe"):
1617 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
1618 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
1622 if hasattr(self
.dec2
.e
.do
, "rc"):
1623 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
1624 rc_ok
= yield self
.dec2
.e
.do
.rc
.ok
1628 # annoying: ignore rc_ok if RC1 is set (for creating *assembly name*)
1629 RC1
= yield self
.dec2
.rm_dec
.RC1
1633 # grrrr have to special-case MUL op (see DecodeOE)
1634 log("ov %d en %d rc %d en %d op %d" %
1635 (ov_ok
, ov_en
, rc_ok
, rc_en
, int_op
))
1636 if int_op
in [MicrOp
.OP_MUL_H64
.value
, MicrOp
.OP_MUL_H32
.value
]:
1641 if not asmop
.endswith("."): # don't add "." to "andis."
1644 if hasattr(self
.dec2
.e
.do
, "lk"):
1645 lk
= yield self
.dec2
.e
.do
.lk
1648 log("int_op", int_op
)
1649 if int_op
in [MicrOp
.OP_B
.value
, MicrOp
.OP_BC
.value
]:
1650 AA
= yield self
.dec2
.dec
.fields
.FormI
.AA
[0:-1]
1654 spr_msb
= yield from self
.get_spr_msb()
1655 if int_op
== MicrOp
.OP_MFCR
.value
:
1660 # XXX TODO: for whatever weird reason this doesn't work
1661 # https://bugs.libre-soc.org/show_bug.cgi?id=390
1662 if int_op
== MicrOp
.OP_MTCRF
.value
:
1669 def reset_remaps(self
):
1670 self
.remap_loopends
= [0] * 4
1671 self
.remap_idxs
= [0, 1, 2, 3]
1673 def get_remap_indices(self
):
1674 """WARNING, this function stores remap_idxs and remap_loopends
1675 in the class for later use. this to avoid problems with yield
1677 # go through all iterators in lock-step, advance to next remap_idx
1678 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
1679 # get four SVSHAPEs. here we are hard-coding
1681 SVSHAPE0
= self
.spr
['SVSHAPE0']
1682 SVSHAPE1
= self
.spr
['SVSHAPE1']
1683 SVSHAPE2
= self
.spr
['SVSHAPE2']
1684 SVSHAPE3
= self
.spr
['SVSHAPE3']
1685 # set up the iterators
1686 remaps
= [(SVSHAPE0
, SVSHAPE0
.get_iterator()),
1687 (SVSHAPE1
, SVSHAPE1
.get_iterator()),
1688 (SVSHAPE2
, SVSHAPE2
.get_iterator()),
1689 (SVSHAPE3
, SVSHAPE3
.get_iterator()),
1693 for i
, (shape
, remap
) in enumerate(remaps
):
1694 # zero is "disabled"
1695 if shape
.value
== 0x0:
1696 self
.remap_idxs
[i
] = 0
1697 # pick src or dststep depending on reg num (0-2=in, 3-4=out)
1698 step
= dststep
if (i
in [3, 4]) else srcstep
1699 # this is terrible. O(N^2) looking for the match. but hey.
1700 for idx
, (remap_idx
, loopends
) in enumerate(remap
):
1703 self
.remap_idxs
[i
] = remap_idx
1704 self
.remap_loopends
[i
] = loopends
1705 dbg
.append((i
, step
, remap_idx
, loopends
))
1706 for (i
, step
, remap_idx
, loopends
) in dbg
:
1707 log("SVSHAPE %d idx, end" % i
, step
, remap_idx
, bin(loopends
))
1710 def get_spr_msb(self
):
1711 dec_insn
= yield self
.dec2
.e
.do
.insn
1712 return dec_insn
& (1 << 20) != 0 # sigh - XFF.spr[-1]?
1714 def call(self
, name
):
1715 """call(opcode) - the primary execution point for instructions
1717 self
.last_st_addr
= None # reset the last known store address
1718 self
.last_ld_addr
= None # etc.
1720 ins_name
= name
.strip() # remove spaces if not already done so
1722 log("halted - not executing", ins_name
)
1725 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1726 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1727 asmop
= yield from self
.get_assembly_name()
1728 log("call", ins_name
, asmop
)
1730 # sv.setvl is *not* a loop-function. sigh
1731 log("is_svp64_mode", self
.is_svp64_mode
, asmop
)
1734 int_op
= yield self
.dec2
.dec
.op
.internal_op
1735 spr_msb
= yield from self
.get_spr_msb()
1737 instr_is_privileged
= False
1738 if int_op
in [MicrOp
.OP_ATTN
.value
,
1739 MicrOp
.OP_MFMSR
.value
,
1740 MicrOp
.OP_MTMSR
.value
,
1741 MicrOp
.OP_MTMSRD
.value
,
1743 MicrOp
.OP_RFID
.value
]:
1744 instr_is_privileged
= True
1745 if int_op
in [MicrOp
.OP_MFSPR
.value
,
1746 MicrOp
.OP_MTSPR
.value
] and spr_msb
:
1747 instr_is_privileged
= True
1749 log("is priv", instr_is_privileged
, hex(self
.msr
.value
),
1751 # check MSR priv bit and whether op is privileged: if so, throw trap
1752 if instr_is_privileged
and self
.msr
[MSRb
.PR
] == 1:
1753 self
.call_trap(0x700, PIb
.PRIV
)
1756 # check halted condition
1757 if ins_name
== 'attn':
1761 # check illegal instruction
1763 if ins_name
not in ['mtcrf', 'mtocrf']:
1764 illegal
= ins_name
!= asmop
1766 # list of instructions not being supported by binutils (.long)
1767 dotstrp
= asmop
[:-1] if asmop
[-1] == '.' else asmop
1768 if dotstrp
in [*FPTRANS_INSNS
,
1769 'ffmadds', 'fdmadds', 'ffadds',
1770 'mins', 'maxs', 'minu', 'maxu',
1771 'setvl', 'svindex', 'svremap', 'svstep',
1772 'svshape', 'svshape2',
1773 'grev', 'ternlogi', 'bmask', 'cprop',
1774 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
1775 'fmvis', 'fishmv', 'pcdec', "maddedu", "divmod2du",
1781 # branch-conditional redirects to sv.bc
1782 if asmop
.startswith('bc') and self
.is_svp64_mode
:
1783 ins_name
= 'sv.%s' % ins_name
1785 # ld-immediate-with-pi mode redirects to ld-with-postinc
1786 ldst_imm_postinc
= False
1787 if 'u' in ins_name
and self
.is_svp64_mode
:
1788 ldst_pi
= yield self
.dec2
.rm_dec
.ldst_postinc
1790 ins_name
= ins_name
.replace("u", "up")
1791 ldst_imm_postinc
= True
1792 log(" enable ld/st postinc", ins_name
)
1794 log(" post-processed name", dotstrp
, ins_name
, asmop
)
1796 # illegal instructions call TRAP at 0x700
1798 print("illegal", ins_name
, asmop
)
1799 self
.call_trap(0x700, PIb
.ILLEG
)
1800 print("name %s != %s - calling ILLEGAL trap, PC: %x" %
1801 (ins_name
, asmop
, self
.pc
.CIA
.value
))
1804 # this is for setvl "Vertical" mode: if set true,
1805 # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
1806 # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
1807 self
.allow_next_step_inc
= False
1808 self
.svstate_next_mode
= 0
1810 # nop has to be supported, we could let the actual op calculate
1811 # but PowerDecoder has a pattern for nop
1812 if ins_name
== 'nop':
1813 self
.update_pc_next()
1816 # get elwidths, defaults to 64
1820 if self
.is_svp64_mode
:
1821 ew_src
= yield self
.dec2
.rm_dec
.ew_src
1822 ew_dst
= yield self
.dec2
.rm_dec
.ew_dst
1823 ew_src
= 8 << (3-int(ew_src
)) # convert to bitlength
1824 ew_dst
= 8 << (3-int(ew_dst
)) # convert to bitlength
1825 xlen
= max(ew_src
, ew_dst
)
1826 log("elwdith", ew_src
, ew_dst
)
1827 log("XLEN:", self
.is_svp64_mode
, xlen
)
1829 # look up instruction in ISA.instrs, prepare namespace
1830 if ins_name
== 'pcdec': # grrrr yes there are others ("stbcx." etc.)
1831 info
= self
.instrs
[ins_name
+"."]
1833 info
= self
.instrs
[ins_name
]
1834 yield from self
.prep_namespace(ins_name
, info
.form
, info
.op_fields
,
1837 # preserve order of register names
1838 input_names
= create_args(list(info
.read_regs
) +
1839 list(info
.uninit_regs
))
1840 log("input names", input_names
)
1842 # get SVP64 entry for the current instruction
1843 sv_rm
= self
.svp64rm
.instrs
.get(ins_name
)
1844 if sv_rm
is not None:
1845 dest_cr
, src_cr
, src_byname
, dest_byname
= decode_extra(sv_rm
)
1847 dest_cr
, src_cr
, src_byname
, dest_byname
= False, False, {}, {}
1848 log("sv rm", sv_rm
, dest_cr
, src_cr
, src_byname
, dest_byname
)
1850 # see if srcstep/dststep need skipping over masked-out predicate bits
1851 # svstep also needs advancement because it calls SVSTATE_NEXT.
1852 # bit the remaps get computed just after pre_inc moves them on
1853 # with remap_set_steps substituting for PowerDecider2 not doing it,
1854 # and SVSTATE_NEXT not being able to.use yield, the preinc on
1855 # svstep is necessary for now.
1857 if (self
.is_svp64_mode
or ins_name
in ['svstep']):
1858 yield from self
.svstate_pre_inc()
1859 if self
.is_svp64_mode
:
1860 pre
= yield from self
.update_new_svstate_steps()
1862 self
.svp64_reset_loop()
1864 self
.update_pc_next()
1866 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
1867 pred_dst_zero
= self
.pred_dst_zero
1868 pred_src_zero
= self
.pred_src_zero
1869 vl
= self
.svstate
.vl
1870 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1872 # VL=0 in SVP64 mode means "do nothing: skip instruction"
1873 if self
.is_svp64_mode
and vl
== 0:
1874 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
1875 log("SVP64: VL=0, end of call", self
.namespace
['CIA'],
1876 self
.namespace
['NIA'], kind
=LogKind
.InstrInOuts
)
1879 # for when SVREMAP is active, using pre-arranged schedule.
1880 # note: modifying PowerDecoder2 needs to "settle"
1881 remap_en
= self
.svstate
.SVme
1882 persist
= self
.svstate
.RMpst
1883 active
= (persist
or self
.last_op_svshape
) and remap_en
!= 0
1884 if self
.is_svp64_mode
:
1885 yield self
.dec2
.remap_active
.eq(remap_en
if active
else 0)
1887 if persist
or self
.last_op_svshape
:
1888 remaps
= self
.get_remap_indices()
1889 if self
.is_svp64_mode
and (persist
or self
.last_op_svshape
):
1890 yield from self
.remap_set_steps(remaps
)
1891 # after that, settle down (combinatorial) to let Vector reg numbers
1892 # work themselves out
1894 if self
.is_svp64_mode
:
1895 remap_active
= yield self
.dec2
.remap_active
1897 remap_active
= False
1898 log("remap active", bin(remap_active
))
1900 # main input registers (RT, RA ...)
1902 for name
in input_names
:
1903 regval
= (yield from self
.get_input(name
, ew_src
))
1904 log("regval name", name
, regval
)
1905 inputs
.append(regval
)
1907 # arrrrgh, awful hack, to get _RT into namespace
1908 if ins_name
in ['setvl', 'svstep']:
1910 RT
= yield self
.dec2
.dec
.RT
1911 self
.namespace
[regname
] = SelectableInt(RT
, 5)
1913 self
.namespace
["RT"] = SelectableInt(0, 5)
1914 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, "RT")
1915 log('hack input reg %s %s' % (name
, str(regnum
)), is_vec
)
1917 # in SVP64 mode for LD/ST work out immediate
1918 # XXX TODO: replace_ds for DS-Form rather than D-Form.
1919 # use info.form to detect
1920 if self
.is_svp64_mode
and not ldst_imm_postinc
:
1921 yield from self
.check_replace_d(info
, remap_active
)
1923 # "special" registers
1924 for special
in info
.special_regs
:
1925 if special
in special_sprs
:
1926 inputs
.append(self
.spr
[special
])
1928 inputs
.append(self
.namespace
[special
])
1930 # clear trap (trap) NIA
1931 self
.trap_nia
= None
1933 # check if this was an sv.bc* and create an indicator that
1934 # this is the last check to be made as a loop. combined with
1935 # the ALL/ANY mode we can early-exit
1936 if self
.is_svp64_mode
and ins_name
.startswith("sv.bc"):
1937 no_in_vec
= yield self
.dec2
.no_in_vec
# BI is scalar
1938 end_loop
= no_in_vec
or srcstep
== vl
-1 or dststep
== vl
-1
1939 self
.namespace
['end_loop'] = SelectableInt(end_loop
, 1)
1941 # execute actual instruction here (finally)
1942 log("inputs", inputs
)
1943 results
= info
.func(self
, *inputs
)
1944 output_names
= create_args(info
.write_regs
)
1946 for out
, n
in zip(results
or [], output_names
):
1948 log("results", outs
)
1950 # "inject" decorator takes namespace from function locals: we need to
1951 # overwrite NIA being overwritten (sigh)
1952 if self
.trap_nia
is not None:
1953 self
.namespace
['NIA'] = self
.trap_nia
1955 log("after func", self
.namespace
['CIA'], self
.namespace
['NIA'])
1957 # check if op was a LD/ST so that debugging can check the
1959 if int_op
in [MicrOp
.OP_STORE
.value
,
1961 self
.last_st_addr
= self
.mem
.last_st_addr
1962 if int_op
in [MicrOp
.OP_LOAD
.value
,
1964 self
.last_ld_addr
= self
.mem
.last_ld_addr
1965 log("op", int_op
, MicrOp
.OP_STORE
.value
, MicrOp
.OP_LOAD
.value
,
1966 self
.last_st_addr
, self
.last_ld_addr
)
1968 # detect if CA/CA32 already in outputs (sra*, basically)
1970 ca32
= outs
.get("CA32 ")
1972 log("carry already done?", ca
, ca32
, output_names
)
1973 carry_en
= yield self
.dec2
.e
.do
.output_carry
1975 yield from self
.handle_carry_(inputs
, results
[0], ca
, ca32
)
1977 # get outout named "overflow" and "CR0"
1978 overflow
= outs
.get('overflow')
1979 cr0
= outs
.get('CR0')
1981 if not self
.is_svp64_mode
: # yeah just no. not in parallel processing
1982 # detect if overflow was in return result
1983 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
1984 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
1985 log("internal overflow", ins_name
, overflow
, "en?", ov_en
, ov_ok
)
1987 yield from self
.handle_overflow(inputs
, results
[0], overflow
)
1989 # only do SVP64 dest predicated Rc=1 if dest-pred is not enabled
1991 if not self
.is_svp64_mode
or not pred_dst_zero
:
1992 if hasattr(self
.dec2
.e
.do
, "rc"):
1993 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
1994 # don't do Rc=1 for svstep it is handled explicitly.
1995 # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
1996 # to write directly to CR0 instead of in ISACaller. hooyahh.
1997 if rc_en
and ins_name
not in ['svstep']:
1998 yield from self
.do_rc_ov(ins_name
, results
[0], overflow
, cr0
)
2001 ffirst_hit
= False, False
2002 if self
.is_svp64_mode
:
2003 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2004 is_cr
= sv_mode
== SVMode
.CROP
.value
2005 chk
= rc_en
or is_cr
2006 ffirst_hit
= (yield from self
.check_ffirst(info
, chk
, srcstep
))
2008 # any modified return results?
2009 yield from self
.do_outregs_nia(asmop
, ins_name
, info
, outs
,
2010 carry_en
, rc_en
, ffirst_hit
, ew_dst
)
2012 def check_ffirst(self
, info
, rc_en
, srcstep
):
2013 """fail-first mode: checks a bit of Rc Vector, truncates VL
2015 rm_mode
= yield self
.dec2
.rm_dec
.mode
2016 ff_inv
= yield self
.dec2
.rm_dec
.inv
2017 cr_bit
= yield self
.dec2
.rm_dec
.cr_sel
2018 RC1
= yield self
.dec2
.rm_dec
.RC1
2019 vli_
= yield self
.dec2
.rm_dec
.vli
# VL inclusive if truncated
2020 log(" ff rm_mode", rc_en
, rm_mode
, SVP64RMMode
.FFIRST
.value
)
2024 log(" cr_bit", cr_bit
)
2025 log(" rc_en", rc_en
)
2026 if not rc_en
or rm_mode
!= SVP64RMMode
.FFIRST
.value
:
2028 # get the CR vevtor, do BO-test
2030 log("asmregs", info
.asmregs
[0], info
.write_regs
)
2031 if 'CR' in info
.write_regs
and 'BF' in info
.asmregs
[0]:
2033 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, crf
)
2034 crtest
= self
.crl
[regnum
]
2035 ffirst_hit
= crtest
[cr_bit
] != ff_inv
2036 log("cr test", crf
, regnum
, int(crtest
), crtest
, cr_bit
, ff_inv
)
2037 log("cr test?", ffirst_hit
)
2040 # Fail-first activated, truncate VL
2041 vli
= SelectableInt(int(vli_
), 7)
2042 self
.svstate
.vl
= srcstep
+ vli
2043 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
2044 yield Settle() # let decoder update
2047 def do_rc_ov(self
, ins_name
, result
, overflow
, cr0
):
2048 if ins_name
.startswith("f"):
2049 rc_reg
= "CR1" # not calculated correctly yet (not FP compares)
2052 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, rc_reg
)
2053 # hang on... for `setvl` actually you want to test SVSTATE.VL
2054 is_setvl
= ins_name
in ('svstep', 'setvl')
2056 result
= SelectableInt(result
.vl
, 64)
2058 overflow
= None # do not override overflow except in setvl
2060 # if there was not an explicit CR0 in the pseudocode, do implicit Rc=1
2062 self
.handle_comparison(result
, regnum
, overflow
, no_so
=is_setvl
)
2064 # otherwise we just blat CR0 into the required regnum
2065 log("explicit rc0", cr0
)
2066 self
.crl
[regnum
].eq(cr0
)
2068 def do_outregs_nia(self
, asmop
, ins_name
, info
, outs
,
2069 carry_en
, rc_en
, ffirst_hit
, ew_dst
):
2070 ffirst_hit
, vli
= ffirst_hit
2071 # write out any regs for this instruction
2072 for name
, output
in outs
.items():
2073 yield from self
.check_write(info
, name
, output
, carry_en
, ew_dst
)
2074 # restore the CR value on non-VLI failfirst (from sv.cmp and others
2075 # which write directly to CR in the pseudocode (gah, what a mess)
2076 # if ffirst_hit and not vli:
2077 # self.cr.value = self.cr_backup
2080 self
.svp64_reset_loop()
2083 # check advancement of src/dst/sub-steps and if PC needs updating
2084 nia_update
= (yield from self
.check_step_increment(rc_en
,
2087 self
.update_pc_next()
def check_replace_d(self, info, remap_active):
    """Replace the D/DS immediate for SVP64 strided LD/ST.

    Unit-strided mode adds offset*width to the immediate; element-strided
    mode multiplies it by the element step.  The adjusted value is written
    back into the pseudocode namespace under 'D' or 'DS' (DS-Form shifted
    right by 2, as its 2 LSBs are implicit).
    """
    replace_d = False  # update / replace constant in pseudocode
    ldstmode = yield self.dec2.rm_dec.ldstmode
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    srcstep, dststep = self.new_srcstep, self.new_dststep
    ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep
    if info.form == 'DS':
        # DS-Form, multiply by 4 then knock 2 bits off after
        imm = yield self.dec2.dec.fields.FormDS.DS[0:14] * 4
    else:
        imm = yield self.dec2.dec.fields.FormD.D[0:16]
    imm = exts(imm, 16)  # sign-extend to integer
    # get the right step. LD is from srcstep, ST is dststep
    op = yield self.dec2.e.do.insn_type
    offsmul = 0
    if op == MicrOp.OP_LOAD.value:
        if remap_active:
            # REMAP supplies the (already-remapped) step directly
            offsmul = yield self.dec2.in1_step
            log("D-field REMAP src", imm, offsmul)
        else:
            offsmul = (srcstep * (subvl+1)) + ssubstep
            log("D-field src", imm, offsmul)
    elif op == MicrOp.OP_STORE.value:
        # XXX NOTE! no bit-reversed STORE! this should not ever be used
        offsmul = (dststep * (subvl+1)) + dsubstep
        log("D-field dst", imm, offsmul)
    # Unit-Strided LD/ST adds offset*width to immediate
    if ldstmode == SVP64LDSTmode.UNITSTRIDE.value:
        ldst_len = yield self.dec2.e.do.data_len
        imm = SelectableInt(imm + offsmul * ldst_len, 32)
        replace_d = True
    # Element-strided multiplies the immediate by element step
    elif ldstmode == SVP64LDSTmode.ELSTRIDE.value:
        imm = SelectableInt(imm * offsmul, 32)
        replace_d = True
    if replace_d:
        ldst_ra_vec = yield self.dec2.rm_dec.ldst_ra_vec
        ldst_imz_in = yield self.dec2.rm_dec.ldst_imz_in
        log("LDSTmode", SVP64LDSTmode(ldstmode),
            offsmul, imm, ldst_ra_vec, ldst_imz_in)
    # new replacement D... errr.. DS
    if replace_d:
        if info.form == 'DS':
            # TODO: assert 2 LSBs are zero?
            log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm.value))
            imm.value = imm.value >> 2
            self.namespace['DS'] = imm
        else:
            self.namespace['D'] = imm
def get_input(self, name, ew_src):
    """Read one input operand by its pseudocode name (RA, RB, FRA, ...).

    Resolves the decoder register index, handles predicate source-zeroing,
    and reads from the FPR or GPR file at source element width ew_src.
    Returns a SelectableInt (undefined when name is None and no zeroing).
    """
    # using PowerDecoder2, first, find the decoder index.
    # (mapping name RA RB RC RS to in1, in2, in3)
    regnum, is_vec = yield from get_idx_in(self.dec2, name, True)
    if regnum is None:
        # doing this is not part of svp64, it's because output
        # registers, to be modified, need to be in the namespace.
        regnum, is_vec = yield from get_idx_out(self.dec2, name, True)
        if regnum is None:
            regnum, is_vec = yield from get_idx_out2(self.dec2, name, True)

    if isinstance(regnum, tuple):
        (regnum, base, offs) = regnum
    else:
        base, offs = regnum, 0  # temporary HACK

    # in case getting the register number is needed, _RA, _RB
    # (HACK: only in straight non-svp64-mode for now, or elwidth == 64)
    regname = "_" + name
    if not self.is_svp64_mode or ew_src == 64:
        self.namespace[regname] = regnum
    elif regname in self.namespace:
        # stale _-prefixed entry would be wrong at sub-64 elwidth: drop it
        del self.namespace[regname]

    if not self.is_svp64_mode or not self.pred_src_zero:
        log('reading reg %s %s' % (name, str(regnum)), is_vec)
        if name in fregs:
            reg_val = SelectableInt(self.fpr(base, is_vec, offs, ew_src))
            log("read reg %d/%d: 0x%x" % (base, offs, reg_val.value))
        elif name is not None:
            reg_val = SelectableInt(self.gpr(base, is_vec, offs, ew_src))
            log("read reg %d/%d: 0x%x" % (base, offs, reg_val.value))
    else:
        # predicate bit is zero: source is zeroed rather than read
        log('zero input reg %s %s' % (name, str(regnum)), is_vec)
        reg_val = SelectableInt(0, ew_src)
    return reg_val
def remap_set_steps(self, remaps):
    """remap_set_steps sets up the in1/2/3 and out1/2 steps.
    they work in concert with PowerDecoder2 at the moment,
    there is no HDL implementation of REMAP. therefore this
    function, because ISACaller still uses PowerDecoder2,
    will *explicitly* write the dec2.XX_step values.
    """
    # just some convenient debug info
    for i in range(4):
        sname = 'SVSHAPE%d' % i
        shape = self.spr[sname]
        log(sname, bin(shape.value))
        log(" lims", shape.lims)
        log(" mode", shape.mode)
        log(" skip", shape.skip)

    # set up the list of steps to remap
    mi0 = self.svstate.mi0
    mi1 = self.svstate.mi1
    mi2 = self.svstate.mi2
    mo0 = self.svstate.mo0
    mo1 = self.svstate.mo1
    steps = [[self.dec2.in1_step, mi0],  # RA
             [self.dec2.in2_step, mi1],  # RB
             [self.dec2.in3_step, mi2],  # RC
             [self.dec2.o_step, mo0],    # RT
             [self.dec2.o2_step, mo1],   # EA
             ]
    rnames = ['RA', 'RB', 'RC', 'RT', 'RS']
    for i, reg in enumerate(rnames):
        idx = yield from get_idx_map(self.dec2, reg)
        if idx is None:
            # fall back to the FP-register name (FRA, FRB, ...)
            idx = yield from get_idx_map(self.dec2, "F"+reg)
        # NOTE(review): dispatch guards reconstructed - confirm upstream
        if idx == 1:    # in1
            steps[i][0] = self.dec2.in1_step
        elif idx == 2:  # in2
            steps[i][0] = self.dec2.in2_step
        elif idx == 3:  # in3
            steps[i][0] = self.dec2.in3_step
        log("remap step", i, reg, idx, steps[i][1])
    remap_idxs = self.remap_idxs
    rremaps = []
    # now cross-index the required SHAPE for each of 3-in 2-out regs
    rnames = ['RA', 'RB', 'RC', 'RT', 'EA']
    for i, (dstep, shape_idx) in enumerate(steps):
        (shape, remap) = remaps[shape_idx]
        remap_idx = remap_idxs[shape_idx]
        # zero is "disabled"
        if shape.value == 0x0:
            continue
        # now set the actual requested step to the current index
        if dstep is not None:
            yield dstep.eq(remap_idx)

        # debug printout info
        rremaps.append((shape.mode, hex(shape.value), dstep,
                        i, rnames[i], shape_idx, remap_idx))
    for x in rremaps:
        log("shape remap", x)
def check_write(self, info, name, output, carry_en, ew_dst):
    """Write one pseudocode output (by name) to its destination.

    Handles, in order: already-done outputs (overflow/CR0), XER carry
    bits, special SPRs, then general GPR/FPR write-back at destination
    element width ew_dst, including predicate destination-zeroing.
    """
    if name == 'overflow':  # ignore, done already (above)
        return
    if name == 'CR0':  # ignore, done already (above)
        return
    if isinstance(output, int):
        output = SelectableInt(output, 256)
    # carry-out flags go into XER (only when the op enables carry)
    if name in ['CA', 'CA32']:
        if carry_en:
            log("writing %s to XER" % name, output)
            log("write XER %s 0x%x" % (name, output.value))
            self.spr['XER'][XER_bits[name]] = output.value
        else:
            log("NOT writing %s to XER" % name, output)
        return
    # write special SPRs
    if name in info.special_regs:
        log('writing special %s' % name, output, special_sprs)
        log("write reg %s 0x%x" % (name, output.value))
        if name in special_sprs:
            self.spr[name] = output
        else:
            self.namespace[name].eq(output)
        if name == 'MSR':
            log('msr written', hex(self.msr.value))
        return
    # find out1/out2 PR/FPR
    regnum, is_vec = yield from get_idx_out(self.dec2, name, True)
    if regnum is None:
        regnum, is_vec = yield from get_idx_out2(self.dec2, name, True)
    if regnum is None:
        # temporary hack for not having 2nd output
        regnum = yield getattr(self.decoder, name)
        is_vec = False
    # convenient debug prefix
    reg_prefix = 'f' if name in fregs else 'r'
    # check zeroing due to predicate bit being zero
    if self.is_svp64_mode and self.pred_dst_zero:
        log('zeroing reg %s %s' % (str(regnum), str(output)), is_vec)
        output = SelectableInt(0, 256)
    log("write reg %s%s 0x%x ew %d" % (reg_prefix, str(regnum),
                                       output.value, ew_dst),
        kind=LogKind.InstrInOuts)
    # zero-extend to 64 bit before storing (should use EXT oh well)
    if output.bits > 64:
        output = SelectableInt(output.value, 64)
    if name in fregs:
        self.fpr.write(regnum, output, is_vec, ew_dst)
    else:
        self.gpr.write(regnum, output, is_vec, ew_dst)
def check_step_increment(self, rc_en, asmop, ins_name):
    """Advance (or finish) the SVP64 sub-PC loop after one instruction.

    Returns True when the PC may move to the next instruction, False
    while the src/dst step loop must keep repeating this instruction.
    """
    # check if it is the SVSTATE.src/dest step that needs incrementing
    # this is our Sub-Program-Counter loop from 0 to VL-1
    if not self.allow_next_step_inc:
        if self.is_svp64_mode:
            return (yield from self.svstate_post_inc(ins_name))

        # XXX only in non-SVP64 mode!
        # record state of whether the current operation was an svshape,
        # to be able to know if it should apply in the next instruction.
        # also (if going to use this instruction) should disable ability
        # to interrupt in between. sigh.
        self.last_op_svshape = asmop in ['svremap', 'svindex',
                                         'svshape2']
        return True

    # an explicit step-increment (svstep) was requested
    nia_update = True
    log("SVSTATE_NEXT: inc requested, mode",
        self.svstate_next_mode, self.allow_next_step_inc)
    yield from self.svstate_pre_inc()
    pre = yield from self.update_new_svstate_steps()
    if pre:
        # reset at end of loop including exit Vertical Mode
        log("SVSTATE_NEXT: end of loop, reset")
        self.svp64_reset_loop()
        self.svstate.vfirst = 0
        self.update_nia()
        # NOTE(review): rc_en guard reconstructed - confirm upstream
        if rc_en:
            self.handle_comparison(SelectableInt(0, 64))  # CR0
        return True

    if self.allow_next_step_inc == 2:
        log("SVSTATE_NEXT: read")
        nia_update = (yield from self.svstate_post_inc(ins_name))
    else:
        log("SVSTATE_NEXT: post-inc")
    # use actual (cached) src/dst-step here to check end
    remaps = self.get_remap_indices()
    remap_idxs = self.remap_idxs
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    if self.allow_next_step_inc != 2:
        yield from self.advance_svstate_steps()
    #self.namespace['SVSTATE'] = self.svstate.spr
    # set CR0 (if Rc=1) based on end
    endtest = 1 if self.at_loopend() else 0
    if rc_en:
        #results = [SelectableInt(endtest, 64)]
        # self.handle_comparison(results) # CR0

        # see if svstep was requested, if so, which SVSTATE
        endings = 0b111
        if self.svstate_next_mode > 0:
            shape_idx = self.svstate_next_mode.value - 1
            endings = self.remap_loopends[shape_idx]
        cr_field = SelectableInt((~endings) << 1 | endtest, 4)
        log("svstep Rc=1, CR0", cr_field, endtest)
        self.crl[0].eq(cr_field)  # CR0

    # reset at end of loop including exit Vertical Mode
    # NOTE(review): placement reconstructed from mangled source - confirm
    log("SVSTATE_NEXT: after increments, reset")
    self.svp64_reset_loop()
    self.svstate.vfirst = 0
    return nia_update
def SVSTATE_NEXT(self, mode, submode):
    """explicitly moves srcstep/dststep on to next element, for
    "Vertical-First" mode. this function is called from setvl
    pseudo-code, as a pseudo-op "svstep"

    WARNING: this function uses information that was created EARLIER
    due to it being in the middle of a yield, but this function is
    *NOT* called from yield (it's called from compiled pseudocode).

    Returns a 7-bit SelectableInt: the current REMAP index of the
    requested SVSHAPE (modes 1-4), one of the four step counters
    (modes 5-8, one-shot: the mode is cleared), otherwise zero.
    """
    self.allow_next_step_inc = submode.value + 1
    log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
    self.svstate_next_mode = mode
    # modes 1-4: report the REMAP index for SVSHAPE0-3
    if self.svstate_next_mode > 0 and self.svstate_next_mode < 5:
        shape_idx = self.svstate_next_mode.value - 1
        return SelectableInt(self.remap_idxs[shape_idx], 7)
    # modes 5-8: report one step counter, then clear the mode
    for mode_num, counter in ((5, "srcstep"), (6, "dststep"),
                              (7, "ssubstep"), (8, "dsubstep")):
        if self.svstate_next_mode == mode_num:
            self.svstate_next_mode = 0
            return SelectableInt(getattr(self.svstate, counter), 7)
    return SelectableInt(0, 7)
def get_src_dststeps(self):
    """Return the cached (srcstep, dststep, ssubstep, dsubstep) values
    previously computed by update_new_svstate_steps.
    """
    fields = ("new_srcstep", "new_dststep", "new_ssubstep", "new_dsubstep")
    return tuple(getattr(self, field) for field in fields)
def update_svstate_namespace(self, overwrite_svstate=True):
    """Copy the cached new steps into the SVSTATE SPR (optionally),
    publish SVSTATE into the pseudocode namespace, and push the value
    into the decoder, settling so it takes effect.
    """
    if overwrite_svstate:
        # note, do not get the bit-reversed srcstep here!
        # copy each cached "new_" counter straight into SVSTATE
        self.svstate.srcstep = self.new_srcstep
        self.svstate.dststep = self.new_dststep
        self.svstate.ssubstep = self.new_ssubstep
        self.svstate.dsubstep = self.new_dsubstep
    self.namespace['SVSTATE'] = self.svstate
    yield self.dec2.state.svstate.eq(self.svstate.value)
    yield Settle()  # let decoder update
def update_new_svstate_steps(self, overwrite_svstate=True):
    """Publish the new steps into SVSTATE/decoder, then report whether
    the SVP64 loop has ended (src/dst steps are allowed to overrun VL).
    """
    yield from self.update_svstate_namespace(overwrite_svstate)
    srcstep = self.svstate.srcstep
    dststep = self.svstate.dststep
    ssubstep = self.svstate.ssubstep
    dsubstep = self.svstate.dsubstep
    pack = self.svstate.pack
    unpack = self.svstate.unpack
    vl = self.svstate.vl
    sv_mode = yield self.dec2.rm_dec.sv_mode
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    rm_mode = yield self.dec2.rm_dec.mode
    ff_inv = yield self.dec2.rm_dec.inv
    cr_bit = yield self.dec2.rm_dec.cr_sel
    log(" srcstep", srcstep)
    log(" dststep", dststep)
    log(" pack", pack)
    log(" unpack", unpack)
    log(" ssubstep", ssubstep)
    log(" dsubstep", dsubstep)
    log(" vl", vl)
    log(" subvl", subvl)
    log(" rm_mode", rm_mode)
    log(" sv_mode", sv_mode)
    log(" ff_inv", ff_inv)
    log(" cr_bit", cr_bit)

    # check if end reached (we let srcstep overrun, above)
    # nothing needs doing (TODO zeroing): just do next instruction
    if self.loopend:
        return True
    return ((ssubstep == subvl and srcstep == vl) or
            (dsubstep == subvl and dststep == vl))
def svstate_post_inc(self, insn_name, vf=0):
    """Advance SVP64 loop state after an instruction has executed.

    Returns True when the PC may move to the next instruction, False
    when the same instruction must repeat with the advanced steps.
    """
    # check if SV "Vertical First" mode is enabled
    vfirst = self.svstate.vfirst
    log(" SV Vertical First", vf, vfirst)
    if not vf and vfirst == 1:
        # Vertical-First: stepping is done explicitly by svstep, not
        # here.  just update NIA and let the PC advance
        self.update_nia()
        return True

    # check if it is the SVSTATE.src/dest step that needs incrementing
    # this is our Sub-Program-Counter loop from 0 to VL-1
    # XXX twin predication TODO
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    mvl = self.svstate.maxvl
    srcstep = self.svstate.srcstep
    dststep = self.svstate.dststep
    ssubstep = self.svstate.ssubstep
    dsubstep = self.svstate.dsubstep
    pack = self.svstate.pack
    unpack = self.svstate.unpack
    rm_mode = yield self.dec2.rm_dec.mode
    reverse_gear = yield self.dec2.rm_dec.reverse_gear
    sv_ptype = yield self.dec2.dec.op.SV_Ptype
    out_vec = not (yield self.dec2.no_out_vec)
    in_vec = not (yield self.dec2.no_in_vec)
    log(" svstate.vl", vl)
    log(" svstate.mvl", mvl)
    log(" rm.subvl", subvl)
    log(" svstate.srcstep", srcstep)
    log(" svstate.dststep", dststep)
    log(" svstate.ssubstep", ssubstep)
    log(" svstate.dsubstep", dsubstep)
    log(" svstate.pack", pack)
    log(" svstate.unpack", unpack)
    log(" mode", rm_mode)
    log(" reverse", reverse_gear)
    log(" out_vec", out_vec)
    log(" in_vec", in_vec)
    log(" sv_ptype", sv_ptype, sv_ptype == SVPtype.P2.value)
    # check if this was an sv.bc* and if so did it succeed
    if self.is_svp64_mode and insn_name.startswith("sv.bc"):
        end_loop = self.namespace['end_loop']
        log("branch %s end_loop" % insn_name, end_loop)
        if end_loop:
            # branch exited the loop: reset and move the PC on now
            self.svp64_reset_loop()
            self.update_pc_next()
            return False
    # check if srcstep needs incrementing by one, stop PC advancing
    # but for 2-pred both src/dest have to be checked.
    # XXX this might not be true! it may just be LD/ST
    if sv_ptype == SVPtype.P2.value:
        svp64_is_vector = (out_vec or in_vec)
    else:
        svp64_is_vector = out_vec
    # loops end at the first "hit" (source or dest)
    yield from self.advance_svstate_steps()
    loopend = self.loopend
    log("loopend", svp64_is_vector, loopend)
    if not svp64_is_vector or loopend:
        # reset loop to zero and update NIA
        self.svp64_reset_loop()
        self.update_nia()
        return True

    # still looping, advance and update NIA
    self.namespace['SVSTATE'] = self.svstate

    # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
    # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
    # this way we keep repeating the same instruction (with new steps)
    self.pc.NIA.value = self.pc.CIA.value
    self.namespace['NIA'] = self.pc.NIA
    log("end of sub-pc call", self.namespace['CIA'], self.namespace['NIA'])
    return False  # DO NOT allow PC update whilst Sub-PC loop running
def update_pc_next(self):
    """Advance the Program Counter from the pseudocode namespace
    (SVP64-aware: +8 step vs +4), then report the final PC state.
    """
    self.pc.update(self.namespace, self.is_svp64_mode)
    #self.svstate.spr = self.namespace['SVSTATE']
    report = [self.namespace[key] for key in ('CIA', 'NIA', 'SVSTATE')]
    log("end of call", *report)
def svp64_reset_loop(self):
    """Zero all four SVP64 loop counters, clear the loop-end flag, and
    publish the reset SVSTATE into the pseudocode namespace.
    """
    for counter in ('srcstep', 'dststep', 'ssubstep', 'dsubstep'):
        setattr(self.svstate, counter, 0)
    self.loopend = False
    log(" svstate.srcstep loop end (PC to update)")
    self.namespace['SVSTATE'] = self.svstate
def update_nia(self):
    """Recompute NIA on the PC object (SVP64-aware) and publish it into
    the pseudocode namespace.
    """
    pc = self.pc
    pc.update_nia(self.is_svp64_mode)
    self.namespace['NIA'] = pc.NIA
def inject():
    """Decorator factory.

    this decorator will "inject" variables into the function's namespace,
    from the *dictionary* in self.namespace. it therefore becomes possible
    to make it look like a whole stack of variables which would otherwise
    need "self." inserted in front of them (*and* for those variables to be
    added to the instance) "appear" in the function.

    "self.namespace['SI']" for example becomes accessible as just "SI" but
    *only* inside the function, when decorated.
    """
    def variable_injector(func):
        @wraps(func)
        def decorator(*args, **kwargs):
            try:
                func_globals = func.__globals__  # Python 2.6+
            except AttributeError:
                func_globals = func.func_globals  # Earlier versions.

            context = args[0].namespace  # variables to be injected
            saved_values = func_globals.copy()  # Shallow copy of dict.
            log("globals before", context.keys())
            func_globals.update(context)
            result = func(*args, **kwargs)
            log("globals after", func_globals['CIA'], func_globals['NIA'])
            log("args[0]", args[0].namespace['CIA'],
                args[0].namespace['NIA'],
                args[0].namespace['SVSTATE'])
            if 'end_loop' in func_globals:
                log("args[0] end_loop", func_globals['end_loop'])
            # capture the (mutated) globals back as the live namespace
            args[0].namespace = func_globals
            #exec (func.__code__, func_globals)

            # deliberately NOT undone (the mutated globals are the point):
            # func_globals = saved_values  # Undo changes.

            return result

        return decorator

    return variable_injector