1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Copyright (C) 2020 Michael Nolan
4 # Funded by NLnet http://nlnet.nl
5 """core of the python-based POWER9 simulator
7 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
8 not speed, it is for both learning and educational purposes, as well as
9 a method of verifying the HDL.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=424
16 from collections
import namedtuple
17 from copy
import deepcopy
18 from functools
import wraps
20 from nmigen
.sim
import Settle
21 from openpower
.consts
import (MSRb
, PIb
, # big-endian (PowerISA versions)
22 SVP64CROffs
, SVP64MODEb
)
23 from openpower
.decoder
.helpers
import (ISACallerHelper
, ISAFPHelpers
, exts
,
25 from openpower
.decoder
.isa
.mem
import Mem
, MemException
26 from openpower
.decoder
.isa
.radixmmu
import RADIX
27 from openpower
.decoder
.isa
.svshape
import SVSHAPE
28 from openpower
.decoder
.isa
.svstate
import SVP64State
29 from openpower
.decoder
.orderedset
import OrderedSet
30 from openpower
.decoder
.power_enums
import (FPTRANS_INSNS
, CRInSel
, CROutSel
,
31 In1Sel
, In2Sel
, In3Sel
, LDSTMode
,
32 MicrOp
, OutSel
, SVMode
,
33 SVP64LDSTmode
, SVP64PredCR
,
34 SVP64PredInt
, SVP64PredMode
,
35 SVP64RMMode
, SVPType
, XER_bits
,
36 insns
, spr_byname
, spr_dict
,
38 from openpower
.insndb
.types
import SVP64Instruction
39 from openpower
.decoder
.power_svp64
import SVP64RM
, decode_extra
40 from openpower
.decoder
.selectable_int
import (FieldSelectableInt
,
41 SelectableInt
, selectconcat
,
42 EFFECTIVELY_UNLIMITED
)
43 from openpower
.fpscr
import FPSCRState
44 from openpower
.xer
import XERState
45 from openpower
.util
import LogKind
, log
47 LDST_UPDATE_INSNS
= ['ldu', 'lwzu', 'lbzu', 'lhzu', 'lhau', 'lfsu', 'lfdu',
48 'stwu', 'stbu', 'sthu', 'stfsu', 'stfdu', 'stdu',
# Metadata record for one decoded instruction implementation: the python
# function itself plus the register/field information extracted for it
# by the pywriter compiler.
instruction_info = namedtuple(
    'instruction_info',
    'func read_regs uninit_regs write_regs '
    'special_regs op_fields form asmregs')
64 # rrright. this is here basically because the compiler pywriter returns
65 # results in a specific priority order. to make sure regs match up they
66 # need partial sorting. sigh.
68 # TODO (lkcl): adjust other registers that should be in a particular order
69 # probably CA, CA32, and CR
97 "overflow": 7, # should definitely be last
# names of the floating-point register operands used by FP instructions
fregs = 'FRA FRB FRC FRS FRT'.split()
def get_masked_reg(regs, base, offs, ew_bits):
    """Read one element of width *ew_bits* from a bank of 64-bit registers.

    The registers starting at ``regs[base]`` are treated as a contiguous
    array of ew_bits-wide elements; *offs* is the element index within
    that array.  Returns the element value as a plain int.
    """
    # rrrright. start by breaking down into row/col, based on elwidth:
    # which 64-bit register holds the element, and which slot within it
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # shift down so element we want is at LSB
    val >>= gpr_col * ew_bits
    # mask so we only return the LSB element
    return val & mask
def set_masked_reg(regs, base, offs, ew_bits, value):
    """Write one element of width *ew_bits* into a bank of 64-bit registers.

    Mirror of get_masked_reg: the registers starting at ``regs[base]`` are
    treated as an array of ew_bits-wide elements and *offs* selects which
    one to overwrite.  *value* is truncated to ew_bits; *regs* is modified
    in-place.
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64//ew_bits)
    gpr_col = offs % (64//ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits)-1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base+gpr_offs]
    # now mask out the bit we don't want
    val = val & ~(mask << (gpr_col*ew_bits))
    # then wipe the bit we don't want from the value
    value = value & mask
    # OR the new value in, shifted up
    val |= value << (gpr_col*ew_bits)
    regs[base+gpr_offs] = val
def create_args(reglist, extra=None):
    """Return *reglist* deduplicated (order-preserving) and partially
    sorted by REG_SORT_ORDER, optionally with *extra* prepended.

    list.sort is stable, so registers that share a sort key keep their
    relative order; registers absent from REG_SORT_ORDER use key 0.
    """
    # deduplicate while preserving first-seen order
    retval = list(OrderedSet(reglist))
    # partial sort: the pywriter compiler returns results in a specific
    # priority order, so regs must be arranged to match
    retval.sort(key=lambda reg: REG_SORT_ORDER.get(reg, 0))
    if extra is not None:
        return [extra] + retval
    return retval
144 def __init__(self
, decoder
, isacaller
, svstate
, regfile
):
147 self
.isacaller
= isacaller
148 self
.svstate
= svstate
149 for i
in range(len(regfile
)):
150 self
[i
] = SelectableInt(regfile
[i
], 64)
152 def __call__(self
, ridx
, is_vec
=False, offs
=0, elwidth
=64):
153 if isinstance(ridx
, SelectableInt
):
156 return self
[ridx
+offs
]
157 # rrrright. start by breaking down into row/col, based on elwidth
158 gpr_offs
= offs
// (64//elwidth
)
159 gpr_col
= offs
% (64//elwidth
)
160 # now select the 64-bit register, but get its value (easier)
161 val
= self
[ridx
+gpr_offs
].value
162 # now shift down and mask out
163 val
= val
>> (gpr_col
*elwidth
) & ((1 << elwidth
)-1)
164 # finally, return a SelectableInt at the required elwidth
165 log("GPR call", ridx
, "isvec", is_vec
, "offs", offs
,
166 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
))
167 return SelectableInt(val
, elwidth
)
169 def set_form(self
, form
):
172 def write(self
, rnum
, value
, is_vec
=False, elwidth
=64):
174 if isinstance(rnum
, SelectableInt
):
176 if isinstance(value
, SelectableInt
):
179 if isinstance(rnum
, tuple):
180 rnum
, base
, offs
= rnum
183 # rrrright. start by breaking down into row/col, based on elwidth
184 gpr_offs
= offs
// (64//elwidth
)
185 gpr_col
= offs
% (64//elwidth
)
186 # compute the mask based on elwidth
187 mask
= (1 << elwidth
)-1
188 # now select the 64-bit register, but get its value (easier)
189 val
= self
[base
+gpr_offs
].value
190 # now mask out the bit we don't want
191 val
= val
& ~
(mask
<< (gpr_col
*elwidth
))
192 # then wipe the bit we don't want from the value
194 # OR the new value in, shifted up
195 val |
= value
<< (gpr_col
*elwidth
)
196 # finally put the damn value into the regfile
197 log("GPR write", base
, "isvec", is_vec
, "offs", offs
,
198 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
),
200 dict.__setitem
__(self
, base
+gpr_offs
, SelectableInt(val
, 64))
202 def __setitem__(self
, rnum
, value
):
203 # rnum = rnum.value # only SelectableInt allowed
204 log("GPR setitem", rnum
, value
)
205 if isinstance(rnum
, SelectableInt
):
207 dict.__setitem
__(self
, rnum
, value
)
209 def getz(self
, rnum
):
210 # rnum = rnum.value # only SelectableInt allowed
211 log("GPR getzero?", rnum
)
213 return SelectableInt(0, 64)
216 def _get_regnum(self
, attr
):
217 getform
= self
.sd
.sigforms
[self
.form
]
218 rnum
= getattr(getform
, attr
)
221 def ___getitem__(self
, attr
):
222 """ XXX currently not used
224 rnum
= self
._get
_regnum
(attr
)
225 log("GPR getitem", attr
, rnum
)
226 return self
.regfile
[rnum
]
228 def dump(self
, printout
=True):
230 for i
in range(len(self
)):
231 res
.append(self
[i
].value
)
233 for i
in range(0, len(res
), 8):
236 s
.append("%08x" % res
[i
+j
])
238 print("reg", "%2d" % i
, s
)
243 def __init__(self
, dec2
, initial_sprs
={}):
246 for key
, v
in initial_sprs
.items():
247 if isinstance(key
, SelectableInt
):
249 key
= special_sprs
.get(key
, key
)
250 if isinstance(key
, int):
253 info
= spr_byname
[key
]
254 if not isinstance(v
, SelectableInt
):
255 v
= SelectableInt(v
, info
.length
)
258 def __getitem__(self
, key
):
260 log("dict", self
.items())
261 # if key in special_sprs get the special spr, otherwise return key
262 if isinstance(key
, SelectableInt
):
264 if isinstance(key
, int):
265 key
= spr_dict
[key
].SPR
266 key
= special_sprs
.get(key
, key
)
267 if key
== 'HSRR0': # HACK!
269 if key
== 'HSRR1': # HACK!
272 res
= dict.__getitem
__(self
, key
)
274 if isinstance(key
, int):
277 info
= spr_byname
[key
]
278 self
[key
] = SelectableInt(0, info
.length
)
279 res
= dict.__getitem
__(self
, key
)
280 log("spr returning", key
, res
)
283 def __setitem__(self
, key
, value
):
284 if isinstance(key
, SelectableInt
):
286 if isinstance(key
, int):
287 key
= spr_dict
[key
].SPR
289 key
= special_sprs
.get(key
, key
)
290 if key
== 'HSRR0': # HACK!
291 self
.__setitem
__('SRR0', value
)
292 if key
== 'HSRR1': # HACK!
293 self
.__setitem
__('SRR1', value
)
295 value
= XERState(value
)
296 log("setting spr", key
, value
)
297 dict.__setitem
__(self
, key
, value
)
299 def __call__(self
, ridx
):
302 def dump(self
, printout
=True):
304 keys
= list(self
.keys())
307 sprname
= spr_dict
.get(k
, None)
311 sprname
= sprname
.SPR
312 res
.append((sprname
, self
[k
].value
))
314 for sprname
, value
in res
:
315 print(" ", sprname
, hex(value
))
    def __init__(self, pc_init=0):
        """Program-counter state: CIA (current instruction address) and
        NIA (next instruction address), both 64-bit.

        NIA starts at CIA+4; update_nia() below switches the increment
        to 8 for prefixed SVP64 instructions.
        """
        self.CIA = SelectableInt(pc_init, 64)
        self.NIA = self.CIA + SelectableInt(4, 64)  # only true for v3.0B!
324 def update_nia(self
, is_svp64
):
325 increment
= 8 if is_svp64
else 4
326 self
.NIA
= self
.CIA
+ SelectableInt(increment
, 64)
328 def update(self
, namespace
, is_svp64
):
329 """updates the program counter (PC) by 4 if v3.0B mode or 8 if SVP64
331 self
.CIA
= namespace
['NIA'].narrow(64)
332 self
.update_nia(is_svp64
)
333 namespace
['CIA'] = self
.CIA
334 namespace
['NIA'] = self
.NIA
338 # See PowerISA Version 3.0 B Book 1
339 # Section 2.3.1 Condition Register pages 30 - 31
341 LT
= FL
= 0 # negative, less than, floating-point less than
342 GT
= FG
= 1 # positive, greater than, floating-point greater than
343 EQ
= FE
= 2 # equal, floating-point equal
344 SO
= FU
= 3 # summary overflow, floating-point unordered
346 def __init__(self
, init
=0):
347 # rev_cr = int('{:016b}'.format(initial_cr)[::-1], 2)
348 # self.cr = FieldSelectableInt(self._cr, list(range(32, 64)))
349 self
.cr
= SelectableInt(init
, 64) # underlying reg
350 # field-selectable versions of Condition Register TODO check bitranges?
353 bits
= tuple(range(i
*4+32, (i
+1)*4+32))
354 _cr
= FieldSelectableInt(self
.cr
, bits
)
358 # decode SVP64 predicate integer to reg number and invert
359 def get_predint(gpr
, mask
):
363 log("get_predint", mask
, SVP64PredInt
.ALWAYS
.value
)
364 if mask
== SVP64PredInt
.ALWAYS
.value
:
365 return 0xffff_ffff_ffff_ffff # 64 bits of 1
366 if mask
== SVP64PredInt
.R3_UNARY
.value
:
367 return 1 << (r3
.value
& 0b111111)
368 if mask
== SVP64PredInt
.R3
.value
:
370 if mask
== SVP64PredInt
.R3_N
.value
:
372 if mask
== SVP64PredInt
.R10
.value
:
374 if mask
== SVP64PredInt
.R10_N
.value
:
376 if mask
== SVP64PredInt
.R30
.value
:
378 if mask
== SVP64PredInt
.R30_N
.value
:
382 # decode SVP64 predicate CR to reg number and invert status
383 def _get_predcr(mask
):
384 if mask
== SVP64PredCR
.LT
.value
:
386 if mask
== SVP64PredCR
.GE
.value
:
388 if mask
== SVP64PredCR
.GT
.value
:
390 if mask
== SVP64PredCR
.LE
.value
:
392 if mask
== SVP64PredCR
.EQ
.value
:
394 if mask
== SVP64PredCR
.NE
.value
:
396 if mask
== SVP64PredCR
.SO
.value
:
398 if mask
== SVP64PredCR
.NS
.value
:
402 # read individual CR fields (0..VL-1), extract the required bit
403 # and construct the mask
404 def get_predcr(crl
, mask
, vl
):
405 idx
, noninv
= _get_predcr(mask
)
408 cr
= crl
[i
+SVP64CROffs
.CRPred
]
409 if cr
[idx
].value
== noninv
:
414 # TODO, really should just be using PowerDecoder2
415 def get_idx_map(dec2
, name
):
417 in1_sel
= yield op
.in1_sel
418 in2_sel
= yield op
.in2_sel
419 in3_sel
= yield op
.in3_sel
420 in1
= yield dec2
.e
.read_reg1
.data
421 # identify which regnames map to in1/2/3
422 if name
== 'RA' or name
== 'RA_OR_ZERO':
423 if (in1_sel
== In1Sel
.RA
.value
or
424 (in1_sel
== In1Sel
.RA_OR_ZERO
.value
and in1
!= 0)):
426 if in1_sel
== In1Sel
.RA_OR_ZERO
.value
:
429 if in2_sel
== In2Sel
.RB
.value
:
431 if in3_sel
== In3Sel
.RB
.value
:
433 # XXX TODO, RC doesn't exist yet!
435 if in3_sel
== In3Sel
.RC
.value
:
437 elif name
in ['EA', 'RS']:
438 if in1_sel
== In1Sel
.RS
.value
:
440 if in2_sel
== In2Sel
.RS
.value
:
442 if in3_sel
== In3Sel
.RS
.value
:
445 if in1_sel
== In1Sel
.FRA
.value
:
447 if in3_sel
== In3Sel
.FRA
.value
:
450 if in2_sel
== In2Sel
.FRB
.value
:
453 if in3_sel
== In3Sel
.FRC
.value
:
456 if in1_sel
== In1Sel
.FRS
.value
:
458 if in3_sel
== In3Sel
.FRS
.value
:
461 if in1_sel
== In1Sel
.FRT
.value
:
464 if in1_sel
== In1Sel
.RT
.value
:
469 # TODO, really should just be using PowerDecoder2
470 def get_idx_in(dec2
, name
, ewmode
=False):
471 idx
= yield from get_idx_map(dec2
, name
)
475 in1_sel
= yield op
.in1_sel
476 in2_sel
= yield op
.in2_sel
477 in3_sel
= yield op
.in3_sel
478 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
479 in1
= yield dec2
.e
.read_reg1
.data
480 in2
= yield dec2
.e
.read_reg2
.data
481 in3
= yield dec2
.e
.read_reg3
.data
483 in1_base
= yield dec2
.e
.read_reg1
.base
484 in2_base
= yield dec2
.e
.read_reg2
.base
485 in3_base
= yield dec2
.e
.read_reg3
.base
486 in1_offs
= yield dec2
.e
.read_reg1
.offs
487 in2_offs
= yield dec2
.e
.read_reg2
.offs
488 in3_offs
= yield dec2
.e
.read_reg3
.offs
489 in1
= (in1
, in1_base
, in1_offs
)
490 in2
= (in2
, in2_base
, in2_offs
)
491 in3
= (in3
, in3_base
, in3_offs
)
493 in1_isvec
= yield dec2
.in1_isvec
494 in2_isvec
= yield dec2
.in2_isvec
495 in3_isvec
= yield dec2
.in3_isvec
496 log("get_idx_in in1", name
, in1_sel
, In1Sel
.RA
.value
,
498 log("get_idx_in in2", name
, in2_sel
, In2Sel
.RB
.value
,
500 log("get_idx_in in3", name
, in3_sel
, In3Sel
.RS
.value
,
502 log("get_idx_in FRS in3", name
, in3_sel
, In3Sel
.FRS
.value
,
504 log("get_idx_in FRB in2", name
, in2_sel
, In2Sel
.FRB
.value
,
506 log("get_idx_in FRC in3", name
, in3_sel
, In3Sel
.FRC
.value
,
509 return in1
, in1_isvec
511 return in2
, in2_isvec
513 return in3
, in3_isvec
517 # TODO, really should just be using PowerDecoder2
518 def get_cr_in(dec2
, name
):
520 in_sel
= yield op
.cr_in
521 in_bitfield
= yield dec2
.dec_cr_in
.cr_bitfield
.data
522 sv_cr_in
= yield op
.sv_cr_in
523 spec
= yield dec2
.crin_svdec
.spec
524 sv_override
= yield dec2
.dec_cr_in
.sv_override
525 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
526 in1
= yield dec2
.e
.read_cr1
.data
527 cr_isvec
= yield dec2
.cr_in_isvec
528 log("get_cr_in", in_sel
, CROutSel
.CR0
.value
, in1
, cr_isvec
)
529 log(" sv_cr_in", sv_cr_in
)
530 log(" cr_bf", in_bitfield
)
532 log(" override", sv_override
)
533 # identify which regnames map to in / o2
535 if in_sel
== CRInSel
.BI
.value
:
537 log("get_cr_in not found", name
)
541 # TODO, really should just be using PowerDecoder2
542 def get_cr_out(dec2
, name
):
544 out_sel
= yield op
.cr_out
545 out_bitfield
= yield dec2
.dec_cr_out
.cr_bitfield
.data
546 sv_cr_out
= yield op
.sv_cr_out
547 spec
= yield dec2
.crout_svdec
.spec
548 sv_override
= yield dec2
.dec_cr_out
.sv_override
549 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
550 out
= yield dec2
.e
.write_cr
.data
551 o_isvec
= yield dec2
.cr_out_isvec
552 log("get_cr_out", out_sel
, CROutSel
.CR0
.value
, out
, o_isvec
)
553 log(" sv_cr_out", sv_cr_out
)
554 log(" cr_bf", out_bitfield
)
556 log(" override", sv_override
)
557 # identify which regnames map to out / o2
559 if out_sel
== CROutSel
.BF
.value
:
562 if out_sel
== CROutSel
.CR0
.value
:
564 if name
== 'CR1': # these are not actually calculated correctly
565 if out_sel
== CROutSel
.CR1
.value
:
567 # check RC1 set? if so return implicit vector, this is a REAL bad hack
568 RC1
= yield dec2
.rm_dec
.RC1
570 log("get_cr_out RC1 mode")
572 return 0, True # XXX TODO: offset CR0 from SVSTATE SPR
574 return 1, True # XXX TODO: offset CR1 from SVSTATE SPR
576 log("get_cr_out not found", name
)
580 # TODO, really should just be using PowerDecoder2
581 def get_out_map(dec2
, name
):
583 out_sel
= yield op
.out_sel
584 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
585 out
= yield dec2
.e
.write_reg
.data
586 # identify which regnames map to out / o2
588 if out_sel
== OutSel
.RA
.value
:
591 if out_sel
== OutSel
.RT
.value
:
593 if out_sel
== OutSel
.RT_OR_ZERO
.value
and out
!= 0:
595 elif name
== 'RT_OR_ZERO':
596 if out_sel
== OutSel
.RT_OR_ZERO
.value
:
599 if out_sel
== OutSel
.FRA
.value
:
602 if out_sel
== OutSel
.FRS
.value
:
605 if out_sel
== OutSel
.FRT
.value
:
610 # TODO, really should just be using PowerDecoder2
611 def get_idx_out(dec2
, name
, ewmode
=False):
613 out_sel
= yield op
.out_sel
614 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
615 out
= yield dec2
.e
.write_reg
.data
616 o_isvec
= yield dec2
.o_isvec
618 offs
= yield dec2
.e
.write_reg
.offs
619 base
= yield dec2
.e
.write_reg
.base
620 out
= (out
, base
, offs
)
621 # identify which regnames map to out / o2
622 ismap
= yield from get_out_map(dec2
, name
)
624 log("get_idx_out", name
, out_sel
, out
, o_isvec
)
626 log("get_idx_out not found", name
, out_sel
, out
, o_isvec
)
630 # TODO, really should just be using PowerDecoder2
631 def get_out2_map(dec2
, name
):
632 # check first if register is activated for write
634 out_sel
= yield op
.out_sel
635 out
= yield dec2
.e
.write_ea
.data
636 out_ok
= yield dec2
.e
.write_ea
.ok
640 if name
in ['EA', 'RA']:
641 if hasattr(op
, "upd"):
642 # update mode LD/ST uses read-reg A also as an output
644 log("get_idx_out2", upd
, LDSTMode
.update
.value
,
645 out_sel
, OutSel
.RA
.value
,
647 if upd
== LDSTMode
.update
.value
:
650 fft_en
= yield dec2
.implicit_rs
652 log("get_idx_out2", out_sel
, OutSel
.RS
.value
,
656 fft_en
= yield dec2
.implicit_rs
658 log("get_idx_out2", out_sel
, OutSel
.FRS
.value
,
664 # TODO, really should just be using PowerDecoder2
665 def get_idx_out2(dec2
, name
, ewmode
=False):
666 # check first if register is activated for write
668 out_sel
= yield op
.out_sel
669 out
= yield dec2
.e
.write_ea
.data
671 offs
= yield dec2
.e
.write_ea
.offs
672 base
= yield dec2
.e
.write_ea
.base
673 out
= (out
, base
, offs
)
674 o_isvec
= yield dec2
.o2_isvec
675 ismap
= yield from get_out2_map(dec2
, name
)
677 log("get_idx_out2", name
, out_sel
, out
, o_isvec
)
683 """deals with svstate looping.
686 def __init__(self
, svstate
):
687 self
.svstate
= svstate
690 def new_iterators(self
):
691 self
.src_it
= self
.src_iterator()
692 self
.dst_it
= self
.dst_iterator()
696 self
.new_ssubstep
= 0
697 self
.new_dsubstep
= 0
698 self
.pred_dst_zero
= 0
699 self
.pred_src_zero
= 0
701 def src_iterator(self
):
702 """source-stepping iterator
704 pack
= self
.svstate
.pack
708 # pack advances subvl in *outer* loop
709 while True: # outer subvl loop
710 while True: # inner vl loop
713 srcmask
= self
.srcmask
714 srcstep
= self
.svstate
.srcstep
715 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
716 if self
.pred_sz
or pred_src_zero
:
717 self
.pred_src_zero
= not pred_src_zero
718 log(" advance src", srcstep
, vl
,
719 self
.svstate
.ssubstep
, subvl
)
720 # yield actual substep/srcstep
721 yield (self
.svstate
.ssubstep
, srcstep
)
722 # the way yield works these could have been modified.
725 srcstep
= self
.svstate
.srcstep
726 log(" advance src check", srcstep
, vl
,
727 self
.svstate
.ssubstep
, subvl
, srcstep
== vl
-1,
728 self
.svstate
.ssubstep
== subvl
)
729 if srcstep
== vl
-1: # end-point
730 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
731 if self
.svstate
.ssubstep
== subvl
: # end-point
732 log(" advance pack stop")
734 break # exit inner loop
735 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance ss
737 if self
.svstate
.ssubstep
== subvl
: # end-point
738 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
739 log(" advance pack stop")
741 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
744 # these cannot be done as for-loops because SVSTATE may change
745 # (srcstep/substep may be modified, interrupted, subvl/vl change)
746 # but they *can* be done as while-loops as long as every SVSTATE
747 # "thing" is re-read every single time a yield gives indices
748 while True: # outer vl loop
749 while True: # inner subvl loop
752 srcmask
= self
.srcmask
753 srcstep
= self
.svstate
.srcstep
754 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
755 if self
.pred_sz
or pred_src_zero
:
756 self
.pred_src_zero
= not pred_src_zero
757 log(" advance src", srcstep
, vl
,
758 self
.svstate
.ssubstep
, subvl
)
759 # yield actual substep/srcstep
760 yield (self
.svstate
.ssubstep
, srcstep
)
761 if self
.svstate
.ssubstep
== subvl
: # end-point
762 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
763 break # exit inner loop
764 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
766 if srcstep
== vl
-1: # end-point
767 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
770 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance srcstep
772 def dst_iterator(self
):
773 """dest-stepping iterator
775 unpack
= self
.svstate
.unpack
779 # pack advances subvl in *outer* loop
780 while True: # outer subvl loop
781 while True: # inner vl loop
784 dstmask
= self
.dstmask
785 dststep
= self
.svstate
.dststep
786 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
787 if self
.pred_dz
or pred_dst_zero
:
788 self
.pred_dst_zero
= not pred_dst_zero
789 log(" advance dst", dststep
, vl
,
790 self
.svstate
.dsubstep
, subvl
)
791 # yield actual substep/dststep
792 yield (self
.svstate
.dsubstep
, dststep
)
793 # the way yield works these could have been modified.
795 dststep
= self
.svstate
.dststep
796 log(" advance dst check", dststep
, vl
,
797 self
.svstate
.ssubstep
, subvl
)
798 if dststep
== vl
-1: # end-point
799 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
800 if self
.svstate
.dsubstep
== subvl
: # end-point
801 log(" advance unpack stop")
804 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance ds
806 if self
.svstate
.dsubstep
== subvl
: # end-point
807 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
808 log(" advance unpack stop")
810 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
812 # these cannot be done as for-loops because SVSTATE may change
813 # (dststep/substep may be modified, interrupted, subvl/vl change)
814 # but they *can* be done as while-loops as long as every SVSTATE
815 # "thing" is re-read every single time a yield gives indices
816 while True: # outer vl loop
817 while True: # inner subvl loop
819 dstmask
= self
.dstmask
820 dststep
= self
.svstate
.dststep
821 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
822 if self
.pred_dz
or pred_dst_zero
:
823 self
.pred_dst_zero
= not pred_dst_zero
824 log(" advance dst", dststep
, self
.svstate
.vl
,
825 self
.svstate
.dsubstep
, subvl
)
826 # yield actual substep/dststep
827 yield (self
.svstate
.dsubstep
, dststep
)
828 if self
.svstate
.dsubstep
== subvl
: # end-point
829 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
831 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
834 if dststep
== vl
-1: # end-point
835 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
837 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance dststep
839 def src_iterate(self
):
840 """source-stepping iterator
844 pack
= self
.svstate
.pack
845 unpack
= self
.svstate
.unpack
846 ssubstep
= self
.svstate
.ssubstep
847 end_ssub
= ssubstep
== subvl
848 end_src
= self
.svstate
.srcstep
== vl
-1
849 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
853 srcstep
= self
.svstate
.srcstep
854 srcmask
= self
.srcmask
856 # pack advances subvl in *outer* loop
858 assert srcstep
<= vl
-1
859 end_src
= srcstep
== vl
-1
864 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
868 srcstep
+= 1 # advance srcstep
869 if not self
.srcstep_skip
:
871 if ((1 << srcstep
) & srcmask
) != 0:
874 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
876 # advance subvl in *inner* loop
879 assert srcstep
<= vl
-1
880 end_src
= srcstep
== vl
-1
881 if end_src
: # end-point
887 if not self
.srcstep_skip
:
889 if ((1 << srcstep
) & srcmask
) != 0:
892 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
893 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
896 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
898 self
.svstate
.srcstep
= SelectableInt(srcstep
, 7)
899 log(" advance src", self
.svstate
.srcstep
, self
.svstate
.ssubstep
,
902 def dst_iterate(self
):
903 """dest step iterator
907 pack
= self
.svstate
.pack
908 unpack
= self
.svstate
.unpack
909 dsubstep
= self
.svstate
.dsubstep
910 end_dsub
= dsubstep
== subvl
911 dststep
= self
.svstate
.dststep
912 end_dst
= dststep
== vl
-1
913 dstmask
= self
.dstmask
914 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
919 # unpack advances subvl in *outer* loop
921 assert dststep
<= vl
-1
922 end_dst
= dststep
== vl
-1
927 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
931 dststep
+= 1 # advance dststep
932 if not self
.dststep_skip
:
934 if ((1 << dststep
) & dstmask
) != 0:
937 log(" dskip", bin(dstmask
), bin(1 << dststep
))
939 # advance subvl in *inner* loop
942 assert dststep
<= vl
-1
943 end_dst
= dststep
== vl
-1
944 if end_dst
: # end-point
950 if not self
.dststep_skip
:
952 if ((1 << dststep
) & dstmask
) != 0:
955 log(" dskip", bin(dstmask
), bin(1 << dststep
))
956 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
959 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
961 self
.svstate
.dststep
= SelectableInt(dststep
, 7)
962 log(" advance dst", self
.svstate
.dststep
, self
.svstate
.dsubstep
,
965 def at_loopend(self
):
966 """tells if this is the last possible element. uses the cached values
967 for src/dst-step and sub-steps
971 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
972 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
973 end_ssub
= ssubstep
== subvl
974 end_dsub
= dsubstep
== subvl
975 if srcstep
== vl
-1 and end_ssub
:
977 if dststep
== vl
-1 and end_dsub
:
981 def advance_svstate_steps(self
):
982 """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
983 TODO when Pack/Unpack is set, substep becomes the *outer* loop
985 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
986 if self
.loopend
: # huhn??
991 def read_src_mask(self
):
992 """read/update pred_sz and src mask
994 # get SVSTATE VL (oh and print out some debug stuff)
996 srcstep
= self
.svstate
.srcstep
997 ssubstep
= self
.svstate
.ssubstep
999 # get predicate mask (all 64 bits)
1000 srcmask
= 0xffff_ffff_ffff_ffff
1002 pmode
= yield self
.dec2
.rm_dec
.predmode
1003 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1004 srcpred
= yield self
.dec2
.rm_dec
.srcpred
1005 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1006 pred_sz
= yield self
.dec2
.rm_dec
.pred_sz
1007 if pmode
== SVP64PredMode
.INT
.value
:
1008 srcmask
= dstmask
= get_predint(self
.gpr
, dstpred
)
1009 if sv_ptype
== SVPType
.P2
.value
:
1010 srcmask
= get_predint(self
.gpr
, srcpred
)
1011 elif pmode
== SVP64PredMode
.CR
.value
:
1012 srcmask
= dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1013 if sv_ptype
== SVPType
.P2
.value
:
1014 srcmask
= get_predcr(self
.crl
, srcpred
, vl
)
1015 # work out if the ssubsteps are completed
1016 ssubstart
= ssubstep
== 0
1017 log(" pmode", pmode
)
1018 log(" ptype", sv_ptype
)
1019 log(" srcpred", bin(srcpred
))
1020 log(" srcmask", bin(srcmask
))
1021 log(" pred_sz", bin(pred_sz
))
1022 log(" ssubstart", ssubstart
)
1024 # store all that above
1025 self
.srcstep_skip
= False
1026 self
.srcmask
= srcmask
1027 self
.pred_sz
= pred_sz
1028 self
.new_ssubstep
= ssubstep
1029 log(" new ssubstep", ssubstep
)
1030 # until the predicate mask has a "1" bit... or we run out of VL
1031 # let srcstep==VL be the indicator to move to next instruction
1033 self
.srcstep_skip
= True
1035 def read_dst_mask(self
):
1036 """same as read_src_mask - check and record everything needed
1038 # get SVSTATE VL (oh and print out some debug stuff)
1039 # yield Delay(1e-10) # make changes visible
1040 vl
= self
.svstate
.vl
1041 dststep
= self
.svstate
.dststep
1042 dsubstep
= self
.svstate
.dsubstep
1044 # get predicate mask (all 64 bits)
1045 dstmask
= 0xffff_ffff_ffff_ffff
1047 pmode
= yield self
.dec2
.rm_dec
.predmode
1048 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
1049 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1050 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1051 pred_dz
= yield self
.dec2
.rm_dec
.pred_dz
1052 if pmode
== SVP64PredMode
.INT
.value
:
1053 dstmask
= get_predint(self
.gpr
, dstpred
)
1054 elif pmode
== SVP64PredMode
.CR
.value
:
1055 dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1056 # work out if the ssubsteps are completed
1057 dsubstart
= dsubstep
== 0
1058 log(" pmode", pmode
)
1059 log(" ptype", sv_ptype
)
1060 log(" dstpred", bin(dstpred
))
1061 log(" dstmask", bin(dstmask
))
1062 log(" pred_dz", bin(pred_dz
))
1063 log(" dsubstart", dsubstart
)
1065 self
.dststep_skip
= False
1066 self
.dstmask
= dstmask
1067 self
.pred_dz
= pred_dz
1068 self
.new_dsubstep
= dsubstep
1069 log(" new dsubstep", dsubstep
)
1071 self
.dststep_skip
= True
1073 def svstate_pre_inc(self
):
1074 """check if srcstep/dststep need to skip over masked-out predicate bits
1075 note that this is not supposed to do anything to substep,
1076 it is purely for skipping masked-out bits
1079 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1080 yield from self
.read_src_mask()
1081 yield from self
.read_dst_mask()
1088 srcstep
= self
.svstate
.srcstep
1089 srcmask
= self
.srcmask
1090 pred_src_zero
= self
.pred_sz
1091 vl
= self
.svstate
.vl
1092 # srcstep-skipping opportunity identified
1093 if self
.srcstep_skip
:
1094 # cannot do this with sv.bc - XXX TODO
1097 while (((1 << srcstep
) & srcmask
) == 0) and (srcstep
!= vl
):
1098 log(" sskip", bin(1 << srcstep
))
1101 # now work out if the relevant mask bits require zeroing
1103 pred_src_zero
= ((1 << srcstep
) & srcmask
) == 0
1105 # store new srcstep / dststep
1106 self
.new_srcstep
= srcstep
1107 self
.pred_src_zero
= pred_src_zero
1108 log(" new srcstep", srcstep
)
1111 # dststep-skipping opportunity identified
1112 dststep
= self
.svstate
.dststep
1113 dstmask
= self
.dstmask
1114 pred_dst_zero
= self
.pred_dz
1115 vl
= self
.svstate
.vl
1116 if self
.dststep_skip
:
1117 # cannot do this with sv.bc - XXX TODO
1120 while (((1 << dststep
) & dstmask
) == 0) and (dststep
!= vl
):
1121 log(" dskip", bin(1 << dststep
))
1124 # now work out if the relevant mask bits require zeroing
1126 pred_dst_zero
= ((1 << dststep
) & dstmask
) == 0
1128 # store new srcstep / dststep
1129 self
.new_dststep
= dststep
1130 self
.pred_dst_zero
= pred_dst_zero
1131 log(" new dststep", dststep
)
1134 class ISACaller(ISACallerHelper
, ISAFPHelpers
, StepLoop
):
1135 # decoder2 - an instance of power_decoder2
1136 # regfile - a list of initial values for the registers
1137 # initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
1138 # respect_pc - tracks the program counter. requires initial_insns
1139 def __init__(self
, decoder2
, regfile
, initial_sprs
=None, initial_cr
=0,
1140 initial_mem
=None, initial_msr
=0,
1153 # trace log file for model output. if None do nothing
1154 self
.insnlog
= insnlog
1155 self
.insnlog_is_file
= hasattr(insnlog
, "write")
1156 if not self
.insnlog_is_file
and self
.insnlog
:
1157 self
.insnlog
= open(self
.insnlog
, "w")
1159 self
.bigendian
= bigendian
1161 self
.is_svp64_mode
= False
1162 self
.respect_pc
= respect_pc
1163 if initial_sprs
is None:
1165 if initial_mem
is None:
1167 if fpregfile
is None:
1168 fpregfile
= [0] * 32
1169 if initial_insns
is None:
1171 assert self
.respect_pc
== False, "instructions required to honor pc"
1173 log("ISACaller insns", respect_pc
, initial_insns
, disassembly
)
1174 log("ISACaller initial_msr", initial_msr
)
1176 # "fake program counter" mode (for unit testing)
1180 if isinstance(initial_mem
, tuple):
1181 self
.fake_pc
= initial_mem
[0]
1182 disasm_start
= self
.fake_pc
1184 disasm_start
= initial_pc
1186 # disassembly: we need this for now (not given from the decoder)
1187 self
.disassembly
= {}
1189 for i
, code
in enumerate(disassembly
):
1190 self
.disassembly
[i
*4 + disasm_start
] = code
1192 # set up registers, instruction memory, data memory, PC, SPRs, MSR, CR
1193 self
.svp64rm
= SVP64RM()
1194 if initial_svstate
is None:
1196 if isinstance(initial_svstate
, int):
1197 initial_svstate
= SVP64State(initial_svstate
)
1198 # SVSTATE, MSR and PC
1199 StepLoop
.__init
__(self
, initial_svstate
)
1200 self
.msr
= SelectableInt(initial_msr
, 64) # underlying reg
1202 # GPR FPR SPR registers
1203 initial_sprs
= deepcopy(initial_sprs
) # so as not to get modified
1204 self
.gpr
= GPR(decoder2
, self
, self
.svstate
, regfile
)
1205 self
.fpr
= GPR(decoder2
, self
, self
.svstate
, fpregfile
)
1206 self
.spr
= SPR(decoder2
, initial_sprs
) # initialise SPRs before MMU
1208 # set up 4 dummy SVSHAPEs if they aren't already set up
1210 sname
= 'SVSHAPE%d' % i
1211 val
= self
.spr
.get(sname
, 0)
1212 # make sure it's an SVSHAPE
1213 self
.spr
[sname
] = SVSHAPE(val
, self
.gpr
)
1214 self
.last_op_svshape
= False
1217 self
.mem
= Mem(row_bytes
=8, initial_mem
=initial_mem
, misaligned_ok
=True)
1218 self
.mem
.log_fancy(kind
=LogKind
.InstrInOuts
)
1219 self
.imem
= Mem(row_bytes
=4, initial_mem
=initial_insns
)
1220 # MMU mode, redirect underlying Mem through RADIX
1222 self
.mem
= RADIX(self
.mem
, self
)
1224 self
.imem
= RADIX(self
.imem
, self
)
1226 # TODO, needed here:
1227 # FPR (same as GPR except for FP nums)
1228 # 4.2.2 p124 FPSCR (definitely "separate" - not in SPR)
1229 # note that mffs, mcrfs, mtfsf "manage" this FPSCR
1230 self
.fpscr
= FPSCRState(initial_fpscr
)
1232 # 2.3.1 CR (and sub-fields CR0..CR6 - CR0 SO comes from XER.SO)
1233 # note that mfocrf, mfcr, mtcr, mtocrf, mcrxrx "manage" CRs
1235 # 2.3.2 LR (actually SPR #8) -- Done
1236 # 2.3.3 CTR (actually SPR #9) -- Done
1237 # 2.3.4 TAR (actually SPR #815)
1238 # 3.2.2 p45 XER (actually SPR #1) -- Done
1239 # 3.2.3 p46 p232 VRSAVE (actually SPR #256)
1241 # create CR then allow portions of it to be "selectable" (below)
1242 self
.cr_fields
= CRFields(initial_cr
)
1243 self
.cr
= self
.cr_fields
.cr
1244 self
.cr_backup
= 0 # sigh, dreadful hack: for fail-first (VLi)
1246 # "undefined", just set to variable-bit-width int (use exts "max")
1247 # self.undefined = SelectableInt(0, EFFECTIVELY_UNLIMITED)
1250 self
.namespace
.update(self
.spr
)
1251 self
.namespace
.update({'GPR': self
.gpr
,
1255 'memassign': self
.memassign
,
1258 'SVSTATE': self
.svstate
,
1259 'SVSHAPE0': self
.spr
['SVSHAPE0'],
1260 'SVSHAPE1': self
.spr
['SVSHAPE1'],
1261 'SVSHAPE2': self
.spr
['SVSHAPE2'],
1262 'SVSHAPE3': self
.spr
['SVSHAPE3'],
1265 'FPSCR': self
.fpscr
,
1266 'undefined': undefined
,
1267 'mode_is_64bit': True,
1268 'SO': XER_bits
['SO'],
1269 'XLEN': 64 # elwidth overrides
1272 for name
in BFP_FLAG_NAMES
:
1273 setattr(self
, name
, 0)
1275 # update pc to requested start point
1276 self
.set_pc(initial_pc
)
1278 # field-selectable versions of Condition Register
1279 self
.crl
= self
.cr_fields
.crl
1281 self
.namespace
["CR%d" % i
] = self
.crl
[i
]
1283 self
.decoder
= decoder2
.dec
1284 self
.dec2
= decoder2
1286 super().__init
__(XLEN
=self
.namespace
["XLEN"], FPSCR
=self
.fpscr
)
def trace(self, out):
    """Append text to the per-instruction trace log; no-op when disabled.

    out: string to write verbatim to the trace log file/stream.
    """
    sink = self.insnlog
    if sink is not None:
        sink.write(out)
1294 return self
.namespace
["XLEN"]
def call_trap(self, trap_addr, trap_bit):
    """Invoke TRAP and redirect NIA to the new execution location.

    The next instruction will begin at trap_addr.  TRAP computes
    self.trap_nia; this helper then publishes it as NIA and commits
    the PC state.
    """
    self.TRAP(trap_addr, trap_bit)
    ns = self.namespace
    ns['NIA'] = self.trap_nia
    self.pc.update(ns, self.is_svp64_mode)
def TRAP(self, trap_addr=0x700, trap_bit=PIb.TRAP):
    """Save interrupt state (PC, MSR, and TODO SVSTATE) and update MSR.

    Stores CIA in SRR0 and the current MSR in SRR1 (with trap_bit set
    in the SRR1 *copy*), optionally SVSTATE in SVSRR0, computes the
    trap NIA from KAIVB, then forces the MSR into the architected
    interrupt state.

    Callable from inside the pseudocode itself (hence the default
    arguments); when calling from inside ISACaller prefer call_trap().
    """
    # https://bugs.libre-soc.org/show_bug.cgi?id=859
    kaivb_base = self.spr['KAIVB'].value
    current_msr = self.namespace['MSR'].value
    log("TRAP:", hex(trap_addr), hex(current_msr), "kaivb", hex(kaivb_base))
    # store CIA(+4?) in SRR0, set NIA to 0x700
    # store MSR in SRR1, set MSR to um errr something, have to check spec
    # store SVSTATE (if enabled) in SVSRR0
    self.spr['SRR0'].value = self.pc.CIA.value
    self.spr['SRR1'].value = current_msr
    if self.is_svp64_mode:
        self.spr['SVSRR0'] = self.namespace['SVSTATE'].value
    self.trap_nia = SelectableInt(trap_addr | (kaivb_base & ~0x1fff), 64)
    self.spr['SRR1'][trap_bit] = 1  # change *copy* of MSR in SRR1

    # set exception bits. TODO: this should, based on the address
    # in figure 66 p1065 V3.0B and the table figure 65 p1063 set these
    # bits appropriately. however it turns out that *for now* in all
    # cases (all trap_addrs) the exact same thing is needed.
    # NOTE: the (bit, value) order below preserves the original
    # one-assignment-per-line write sequence exactly.
    for msr_bit, bit_value in ((MSRb.IR, 0), (MSRb.DR, 0), (MSRb.FE0, 0),
                               (MSRb.FE1, 0), (MSRb.EE, 0), (MSRb.RI, 0),
                               (MSRb.SF, 1), (MSRb.TM, 0), (MSRb.VEC, 0),
                               (MSRb.VSX, 0), (MSRb.PR, 0), (MSRb.FP, 0),
                               (MSRb.PMM, 0), (MSRb.TEs, 0), (MSRb.TEe, 0),
                               (MSRb.UND, 0), (MSRb.LE, 1)):
        self.msr[msr_bit] = bit_value
def memassign(self, ea, sz, val):
    """Write val (sz bytes) to data memory at effective address ea."""
    data_mem = self.mem
    data_mem.memassign(ea, sz, val)
1354 def prep_namespace(self
, insn_name
, formname
, op_fields
, xlen
):
1355 # TODO: get field names from form in decoder*1* (not decoder2)
1356 # decoder2 is hand-created, and decoder1.sigform is auto-generated
1358 # then "yield" fields only from op_fields rather than hard-coded
1360 fields
= self
.decoder
.sigforms
[formname
]
1361 log("prep_namespace", formname
, op_fields
, insn_name
)
1362 for name
in op_fields
:
1363 # CR immediates. deal with separately. needs modifying
1365 if self
.is_svp64_mode
and name
in ['BI']: # TODO, more CRs
1366 # BI is a 5-bit, must reconstruct the value
1367 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1368 sig
= getattr(fields
, name
)
1370 # low 2 LSBs (CR field selector) remain same, CR num extended
1371 assert regnum
<= 7, "sigh, TODO, 128 CR fields"
1372 val
= (val
& 0b11) |
(regnum
<< 2)
1373 elif self
.is_svp64_mode
and name
in ['BF']: # TODO, more CRs
1374 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, "BF")
1375 log('hack %s' % name
, regnum
, is_vec
)
1378 sig
= getattr(fields
, name
)
1380 # these are all opcode fields involved in index-selection of CR,
1381 # and need to do "standard" arithmetic. CR[BA+32] for example
1382 # would, if using SelectableInt, only be 5-bit.
1383 if name
in ['BF', 'BFA', 'BC', 'BA', 'BB', 'BT', 'BI']:
1384 self
.namespace
[name
] = val
1386 self
.namespace
[name
] = SelectableInt(val
, sig
.width
)
1388 self
.namespace
['XER'] = self
.spr
['XER']
1389 self
.namespace
['CA'] = self
.spr
['XER'][XER_bits
['CA']].value
1390 self
.namespace
['CA32'] = self
.spr
['XER'][XER_bits
['CA32']].value
1391 self
.namespace
['OV'] = self
.spr
['XER'][XER_bits
['OV']].value
1392 self
.namespace
['OV32'] = self
.spr
['XER'][XER_bits
['OV32']].value
1393 self
.namespace
['XLEN'] = xlen
1395 # add some SVSTATE convenience variables
1396 vl
= self
.svstate
.vl
1397 srcstep
= self
.svstate
.srcstep
1398 self
.namespace
['VL'] = vl
1399 self
.namespace
['srcstep'] = srcstep
1401 # take a copy of the CR field value: if non-VLi fail-first fails
1402 # this is because the pseudocode writes *directly* to CR. sigh
1403 self
.cr_backup
= self
.cr
.value
1405 # sv.bc* need some extra fields
1406 if self
.is_svp64_mode
and insn_name
.startswith("sv.bc"):
1407 # blegh grab bits manually
1408 mode
= yield self
.dec2
.rm_dec
.rm_in
.mode
1409 # convert to SelectableInt before test
1410 mode
= SelectableInt(mode
, 5)
1411 bc_vlset
= mode
[SVP64MODEb
.BC_VLSET
] != 0
1412 bc_vli
= mode
[SVP64MODEb
.BC_VLI
] != 0
1413 bc_snz
= mode
[SVP64MODEb
.BC_SNZ
] != 0
1414 bc_vsb
= yield self
.dec2
.rm_dec
.bc_vsb
1415 bc_lru
= yield self
.dec2
.rm_dec
.bc_lru
1416 bc_gate
= yield self
.dec2
.rm_dec
.bc_gate
1417 sz
= yield self
.dec2
.rm_dec
.pred_sz
1418 self
.namespace
['mode'] = SelectableInt(mode
, 5)
1419 self
.namespace
['ALL'] = SelectableInt(bc_gate
, 1)
1420 self
.namespace
['VSb'] = SelectableInt(bc_vsb
, 1)
1421 self
.namespace
['LRu'] = SelectableInt(bc_lru
, 1)
1422 self
.namespace
['VLSET'] = SelectableInt(bc_vlset
, 1)
1423 self
.namespace
['VLI'] = SelectableInt(bc_vli
, 1)
1424 self
.namespace
['sz'] = SelectableInt(sz
, 1)
1425 self
.namespace
['SNZ'] = SelectableInt(bc_snz
, 1)
1427 def get_kludged_op_add_ca_ov(self
, inputs
, inp_ca_ov
):
1428 """ this was not at all necessary to do. this function massively
1429 duplicates - in a laborious and complex fashion - the contents of
1430 the CSV files that were extracted two years ago from microwatt's
1431 source code. A-inversion is the "inv A" column, output inversion
1432 is the "inv out" column, carry-in equal to 0 or 1 or CA is the
1435 all of that information is available in
1436 self.instrs[ins_name].op_fields
1437 where info is usually assigned to self.instrs[ins_name]
1439 https://git.libre-soc.org/?p=openpower-isa.git;a=blob;f=openpower/isatables/minor_31.csv;hb=HEAD
1441 the immediate constants are *also* decoded correctly and placed
1442 usually by DecodeIn2Imm into operand2, as part of power_decoder2.py
1444 def ca(a
, b
, ca_in
, width
):
1445 mask
= (1 << width
) - 1
1446 y
= (a
& mask
) + (b
& mask
) + ca_in
1449 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1450 insn
= insns
.get(asmcode
)
1451 SI
= yield self
.dec2
.dec
.SI
1454 inputs
= [i
.value
for i
in inputs
]
1457 if insn
in ("add", "addo", "addc", "addco"):
1461 elif insn
== "addic" or insn
== "addic.":
1465 elif insn
in ("subf", "subfo", "subfc", "subfco"):
1469 elif insn
== "subfic":
1473 elif insn
== "adde" or insn
== "addeo":
1477 elif insn
== "subfe" or insn
== "subfeo":
1481 elif insn
== "addme" or insn
== "addmeo":
1485 elif insn
== "addze" or insn
== "addzeo":
1489 elif insn
== "subfme" or insn
== "subfmeo":
1493 elif insn
== "subfze" or insn
== "subfzeo":
1497 elif insn
== "addex":
1498 # CA[32] aren't actually written, just generate so we have
1499 # something to return
1500 ca64
= ov64
= ca(inputs
[0], inputs
[1], OV
, 64)
1501 ca32
= ov32
= ca(inputs
[0], inputs
[1], OV
, 32)
1502 return ca64
, ca32
, ov64
, ov32
1503 elif insn
== "neg" or insn
== "nego":
1508 raise NotImplementedError(
1509 "op_add kludge unimplemented instruction: ", asmcode
, insn
)
1511 ca64
= ca(a
, b
, ca_in
, 64)
1512 ca32
= ca(a
, b
, ca_in
, 32)
1513 ov64
= ca64
!= ca(a
, b
, ca_in
, 63)
1514 ov32
= ca32
!= ca(a
, b
, ca_in
, 31)
1515 return ca64
, ca32
, ov64
, ov32
1517 def handle_carry_(self
, inputs
, output
, ca
, ca32
, inp_ca_ov
):
1518 op
= yield self
.dec2
.e
.do
.insn_type
1519 if op
== MicrOp
.OP_ADD
.value
and ca
is None and ca32
is None:
1520 retval
= yield from self
.get_kludged_op_add_ca_ov(
1522 ca
, ca32
, ov
, ov32
= retval
1523 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1524 if insns
.get(asmcode
) == 'addex':
1525 # TODO: if 32-bit mode, set ov to ov32
1526 self
.spr
['XER'][XER_bits
['OV']] = ov
1527 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1529 # TODO: if 32-bit mode, set ca to ca32
1530 self
.spr
['XER'][XER_bits
['CA']] = ca
1531 self
.spr
['XER'][XER_bits
['CA32']] = ca32
1533 inv_a
= yield self
.dec2
.e
.do
.invert_in
1535 inputs
[0] = ~inputs
[0]
1537 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1539 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1540 inputs
.append(SelectableInt(imm
, 64))
1543 log("gt input", x
, output
)
1544 gt
= (gtu(x
, output
))
1547 cy
= 1 if any(gts
) else 0
1549 if ca
is None: # already written
1550 self
.spr
['XER'][XER_bits
['CA']] = cy
1553 # ARGH... different for OP_ADD... *sigh*...
1554 op
= yield self
.dec2
.e
.do
.insn_type
1555 if op
== MicrOp
.OP_ADD
.value
:
1556 res32
= (output
.value
& (1 << 32)) != 0
1557 a32
= (inputs
[0].value
& (1 << 32)) != 0
1558 if len(inputs
) >= 2:
1559 b32
= (inputs
[1].value
& (1 << 32)) != 0
1562 cy32
= res32 ^ a32 ^ b32
1563 log("CA32 ADD", cy32
)
1567 log("input", x
, output
)
1568 log(" x[32:64]", x
, x
[32:64])
1569 log(" o[32:64]", output
, output
[32:64])
1570 gt
= (gtu(x
[32:64], output
[32:64])) == SelectableInt(1, 1)
1572 cy32
= 1 if any(gts
) else 0
1573 log("CA32", cy32
, gts
)
1574 if ca32
is None: # already written
1575 self
.spr
['XER'][XER_bits
['CA32']] = cy32
1577 def handle_overflow(self
, inputs
, output
, div_overflow
, inp_ca_ov
):
1578 op
= yield self
.dec2
.e
.do
.insn_type
1579 if op
== MicrOp
.OP_ADD
.value
:
1580 retval
= yield from self
.get_kludged_op_add_ca_ov(
1582 ca
, ca32
, ov
, ov32
= retval
1583 # TODO: if 32-bit mode, set ov to ov32
1584 self
.spr
['XER'][XER_bits
['OV']] = ov
1585 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1586 self
.spr
['XER'][XER_bits
['SO']] |
= ov
1588 if hasattr(self
.dec2
.e
.do
, "invert_in"):
1589 inv_a
= yield self
.dec2
.e
.do
.invert_in
1591 inputs
[0] = ~inputs
[0]
1593 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1595 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1596 inputs
.append(SelectableInt(imm
, 64))
1597 log("handle_overflow", inputs
, output
, div_overflow
)
1598 if len(inputs
) < 2 and div_overflow
is None:
1601 # div overflow is different: it's returned by the pseudo-code
1602 # because it's more complex than can be done by analysing the output
1603 if div_overflow
is not None:
1604 ov
, ov32
= div_overflow
, div_overflow
1605 # arithmetic overflow can be done by analysing the input and output
1606 elif len(inputs
) >= 2:
1608 input_sgn
= [exts(x
.value
, x
.bits
) < 0 for x
in inputs
]
1609 output_sgn
= exts(output
.value
, output
.bits
) < 0
1610 ov
= 1 if input_sgn
[0] == input_sgn
[1] and \
1611 output_sgn
!= input_sgn
[0] else 0
1614 input32_sgn
= [exts(x
.value
, 32) < 0 for x
in inputs
]
1615 output32_sgn
= exts(output
.value
, 32) < 0
1616 ov32
= 1 if input32_sgn
[0] == input32_sgn
[1] and \
1617 output32_sgn
!= input32_sgn
[0] else 0
1619 # now update XER OV/OV32/SO
1620 so
= self
.spr
['XER'][XER_bits
['SO']]
1621 new_so
= so | ov
# sticky overflow ORs in old with new
1622 self
.spr
['XER'][XER_bits
['OV']] = ov
1623 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1624 self
.spr
['XER'][XER_bits
['SO']] = new_so
1625 log(" set overflow", ov
, ov32
, so
, new_so
)
def handle_comparison(self, out, cr_idx=0, overflow=None, no_so=False):
    """Compute EQ/GT/LT/SO flags for a result and write them to a CR field.

    out      : SelectableInt result; sign-extended then compared with zero
    cr_idx   : index of the CR field to receive the flags (CR0 by default)
    overflow : optional explicit overflow indicator (setvl-style); when 1
               it forces SO regardless of XER.SO
    no_so    : when True, do *not* read XER.SO (important for setvl)
    """
    # bugfix: the assert message previously said repr(outputs) - an
    # undefined name - which raised NameError instead of the intended
    # AssertionError whenever the check fired.
    assert isinstance(out, SelectableInt), \
        "out zero not a SelectableInt %s" % repr(out)
    log("handle_comparison", out.bits, hex(out.value))
    # TODO - XXX *processor* in 32-bit mode
    # https://bugs.libre-soc.org/show_bug.cgi?id=424
    # o32 = exts(out.value, 32)
    # print ("handle_comparison exts 32 bit", hex(o32))
    out = exts(out.value, out.bits)
    log("handle_comparison exts", hex(out))
    # create the three main CR flags, EQ GT LT
    zero = SelectableInt(out == 0, 1)
    positive = SelectableInt(out > 0, 1)
    negative = SelectableInt(out < 0, 1)
    # get (or not) XER.SO. for setvl this is important *not* to read SO
    if no_so:
        SO = SelectableInt(1, 0)
    else:
        SO = self.spr['XER'][XER_bits['SO']]
    log("handle_comparison SO", SO.value,
        "overflow", overflow,
        "+ve", positive.value,
        "-ve", negative.value)
    # alternative overflow checking (setvl mainly at the moment)
    if overflow is not None and overflow == 1:
        SO = SelectableInt(1, 1)
    # create the four CR field values and set the required CR field
    cr_field = selectconcat(negative, positive, zero, SO)
    log("handle_comparison cr_field", self.cr, cr_idx, cr_field)
    self.crl[cr_idx].eq(cr_field)
def set_pc(self, pc_val):
    """Force the program counter: publish pc_val as NIA, then commit it."""
    nia = SelectableInt(pc_val, 64)
    self.namespace['NIA'] = nia
    self.pc.update(self.namespace, self.is_svp64_mode)
1664 def get_next_insn(self
):
1665 """check instruction
1668 pc
= self
.pc
.CIA
.value
1671 ins
= self
.imem
.ld(pc
, 4, False, True, instr_fetch
=True)
1673 raise KeyError("no instruction at 0x%x" % pc
)
def setup_one(self):
    """Fetch the instruction at the current PC and prepare the decoder.

    Thin wrapper: obtains (pc, insn) from get_next_insn and delegates
    the decoder setup to setup_next_insn.
    """
    fetched_pc, fetched_insn = self.get_next_insn()
    yield from self.setup_next_insn(fetched_pc, fetched_insn)
1682 def setup_next_insn(self
, pc
, ins
):
1683 """set up next instruction
1686 log("setup: 0x%x 0x%x %s" % (pc
, ins
& 0xffffffff, bin(ins
)))
1687 log("CIA NIA", self
.respect_pc
, self
.pc
.CIA
.value
, self
.pc
.NIA
.value
)
1689 yield self
.dec2
.sv_rm
.eq(0)
1690 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff)
1691 yield self
.dec2
.dec
.bigendian
.eq(self
.bigendian
)
1692 yield self
.dec2
.state
.msr
.eq(self
.msr
.value
)
1693 yield self
.dec2
.state
.pc
.eq(pc
)
1694 if self
.svstate
is not None:
1695 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
1697 # SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
1699 opcode
= yield self
.dec2
.dec
.opcode_in
1700 opcode
= SelectableInt(value
=opcode
, bits
=32)
1701 pfx
= SVP64Instruction
.Prefix(opcode
)
1702 log("prefix test: opcode:", pfx
.PO
, bin(pfx
.PO
), pfx
.id)
1703 self
.is_svp64_mode
= bool((pfx
.PO
== 0b000001) and (pfx
.id == 0b11))
1704 self
.pc
.update_nia(self
.is_svp64_mode
)
1706 yield self
.dec2
.is_svp64_mode
.eq(self
.is_svp64_mode
)
1707 self
.namespace
['NIA'] = self
.pc
.NIA
1708 self
.namespace
['SVSTATE'] = self
.svstate
1709 if not self
.is_svp64_mode
:
1712 # in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
1713 log("svp64.rm", bin(pfx
.rm
))
1714 log(" svstate.vl", self
.svstate
.vl
)
1715 log(" svstate.mvl", self
.svstate
.maxvl
)
1716 ins
= self
.imem
.ld(pc
+4, 4, False, True, instr_fetch
=True)
1717 log(" svsetup: 0x%x 0x%x %s" % (pc
+4, ins
& 0xffffffff, bin(ins
)))
1718 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff) # v3.0B suffix
1719 yield self
.dec2
.sv_rm
.eq(int(pfx
.rm
)) # svp64 prefix
1722 def execute_one(self
):
1723 """execute one instruction
1725 # get the disassembly code for this instruction
1726 if not self
.disassembly
:
1727 code
= yield from self
.get_assembly_name()
1730 if self
.is_svp64_mode
:
1731 offs
, dbg
= 4, "svp64 "
1732 code
= self
.disassembly
[self
._pc
+offs
]
1733 log(" %s sim-execute" % dbg
, hex(self
._pc
), code
)
1734 opname
= code
.split(' ')[0]
1736 yield from self
.call(opname
) # execute the instruction
1737 except MemException
as e
: # check for memory errors
1738 if e
.args
[0] == 'unaligned': # alignment error
1739 # run a Trap but set DAR first
1740 print("memory unaligned exception, DAR", e
.dar
, repr(e
))
1741 self
.spr
['DAR'] = SelectableInt(e
.dar
, 64)
1742 self
.call_trap(0x600, PIb
.PRIV
) # 0x600, privileged
1744 elif e
.args
[0] == 'invalid': # invalid
1745 # run a Trap but set DAR first
1746 log("RADIX MMU memory invalid error, mode %s" % e
.mode
)
1747 if e
.mode
== 'EXECUTE':
1748 # XXX TODO: must set a few bits in SRR1,
1749 # see microwatt loadstore1.vhdl
1750 # if m_in.segerr = '0' then
1751 # v.srr1(47 - 33) := m_in.invalid;
1752 # v.srr1(47 - 35) := m_in.perm_error; -- noexec fault
1753 # v.srr1(47 - 44) := m_in.badtree;
1754 # v.srr1(47 - 45) := m_in.rc_error;
1755 # v.intr_vec := 16#400#;
1757 # v.intr_vec := 16#480#;
1758 self
.call_trap(0x400, PIb
.PRIV
) # 0x400, privileged
1760 self
.call_trap(0x300, PIb
.PRIV
) # 0x300, privileged
1762 # not supported yet:
1763 raise e
# ... re-raise
1765 # append to the trace log file
1766 self
.trace(" # %s\n" % code
)
1768 log("gprs after code", code
)
1771 for i
in range(len(self
.crl
)):
1772 crs
.append(bin(self
.crl
[i
].asint()))
1773 log("crs", " ".join(crs
))
1774 log("vl,maxvl", self
.svstate
.vl
, self
.svstate
.maxvl
)
1776 # don't use this except in special circumstances
1777 if not self
.respect_pc
:
1780 log("execute one, CIA NIA", hex(self
.pc
.CIA
.value
),
1781 hex(self
.pc
.NIA
.value
))
1783 def get_assembly_name(self
):
1784 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1785 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1786 dec_insn
= yield self
.dec2
.e
.do
.insn
1787 insn_1_11
= yield self
.dec2
.e
.do
.insn
[1:11]
1788 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1789 int_op
= yield self
.dec2
.dec
.op
.internal_op
1790 log("get assembly name asmcode", asmcode
, int_op
,
1791 hex(dec_insn
), bin(insn_1_11
))
1792 asmop
= insns
.get(asmcode
, None)
1794 # sigh reconstruct the assembly instruction name
1795 if hasattr(self
.dec2
.e
.do
, "oe"):
1796 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
1797 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
1801 if hasattr(self
.dec2
.e
.do
, "rc"):
1802 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
1803 rc_ok
= yield self
.dec2
.e
.do
.rc
.ok
1807 # annoying: ignore rc_ok if RC1 is set (for creating *assembly name*)
1808 RC1
= yield self
.dec2
.rm_dec
.RC1
1812 # grrrr have to special-case MUL op (see DecodeOE)
1813 log("ov %d en %d rc %d en %d op %d" %
1814 (ov_ok
, ov_en
, rc_ok
, rc_en
, int_op
))
1815 if int_op
in [MicrOp
.OP_MUL_H64
.value
, MicrOp
.OP_MUL_H32
.value
]:
1820 if not asmop
.endswith("."): # don't add "." to "andis."
1823 if hasattr(self
.dec2
.e
.do
, "lk"):
1824 lk
= yield self
.dec2
.e
.do
.lk
1827 log("int_op", int_op
)
1828 if int_op
in [MicrOp
.OP_B
.value
, MicrOp
.OP_BC
.value
]:
1829 AA
= yield self
.dec2
.dec
.fields
.FormI
.AA
[0:-1]
1833 spr_msb
= yield from self
.get_spr_msb()
1834 if int_op
== MicrOp
.OP_MFCR
.value
:
1839 # XXX TODO: for whatever weird reason this doesn't work
1840 # https://bugs.libre-soc.org/show_bug.cgi?id=390
1841 if int_op
== MicrOp
.OP_MTCRF
.value
:
def reset_remaps(self):
    """Reset the per-SVSHAPE remap bookkeeping to its default state."""
    self.remap_loopends = [0, 0, 0, 0]
    self.remap_idxs = list(range(4))
1852 def get_remap_indices(self
):
1853 """WARNING, this function stores remap_idxs and remap_loopends
1854 in the class for later use. this to avoid problems with yield
1856 # go through all iterators in lock-step, advance to next remap_idx
1857 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
1858 # get four SVSHAPEs. here we are hard-coding
1860 SVSHAPE0
= self
.spr
['SVSHAPE0']
1861 SVSHAPE1
= self
.spr
['SVSHAPE1']
1862 SVSHAPE2
= self
.spr
['SVSHAPE2']
1863 SVSHAPE3
= self
.spr
['SVSHAPE3']
1864 # set up the iterators
1865 remaps
= [(SVSHAPE0
, SVSHAPE0
.get_iterator()),
1866 (SVSHAPE1
, SVSHAPE1
.get_iterator()),
1867 (SVSHAPE2
, SVSHAPE2
.get_iterator()),
1868 (SVSHAPE3
, SVSHAPE3
.get_iterator()),
1872 for i
, (shape
, remap
) in enumerate(remaps
):
1873 # zero is "disabled"
1874 if shape
.value
== 0x0:
1875 self
.remap_idxs
[i
] = 0
1876 # pick src or dststep depending on reg num (0-2=in, 3-4=out)
1877 step
= dststep
if (i
in [3, 4]) else srcstep
1878 # this is terrible. O(N^2) looking for the match. but hey.
1879 for idx
, (remap_idx
, loopends
) in enumerate(remap
):
1882 self
.remap_idxs
[i
] = remap_idx
1883 self
.remap_loopends
[i
] = loopends
1884 dbg
.append((i
, step
, remap_idx
, loopends
))
1885 for (i
, step
, remap_idx
, loopends
) in dbg
:
1886 log("SVSHAPE %d idx, end" % i
, step
, remap_idx
, bin(loopends
))
def get_spr_msb(self):
    """Sample the raw instruction word from the decoder and report whether
    bit 20 is set (the SPR-number MSB used for privilege checking).
    """
    raw_insn = yield self.dec2.e.do.insn
    return bool(raw_insn & (1 << 20))  # sigh - XFF.spr[-1]?
1893 def call(self
, name
):
1894 """call(opcode) - the primary execution point for instructions
1896 self
.last_st_addr
= None # reset the last known store address
1897 self
.last_ld_addr
= None # etc.
1899 ins_name
= name
.strip() # remove spaces if not already done so
1901 log("halted - not executing", ins_name
)
1904 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1905 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1906 asmop
= yield from self
.get_assembly_name()
1907 log("call", ins_name
, asmop
)
1909 # sv.setvl is *not* a loop-function. sigh
1910 log("is_svp64_mode", self
.is_svp64_mode
, asmop
)
1913 int_op
= yield self
.dec2
.dec
.op
.internal_op
1914 spr_msb
= yield from self
.get_spr_msb()
1916 instr_is_privileged
= False
1917 if int_op
in [MicrOp
.OP_ATTN
.value
,
1918 MicrOp
.OP_MFMSR
.value
,
1919 MicrOp
.OP_MTMSR
.value
,
1920 MicrOp
.OP_MTMSRD
.value
,
1922 MicrOp
.OP_RFID
.value
]:
1923 instr_is_privileged
= True
1924 if int_op
in [MicrOp
.OP_MFSPR
.value
,
1925 MicrOp
.OP_MTSPR
.value
] and spr_msb
:
1926 instr_is_privileged
= True
1928 log("is priv", instr_is_privileged
, hex(self
.msr
.value
),
1930 # check MSR priv bit and whether op is privileged: if so, throw trap
1931 if instr_is_privileged
and self
.msr
[MSRb
.PR
] == 1:
1932 self
.call_trap(0x700, PIb
.PRIV
)
1935 # check halted condition
1936 if ins_name
== 'attn':
1940 # check illegal instruction
1942 if ins_name
not in ['mtcrf', 'mtocrf']:
1943 illegal
= ins_name
!= asmop
1945 # list of instructions not being supported by binutils (.long)
1946 dotstrp
= asmop
[:-1] if asmop
[-1] == '.' else asmop
1947 if dotstrp
in [*FPTRANS_INSNS
,
1949 'ffmadds', 'fdmadds', 'ffadds',
1951 'setvl', 'svindex', 'svremap', 'svstep',
1952 'svshape', 'svshape2',
1953 'grev', 'ternlogi', 'bmask', 'cprop',
1954 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
1955 'fmvis', 'fishmv', 'pcdec', "maddedu", "divmod2du",
1956 "dsld", "dsrd", "maddedus",
1957 "sadd", "saddw", "sadduw",
1958 "fcvttg", "fcvttgo", "fcvttgs", "fcvttgso",
1960 "fcvtfg", "fcvtfgs",
1962 "maddsubrs", "maddrs"
1967 # branch-conditional redirects to sv.bc
1968 if asmop
.startswith('bc') and self
.is_svp64_mode
:
1969 ins_name
= 'sv.%s' % ins_name
1971 # ld-immediate-with-pi mode redirects to ld-with-postinc
1972 ldst_imm_postinc
= False
1973 if 'u' in ins_name
and self
.is_svp64_mode
:
1974 ldst_pi
= yield self
.dec2
.rm_dec
.ldst_postinc
1976 ins_name
= ins_name
.replace("u", "up")
1977 ldst_imm_postinc
= True
1978 log(" enable ld/st postinc", ins_name
)
1980 log(" post-processed name", dotstrp
, ins_name
, asmop
)
1982 # illegal instructions call TRAP at 0x700
1984 print("illegal", ins_name
, asmop
)
1985 self
.call_trap(0x700, PIb
.ILLEG
)
1986 print("name %s != %s - calling ILLEGAL trap, PC: %x" %
1987 (ins_name
, asmop
, self
.pc
.CIA
.value
))
1990 # this is for setvl "Vertical" mode: if set true,
1991 # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
1992 # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
1993 self
.allow_next_step_inc
= False
1994 self
.svstate_next_mode
= 0
1996 # nop has to be supported, we could let the actual op calculate
1997 # but PowerDecoder has a pattern for nop
1998 if ins_name
== 'nop':
1999 self
.update_pc_next()
2002 # get elwidths, defaults to 64
2006 if self
.is_svp64_mode
:
2007 ew_src
= yield self
.dec2
.rm_dec
.ew_src
2008 ew_dst
= yield self
.dec2
.rm_dec
.ew_dst
2009 ew_src
= 8 << (3-int(ew_src
)) # convert to bitlength
2010 ew_dst
= 8 << (3-int(ew_dst
)) # convert to bitlength
2011 xlen
= max(ew_src
, ew_dst
)
2012 log("elwdith", ew_src
, ew_dst
)
2013 log("XLEN:", self
.is_svp64_mode
, xlen
)
2015 # look up instruction in ISA.instrs, prepare namespace
2016 if ins_name
== 'pcdec': # grrrr yes there are others ("stbcx." etc.)
2017 info
= self
.instrs
[ins_name
+"."]
2018 elif asmop
[-1] == '.' and asmop
in self
.instrs
:
2019 info
= self
.instrs
[asmop
]
2021 info
= self
.instrs
[ins_name
]
2022 yield from self
.prep_namespace(ins_name
, info
.form
, info
.op_fields
,
2025 # preserve order of register names
2026 input_names
= create_args(list(info
.read_regs
) +
2027 list(info
.uninit_regs
))
2028 log("input names", input_names
)
2030 # get SVP64 entry for the current instruction
2031 sv_rm
= self
.svp64rm
.instrs
.get(ins_name
)
2032 if sv_rm
is not None:
2033 dest_cr
, src_cr
, src_byname
, dest_byname
= decode_extra(sv_rm
)
2035 dest_cr
, src_cr
, src_byname
, dest_byname
= False, False, {}, {}
2036 log("sv rm", sv_rm
, dest_cr
, src_cr
, src_byname
, dest_byname
)
2038 # see if srcstep/dststep need skipping over masked-out predicate bits
2039 # svstep also needs advancement because it calls SVSTATE_NEXT.
2040 # bit the remaps get computed just after pre_inc moves them on
2041 # with remap_set_steps substituting for PowerDecider2 not doing it,
2042 # and SVSTATE_NEXT not being able to.use yield, the preinc on
2043 # svstep is necessary for now.
2045 if (self
.is_svp64_mode
or ins_name
in ['svstep']):
2046 yield from self
.svstate_pre_inc()
2047 if self
.is_svp64_mode
:
2048 pre
= yield from self
.update_new_svstate_steps()
2050 self
.svp64_reset_loop()
2052 self
.update_pc_next()
2054 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2055 pred_dst_zero
= self
.pred_dst_zero
2056 pred_src_zero
= self
.pred_src_zero
2057 vl
= self
.svstate
.vl
2058 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2060 # VL=0 in SVP64 mode means "do nothing: skip instruction"
2061 if self
.is_svp64_mode
and vl
== 0:
2062 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
2063 log("SVP64: VL=0, end of call", self
.namespace
['CIA'],
2064 self
.namespace
['NIA'], kind
=LogKind
.InstrInOuts
)
2067 # for when SVREMAP is active, using pre-arranged schedule.
2068 # note: modifying PowerDecoder2 needs to "settle"
2069 remap_en
= self
.svstate
.SVme
2070 persist
= self
.svstate
.RMpst
2071 active
= (persist
or self
.last_op_svshape
) and remap_en
!= 0
2072 if self
.is_svp64_mode
:
2073 yield self
.dec2
.remap_active
.eq(remap_en
if active
else 0)
2075 if persist
or self
.last_op_svshape
:
2076 remaps
= self
.get_remap_indices()
2077 if self
.is_svp64_mode
and (persist
or self
.last_op_svshape
):
2078 yield from self
.remap_set_steps(remaps
)
2079 # after that, settle down (combinatorial) to let Vector reg numbers
2080 # work themselves out
2082 if self
.is_svp64_mode
:
2083 remap_active
= yield self
.dec2
.remap_active
2085 remap_active
= False
2086 log("remap active", bin(remap_active
))
2088 # main input registers (RT, RA ...)
2090 for name
in input_names
:
2091 regval
= (yield from self
.get_input(name
, ew_src
))
2092 log("regval name", name
, regval
)
2093 inputs
.append(regval
)
2095 # arrrrgh, awful hack, to get _RT into namespace
2096 if ins_name
in ['setvl', 'svstep']:
2098 RT
= yield self
.dec2
.dec
.RT
2099 self
.namespace
[regname
] = SelectableInt(RT
, 5)
2101 self
.namespace
["RT"] = SelectableInt(0, 5)
2102 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, "RT")
2103 log('hack input reg %s %s' % (name
, str(regnum
)), is_vec
)
2105 # in SVP64 mode for LD/ST work out immediate
2106 # XXX TODO: replace_ds for DS-Form rather than D-Form.
2107 # use info.form to detect
2108 if self
.is_svp64_mode
and not ldst_imm_postinc
:
2109 yield from self
.check_replace_d(info
, remap_active
)
2111 # "special" registers
2112 for special
in info
.special_regs
:
2113 if special
in special_sprs
:
2114 inputs
.append(self
.spr
[special
])
2116 inputs
.append(self
.namespace
[special
])
2118 # clear trap (trap) NIA
2119 self
.trap_nia
= None
2121 # check if this was an sv.bc* and create an indicator that
2122 # this is the last check to be made as a loop. combined with
2123 # the ALL/ANY mode we can early-exit
2124 if self
.is_svp64_mode
and ins_name
.startswith("sv.bc"):
2125 no_in_vec
= yield self
.dec2
.no_in_vec
# BI is scalar
2126 end_loop
= no_in_vec
or srcstep
== vl
-1 or dststep
== vl
-1
2127 self
.namespace
['end_loop'] = SelectableInt(end_loop
, 1)
2129 inp_ca_ov
= (self
.spr
['XER'][XER_bits
['CA']].value
,
2130 self
.spr
['XER'][XER_bits
['OV']].value
)
2132 # execute actual instruction here (finally)
2133 log("inputs", inputs
)
2134 results
= info
.func(self
, *inputs
)
2135 output_names
= create_args(info
.write_regs
)
2137 for out
, n
in zip(results
or [], output_names
):
2139 log("results", outs
)
2141 # "inject" decorator takes namespace from function locals: we need to
2142 # overwrite NIA being overwritten (sigh)
2143 if self
.trap_nia
is not None:
2144 self
.namespace
['NIA'] = self
.trap_nia
2146 log("after func", self
.namespace
['CIA'], self
.namespace
['NIA'])
2148 # check if op was a LD/ST so that debugging can check the
2150 if int_op
in [MicrOp
.OP_STORE
.value
,
2152 self
.last_st_addr
= self
.mem
.last_st_addr
2153 if int_op
in [MicrOp
.OP_LOAD
.value
,
2155 self
.last_ld_addr
= self
.mem
.last_ld_addr
2156 log("op", int_op
, MicrOp
.OP_STORE
.value
, MicrOp
.OP_LOAD
.value
,
2157 self
.last_st_addr
, self
.last_ld_addr
)
2159 # detect if CA/CA32 already in outputs (sra*, basically)
2161 ca32
= outs
.get("CA32")
2163 log("carry already done?", ca
, ca32
, output_names
)
2164 carry_en
= yield self
.dec2
.e
.do
.output_carry
2166 yield from self
.handle_carry_(
2167 inputs
, results
[0], ca
, ca32
, inp_ca_ov
=inp_ca_ov
)
2169 # get outout named "overflow" and "CR0"
2170 overflow
= outs
.get('overflow')
2171 cr0
= outs
.get('CR0')
2172 cr1
= outs
.get('CR1')
2174 if not self
.is_svp64_mode
: # yeah just no. not in parallel processing
2175 # detect if overflow was in return result
2176 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2177 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2178 log("internal overflow", ins_name
, overflow
, "en?", ov_en
, ov_ok
)
2180 yield from self
.handle_overflow(
2181 inputs
, results
[0], overflow
, inp_ca_ov
=inp_ca_ov
)
2183 # only do SVP64 dest predicated Rc=1 if dest-pred is not enabled
2185 if not self
.is_svp64_mode
or not pred_dst_zero
:
2186 if hasattr(self
.dec2
.e
.do
, "rc"):
2187 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2188 # don't do Rc=1 for svstep it is handled explicitly.
2189 # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
2190 # to write directly to CR0 instead of in ISACaller. hooyahh.
2191 if rc_en
and ins_name
not in ['svstep']:
2192 yield from self
.do_rc_ov(
2193 ins_name
, results
[0], overflow
, cr0
, cr1
, output_names
)
2196 ffirst_hit
= False, False
2197 if self
.is_svp64_mode
:
2198 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2199 is_cr
= sv_mode
== SVMode
.CROP
.value
2200 chk
= rc_en
or is_cr
2201 ffirst_hit
= (yield from self
.check_ffirst(info
, chk
, srcstep
))
2203 # check if a FP Exception occurred. TODO for DD-FFirst, check VLi
2204 # and raise the exception *after* if VLi=1 but if VLi=0 then
2205 # truncate and make the exception "disappear".
2206 if self
.FPSCR
.FEX
and (self
.msr
[MSRb
.FE0
] or self
.msr
[MSRb
.FE1
]):
2207 self
.call_trap(0x700, PIb
.FP
)
2210 # any modified return results?
2211 yield from self
.do_outregs_nia(asmop
, ins_name
, info
, outs
,
2212 carry_en
, rc_en
, ffirst_hit
, ew_dst
)
    def check_ffirst(self, info, rc_en, srcstep):
        """fail-first mode: checks a bit of Rc Vector, truncates VL

        reads the SVP64 RM fail-first decode fields and, when fail-first
        is active and the tested CR bit differs from the inversion flag,
        truncates SVSTATE.VL at srcstep (+vli when VL-inclusive).

        NOTE(review): several lines of this block were dropped by the
        extraction (the not-ffirst early return, the crf selection, the
        not-hit early exit and the final return) — marked below.
        """
        rm_mode = yield self.dec2.rm_dec.mode
        ff_inv = yield self.dec2.rm_dec.inv
        cr_bit = yield self.dec2.rm_dec.cr_sel
        RC1 = yield self.dec2.rm_dec.RC1
        vli_ = yield self.dec2.rm_dec.vli # VL inclusive if truncated
        log(" ff rm_mode", rc_en, rm_mode, SVP64RMMode.FFIRST.value)
        log(" cr_bit", cr_bit)
        log(" rc_en", rc_en)
        if not rc_en or rm_mode != SVP64RMMode.FFIRST.value:
            # NOTE(review): body elided in this extract — presumably an
            # early return of (False, False); confirm against upstream
        # get the CR vector, do BO-test
        log("asmregs", info.asmregs[0], info.write_regs)
        if 'CR' in info.write_regs and 'BF' in info.asmregs[0]:
            # NOTE(review): body elided in this extract (selection of
            # crf, which is used below but never assigned in this view)
        regnum, is_vec = yield from get_cr_out(self.dec2, crf)
        crtest = self.crl[regnum]
        # hit when the selected CR bit differs from the inversion flag
        ffirst_hit = crtest[cr_bit] != ff_inv
        log("cr test", crf, regnum, int(crtest), crtest, cr_bit, ff_inv)
        log("cr test?", ffirst_hit)
        # NOTE(review): "not hit" early-exit elided in this extract
        # Fail-first activated, truncate VL
        vli = SelectableInt(int(vli_), 7)
        self.svstate.vl = srcstep + vli
        yield self.dec2.state.svstate.eq(self.svstate.value)
        yield Settle()  # let decoder update
        # NOTE(review): final return elided in this extract
    def do_rc_ov(self, ins_name, result, overflow, cr0, cr1, output_names):
        """Rc=1 / overflow handling: computes and writes the Rc CR field
        (CR1 for FP ops without an RT output, otherwise — per the elided
        branch — presumably CR0) from the result, overflow, and any
        explicit CR0/CR1 produced by the pseudocode.

        NOTE(review): several guard/else lines were dropped by the
        extraction — marked below; indentation of surviving statements
        follows the apparent original structure.
        """
        if ins_name.startswith("f") and "RT" not in output_names:
            rc_reg = "CR1"  # not calculated correctly yet for FP compares
        # NOTE(review): else-branch elided (integer rc_reg selection)
        regnum, is_vec = yield from get_cr_out(self.dec2, rc_reg)
        # hang on... for `setvl` actually you want to test SVSTATE.VL
        is_setvl = ins_name in ('svstep', 'setvl')
        # NOTE(review): guard elided — result replaced by VL for setvl
        result = SelectableInt(result.vl, 64)
        # overflow = None # do not override overflow except in setvl
        # NOTE(review): guards elided — the FPSCR-derived default CR1
        # below appears to apply only when no explicit CR1 was produced
        cr1 = int(self.FPSCR.FX) << 3
        cr1 |= int(self.FPSCR.FEX) << 2
        cr1 |= int(self.FPSCR.VX) << 1
        cr1 |= int(self.FPSCR.OX)
        log("default fp cr1", cr1)
        # NOTE(review): else-branch header elided (explicit-CR1 path)
        log("explicit cr1", cr1)
        self.crl[regnum].eq(cr1)
        # if there was not an explicit CR0 in the pseudocode,
        # fall back to the standard comparison-based CR update
        self.handle_comparison(result, regnum, overflow, no_so=is_setvl)
        # otherwise we just blat CR0 into the required regnum
        log("explicit rc0", cr0)
        self.crl[regnum].eq(cr0)
    def do_outregs_nia(self, asmop, ins_name, info, outs,
                       ca_en, rc_en, ffirst_hit, ew_dst):
        """writes out the instruction's result registers (check_write),
        honouring fail-first truncation, then asks check_step_increment
        whether the PC may advance and, if so, updates it.

        NOTE(review): a few lines were dropped by the extraction (the
        ffirst-hit reset guard, the tail of the check_step_increment
        call, and the guard on update_pc_next) — marked below.
        """
        # unpack the (hit, vli) pair produced by check_ffirst
        ffirst_hit, vli = ffirst_hit
        # write out any regs for this instruction, but only if fail-first is ok
        # XXX TODO: allow CR-vector to be written out even if ffirst fails
        if not ffirst_hit or vli:
            for name, output in outs.items():
                yield from self.check_write(info, name, output, ca_en, ew_dst)
        # restore the CR value on non-VLI failfirst (from sv.cmp and others
        # which write directly to CR in the pseudocode (gah, what a mess)
        # if ffirst_hit and not vli:
        #     self.cr.value = self.cr_backup
        # NOTE(review): guard elided in this extract — reset presumably
        # only when fail-first hit; confirm against upstream
        self.svp64_reset_loop()
        # check advancement of src/dst/sub-steps and if PC needs updating
        nia_update = (yield from self.check_step_increment(rc_en,
        # NOTE(review): remaining call arguments and the "if nia_update:"
        # guard elided in this extract
        self.update_pc_next()
    def check_replace_d(self, info, remap_active):
        """recomputes the D/DS immediate of a Vectorised LD/ST: for
        Unit-Strided mode the element offset times the transfer width is
        added to the immediate, for Element-Strided mode the immediate is
        multiplied by the element step; the result is stored back into
        the pseudocode namespace as 'D' or 'DS'.

        NOTE(review): several guard/else lines were dropped by the
        extraction — marked below.
        """
        replace_d = False  # update / replace constant in pseudocode
        ldstmode = yield self.dec2.rm_dec.ldstmode
        vl = self.svstate.vl
        subvl = yield self.dec2.rm_dec.rm_in.subvl
        srcstep, dststep = self.new_srcstep, self.new_dststep
        ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep
        if info.form == 'DS':
            # DS-Form, multiply by 4 then knock 2 bits off after
            imm = yield self.dec2.dec.fields.FormDS.DS[0:14] * 4
        # NOTE(review): else-branch header elided (plain D-Form read)
            imm = yield self.dec2.dec.fields.FormD.D[0:16]
        imm = exts(imm, 16)  # sign-extend to integer
        # get the right step. LD is from srcstep, ST is dststep
        op = yield self.dec2.e.do.insn_type
        # NOTE(review): offsmul initialisation / guard elided here
        if op == MicrOp.OP_LOAD.value:
            # NOTE(review): REMAP-active guard elided before this line
            offsmul = yield self.dec2.in1_step
            log("D-field REMAP src", imm, offsmul, ldstmode)
            # NOTE(review): else-branch header elided (non-REMAP path)
            offsmul = (srcstep * (subvl+1)) + ssubstep
            log("D-field src", imm, offsmul, ldstmode)
        elif op == MicrOp.OP_STORE.value:
            # XXX NOTE! no bit-reversed STORE! this should not ever be used
            offsmul = (dststep * (subvl+1)) + dsubstep
            log("D-field dst", imm, offsmul, ldstmode)
        # Unit-Strided LD/ST adds offset*width to immediate
        if ldstmode == SVP64LDSTmode.UNITSTRIDE.value:
            ldst_len = yield self.dec2.e.do.data_len
            imm = SelectableInt(imm + offsmul * ldst_len, 32)
            # NOTE(review): "replace_d = True" line elided here
        # Element-strided multiplies the immediate by element step
        elif ldstmode == SVP64LDSTmode.ELSTRIDE.value:
            imm = SelectableInt(imm * offsmul, 32)
            # NOTE(review): "replace_d = True" line elided here
        ldst_ra_vec = yield self.dec2.rm_dec.ldst_ra_vec
        ldst_imz_in = yield self.dec2.rm_dec.ldst_imz_in
        log("LDSTmode", SVP64LDSTmode(ldstmode),
            offsmul, imm, ldst_ra_vec, ldst_imz_in)
        # new replacement D... errr.. DS
        # NOTE(review): "if replace_d:" guard elided in this extract
        if info.form == 'DS':
            # TODO: assert 2 LSBs are zero?
            log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm.value))
            imm.value = imm.value >> 2
            self.namespace['DS'] = imm
        # NOTE(review): else-branch header elided (plain D write)
            self.namespace['D'] = imm
    def get_input(self, name, ew_src):
        """reads input register `name` (FPR or GPR path) at source
        element-width ew_src, honouring SVP64 predicate-zeroing, and
        records the raw register number in the namespace as "_"+name.

        NOTE(review): the fallback guards between the three index
        lookups, the FPR/GPR selection guard, and the final return were
        dropped by the extraction — marked below.
        """
        # using PowerDecoder2, first, find the decoder index.
        # (mapping name RA RB RC RS to in1, in2, in3)
        regnum, is_vec = yield from get_idx_in(self.dec2, name, True)
        # NOTE(review): guard elided before each fallback lookup below
        # doing this is not part of svp64, it's because output
        # registers, to be modified, need to be in the namespace.
        regnum, is_vec = yield from get_idx_out(self.dec2, name, True)
        regnum, is_vec = yield from get_idx_out2(self.dec2, name, True)
        if isinstance(regnum, tuple):
            (regnum, base, offs) = regnum
        # NOTE(review): else-branch header elided
            base, offs = regnum, 0  # temporary HACK
        # in case getting the register number is needed, _RA, _RB
        # (HACK: only in straight non-svp64-mode for now, or elwidth == 64)
        regname = "_" + name
        if not self.is_svp64_mode or ew_src == 64:
            self.namespace[regname] = regnum
        elif regname in self.namespace:
            del self.namespace[regname]
        if not self.is_svp64_mode or not self.pred_src_zero:
            log('reading reg %s %s' % (name, str(regnum)), is_vec)
            # NOTE(review): FPR-vs-GPR selection guard elided here
            reg_val = SelectableInt(self.fpr(base, is_vec, offs, ew_src))
            log("read reg %d/%d: 0x%x" % (base, offs, reg_val.value))
            self.trace("r:FPR:%d:%d:%d " % (base, offs, ew_src))
        elif name is not None:
            reg_val = SelectableInt(self.gpr(base, is_vec, offs, ew_src))
            self.trace("r:GPR:%d:%d:%d " % (base, offs, ew_src))
            log("read reg %d/%d: 0x%x" % (base, offs, reg_val.value))
        # NOTE(review): predicate-zero else-branch header elided
            log('zero input reg %s %s' % (name, str(regnum)), is_vec)
            reg_val = SelectableInt(0, ew_src)
        # NOTE(review): final "return reg_val" elided in this extract
    def remap_set_steps(self, remaps):
        """remap_set_steps sets up the in1/2/3 and out1/2 steps.
        they work in concert with PowerDecoder2 at the moment,
        there is no HDL implementation of REMAP. therefore this
        function, because ISACaller still uses PowerDecoder2,
        will *explicitly* write the dec2.XX_step values. this has

        NOTE(review): the remainder of the original docstring, a debug
        for-loop header, list terminators and several guard lines were
        dropped by the extraction — marked below.
        """
        # just some convenient debug info
        # NOTE(review): enclosing "for i in ...:" header elided here
        sname = 'SVSHAPE%d' % i
        shape = self.spr[sname]
        log(sname, bin(shape.value))
        log(" lims", shape.lims)
        log(" mode", shape.mode)
        log(" skip", shape.skip)
        # set up the list of steps to remap
        mi0 = self.svstate.mi0
        mi1 = self.svstate.mi1
        mi2 = self.svstate.mi2
        mo0 = self.svstate.mo0
        mo1 = self.svstate.mo1
        steps = [[self.dec2.in1_step, mi0],  # RA
                 [self.dec2.in2_step, mi1],  # RB
                 [self.dec2.in3_step, mi2],  # RC
                 [self.dec2.o_step, mo0],    # RT
                 [self.dec2.o2_step, mo1],   # EA
        # NOTE(review): list terminator elided in this extract
        rnames = ['RA', 'RB', 'RC', 'RT', 'RS']
        for i, reg in enumerate(rnames):
            idx = yield from get_idx_map(self.dec2, reg)
            # NOTE(review): guard elided (FP-register name fallback)
            idx = yield from get_idx_map(self.dec2, "F"+reg)
            # NOTE(review): idx-dispatch guards elided between the three
            # alternative step assignments below
            steps[i][0] = self.dec2.in1_step
            steps[i][0] = self.dec2.in2_step
            steps[i][0] = self.dec2.in3_step
            log("remap step", i, reg, idx, steps[i][1])
        remap_idxs = self.remap_idxs
        # NOTE(review): rremaps initialisation elided in this extract
        # now cross-index the required SHAPE for each of 3-in 2-out regs
        rnames = ['RA', 'RB', 'RC', 'RT', 'EA']
        for i, (dstep, shape_idx) in enumerate(steps):
            (shape, remap) = remaps[shape_idx]
            remap_idx = remap_idxs[shape_idx]
            # zero is "disabled"
            if shape.value == 0x0:
                # NOTE(review): body elided in this extract
            # now set the actual requested step to the current index
            if dstep is not None:
                yield dstep.eq(remap_idx)
            # debug printout info
            rremaps.append((shape.mode, hex(shape.value), dstep,
                            i, rnames[i], shape_idx, remap_idx))
        # NOTE(review): "for x in rremaps:" header elided here
        log("shape remap", x)
    def check_write(self, info, name, output, carry_en, ew_dst):
        """writes one named instruction result to its destination: XER
        carry bits, FPSCR, special SPRs, or a GPR/FPR (with SVP64
        predicate-zeroing and 64-bit truncation applied first).

        NOTE(review): many early-return and else/guard lines were
        dropped by the extraction — marked below; surviving statements
        are indented per the apparent original structure.
        """
        if name == 'overflow':  # ignore, done already (above)
            # NOTE(review): early return elided
        if name == 'CR0':  # ignore, done already (above)
            # NOTE(review): early return elided
        if isinstance(output, int):
            output = SelectableInt(output, EFFECTIVELY_UNLIMITED)
        # FPSCR is written whole, not via the register files
        if name in ['FPSCR', ]:
            log("write FPSCR 0x%x" % (output.value))
            self.FPSCR.eq(output)
            # NOTE(review): early return elided
        if name in ['CA', 'CA32']:
            # NOTE(review): "carry enabled" guard elided here
            log("writing %s to XER" % name, output)
            log("write XER %s 0x%x" % (name, output.value))
            self.spr['XER'][XER_bits[name]] = output.value
            # NOTE(review): else-branch header elided
            log("NOT writing %s to XER" % name, output)
            # NOTE(review): early return elided
        # write special SPRs
        if name in info.special_regs:
            log('writing special %s' % name, output, special_sprs)
            log("write reg %s 0x%x" % (name, output.value))
            if name in special_sprs:
                self.spr[name] = output
            # NOTE(review): else-branch header elided
                self.namespace[name].eq(output)
            # NOTE(review): MSR-specific guard elided before this log
            log('msr written', hex(self.msr.value))
            # NOTE(review): early return elided
        # find out1/out2 PR/FPR
        regnum, is_vec = yield from get_idx_out(self.dec2, name, True)
        # NOTE(review): "not found" fallback guards elided below
        regnum, is_vec = yield from get_idx_out2(self.dec2, name, True)
        # temporary hack for not having 2nd output
        regnum = yield getattr(self.decoder, name)
        # convenient debug prefix
        # NOTE(review): reg_prefix selection lines elided (reg_prefix is
        # used below but never assigned in this view)
        # check zeroing due to predicate bit being zero
        if self.is_svp64_mode and self.pred_dst_zero:
            log('zeroing reg %s %s' % (str(regnum), str(output)), is_vec)
            output = SelectableInt(0, EFFECTIVELY_UNLIMITED)
        log("write reg %s%s 0x%x ew %d" % (reg_prefix, str(regnum),
                                           output.value, ew_dst),
            kind=LogKind.InstrInOuts)
        # zero-extend to 64 bit before storing (should use EXT oh well)
        if output.bits > 64:
            output = SelectableInt(output.value, 64)
        rnum, base, offset = regnum
        # NOTE(review): FPR-vs-GPR selection guard elided between the
        # two write paths below
        self.fpr.write(regnum, output, is_vec, ew_dst)
        self.trace("w:FPR:%d:%d:%d " % (rnum, offset, ew_dst))
        self.gpr.write(regnum, output, is_vec, ew_dst)
        self.trace("w:GPR:%d:%d:%d " % (rnum, offset, ew_dst))
    def check_step_increment(self, rc_en, asmop, ins_name):
        """decides whether the SVP64 sub-PC (src/dst step) loop advances,
        handles an explicitly-requested "svstep" (SVSTATE_NEXT) increment,
        writes CR0 for Rc=1 svstep, and returns whether NIA may update.

        NOTE(review): several return/guard lines were dropped by the
        extraction — marked below.
        """
        # check if it is the SVSTATE.src/dest step that needs incrementing
        # this is our Sub-Program-Counter loop from 0 to VL-1
        if not self.allow_next_step_inc:
            if self.is_svp64_mode:
                return (yield from self.svstate_post_inc(ins_name))
            # XXX only in non-SVP64 mode!
            # record state of whether the current operation was an svshape,
            # to be able to know if it should apply in the next instruction.
            # also (if going to use this instruction) should disable ability
            # to interrupt in between. sigh.
            self.last_op_svshape = asmop in ['svremap', 'svindex',
            # NOTE(review): rest of the list and the non-SVP64 return
            # path elided in this extract
        # NOTE(review): else-branch header (svstep-requested) elided
        log("SVSTATE_NEXT: inc requested, mode",
            self.svstate_next_mode, self.allow_next_step_inc)
        yield from self.svstate_pre_inc()
        pre = yield from self.update_new_svstate_steps()
        # NOTE(review): guard on `pre` elided before the reset below
        # reset at end of loop including exit Vertical Mode
        log("SVSTATE_NEXT: end of loop, reset")
        self.svp64_reset_loop()
        self.svstate.vfirst = 0
        # NOTE(review): intervening lines elided (rc_en handling?)
        self.handle_comparison(SelectableInt(0, 64))  # CR0
        if self.allow_next_step_inc == 2:
            log("SVSTATE_NEXT: read")
            nia_update = (yield from self.svstate_post_inc(ins_name))
        # NOTE(review): else-branch header elided
        log("SVSTATE_NEXT: post-inc")
        # use actual (cached) src/dst-step here to check end
        remaps = self.get_remap_indices()
        remap_idxs = self.remap_idxs
        vl = self.svstate.vl
        subvl = yield self.dec2.rm_dec.rm_in.subvl
        if self.allow_next_step_inc != 2:
            yield from self.advance_svstate_steps()
        #self.namespace['SVSTATE'] = self.svstate.spr
        # set CR0 (if Rc=1) based on end
        endtest = 1 if self.at_loopend() else 0
        # NOTE(review): "if rc_en:" style guard elided here
        #results = [SelectableInt(endtest, 64)]
        # self.handle_comparison(results) # CR0
        # see if svstep was requested, if so, which SVSTATE
        if self.svstate_next_mode > 0:
            shape_idx = self.svstate_next_mode.value-1
            endings = self.remap_loopends[shape_idx]
        cr_field = SelectableInt((~endings) << 1 | endtest, 4)
        log("svstep Rc=1, CR0", cr_field, endtest)
        self.crl[0].eq(cr_field)  # CR0
        # reset at end of loop including exit Vertical Mode
        log("SVSTATE_NEXT: after increments, reset")
        self.svp64_reset_loop()
        self.svstate.vfirst = 0
        # NOTE(review): final "return nia_update" elided in this extract
2587 def SVSTATE_NEXT(self
, mode
, submode
):
2588 """explicitly moves srcstep/dststep on to next element, for
2589 "Vertical-First" mode. this function is called from
2590 setvl pseudo-code, as a pseudo-op "svstep"
2592 WARNING: this function uses information that was created EARLIER
2593 due to it being in the middle of a yield, but this function is
2594 *NOT* called from yield (it's called from compiled pseudocode).
2596 self
.allow_next_step_inc
= submode
.value
+ 1
2597 log("SVSTATE_NEXT mode", mode
, submode
, self
.allow_next_step_inc
)
2598 self
.svstate_next_mode
= mode
2599 if self
.svstate_next_mode
> 0 and self
.svstate_next_mode
< 5:
2600 shape_idx
= self
.svstate_next_mode
.value
-1
2601 return SelectableInt(self
.remap_idxs
[shape_idx
], 7)
2602 if self
.svstate_next_mode
== 5:
2603 self
.svstate_next_mode
= 0
2604 return SelectableInt(self
.svstate
.srcstep
, 7)
2605 if self
.svstate_next_mode
== 6:
2606 self
.svstate_next_mode
= 0
2607 return SelectableInt(self
.svstate
.dststep
, 7)
2608 if self
.svstate_next_mode
== 7:
2609 self
.svstate_next_mode
= 0
2610 return SelectableInt(self
.svstate
.ssubstep
, 7)
2611 if self
.svstate_next_mode
== 8:
2612 self
.svstate_next_mode
= 0
2613 return SelectableInt(self
.svstate
.dsubstep
, 7)
2614 return SelectableInt(0, 7)
2616 def get_src_dststeps(self
):
2617 """gets srcstep, dststep, and ssubstep, dsubstep
2619 return (self
.new_srcstep
, self
.new_dststep
,
2620 self
.new_ssubstep
, self
.new_dsubstep
)
    def update_svstate_namespace(self, overwrite_svstate=True):
        """syncs SVSTATE into the pseudocode namespace and into dec2.

        when overwrite_svstate is True the cached new_* step values are
        first committed into self.svstate.  the decoder state is then
        updated and Settle() issued so combinatorial decode sees it.
        """
        if overwrite_svstate:
            # note, do not get the bit-reversed srcstep here!
            srcstep, dststep = self.new_srcstep, self.new_dststep
            ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep
            # update SVSTATE with new srcstep
            self.svstate.srcstep = srcstep
            self.svstate.dststep = dststep
            self.svstate.ssubstep = ssubstep
            self.svstate.dsubstep = dsubstep
        # NOTE(review): indentation of the three lines below follows the
        # apparent original structure (unconditional) — confirm upstream
        self.namespace['SVSTATE'] = self.svstate
        yield self.dec2.state.svstate.eq(self.svstate.value)
        yield Settle()  # let decoder update
    def update_new_svstate_steps(self, overwrite_svstate=True):
        """refreshes SVSTATE (optionally committing the new_* steps via
        update_svstate_namespace), reads back the step counters and RM
        decode fields, and returns whether the loop reached its end on
        either the source or the destination side.

        NOTE(review): three log lines were dropped by the extraction.
        """
        yield from self.update_svstate_namespace(overwrite_svstate)
        srcstep = self.svstate.srcstep
        dststep = self.svstate.dststep
        ssubstep = self.svstate.ssubstep
        dsubstep = self.svstate.dsubstep
        pack = self.svstate.pack
        unpack = self.svstate.unpack
        vl = self.svstate.vl
        sv_mode = yield self.dec2.rm_dec.sv_mode
        subvl = yield self.dec2.rm_dec.rm_in.subvl
        rm_mode = yield self.dec2.rm_dec.mode
        ff_inv = yield self.dec2.rm_dec.inv
        cr_bit = yield self.dec2.rm_dec.cr_sel
        log(" srcstep", srcstep)
        log(" dststep", dststep)
        log(" unpack", unpack)
        log(" ssubstep", ssubstep)
        log(" dsubstep", dsubstep)
        log(" subvl", subvl)
        log(" rm_mode", rm_mode)
        log(" sv_mode", sv_mode)
        log(" cr_bit", cr_bit)
        # check if end reached (we let srcstep overrun, above)
        # nothing needs doing (TODO zeroing): just do next instruction
        return ((ssubstep == subvl and srcstep == vl) or
                (dsubstep == subvl and dststep == vl))
    def svstate_post_inc(self, insn_name, vf=0):
        """advances the SVP64 sub-PC loop after an instruction: steps
        advance via advance_svstate_steps and, while still looping, the
        PC is held (NIA forced back to CIA) so the same instruction
        repeats with new steps.  returns True when the PC may update,
        False while the Sub-PC loop is still running.

        NOTE(review): a handful of guard/return lines were dropped by
        the extraction — marked below.
        """
        # check if SV "Vertical First" mode is enabled
        vfirst = self.svstate.vfirst
        log(" SV Vertical First", vf, vfirst)
        if not vf and vfirst == 1:
            # NOTE(review): body elided in this extract (presumably NIA
            # update + early return True — confirm upstream)
        # check if it is the SVSTATE.src/dest step that needs incrementing
        # this is our Sub-Program-Counter loop from 0 to VL-1
        # XXX twin predication TODO
        vl = self.svstate.vl
        subvl = yield self.dec2.rm_dec.rm_in.subvl
        mvl = self.svstate.maxvl
        srcstep = self.svstate.srcstep
        dststep = self.svstate.dststep
        ssubstep = self.svstate.ssubstep
        dsubstep = self.svstate.dsubstep
        pack = self.svstate.pack
        unpack = self.svstate.unpack
        rm_mode = yield self.dec2.rm_dec.mode
        reverse_gear = yield self.dec2.rm_dec.reverse_gear
        sv_ptype = yield self.dec2.dec.op.SV_Ptype
        out_vec = not (yield self.dec2.no_out_vec)
        in_vec = not (yield self.dec2.no_in_vec)
        log(" svstate.vl", vl)
        log(" svstate.mvl", mvl)
        log(" rm.subvl", subvl)
        log(" svstate.srcstep", srcstep)
        log(" svstate.dststep", dststep)
        log(" svstate.ssubstep", ssubstep)
        log(" svstate.dsubstep", dsubstep)
        log(" svstate.pack", pack)
        log(" svstate.unpack", unpack)
        log(" mode", rm_mode)
        log(" reverse", reverse_gear)
        log(" out_vec", out_vec)
        log(" in_vec", in_vec)
        log(" sv_ptype", sv_ptype, sv_ptype == SVPType.P2.value)
        # check if this was an sv.bc* and if so did it succeed
        if self.is_svp64_mode and insn_name.startswith("sv.bc"):
            end_loop = self.namespace['end_loop']
            log("branch %s end_loop" % insn_name, end_loop)
            # NOTE(review): end_loop guard elided before the reset below
            self.svp64_reset_loop()
            self.update_pc_next()
            # NOTE(review): return elided in this extract
        # check if srcstep needs incrementing by one, stop PC advancing
        # but for 2-pred both src/dest have to be checked.
        # XXX this might not be true! it may just be LD/ST
        if sv_ptype == SVPType.P2.value:
            svp64_is_vector = (out_vec or in_vec)
        # NOTE(review): else-branch header elided
            svp64_is_vector = out_vec
        # loops end at the first "hit" (source or dest)
        yield from self.advance_svstate_steps()
        loopend = self.loopend
        log("loopend", svp64_is_vector, loopend)
        if not svp64_is_vector or loopend:
            # reset loop to zero and update NIA
            self.svp64_reset_loop()
            # NOTE(review): vfirst clear / NIA update / return elided
        # still looping, advance and update NIA
        self.namespace['SVSTATE'] = self.svstate
        # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
        # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
        # this way we keep repeating the same instruction (with new steps)
        self.pc.NIA.value = self.pc.CIA.value
        self.namespace['NIA'] = self.pc.NIA
        log("end of sub-pc call", self.namespace['CIA'], self.namespace['NIA'])
        return False  # DO NOT allow PC update whilst Sub-PC loop running
2747 def update_pc_next(self
):
2748 # UPDATE program counter
2749 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
2750 #self.svstate.spr = self.namespace['SVSTATE']
2751 log("end of call", self
.namespace
['CIA'],
2752 self
.namespace
['NIA'],
2753 self
.namespace
['SVSTATE'])
2755 def svp64_reset_loop(self
):
2756 self
.svstate
.srcstep
= 0
2757 self
.svstate
.dststep
= 0
2758 self
.svstate
.ssubstep
= 0
2759 self
.svstate
.dsubstep
= 0
2760 self
.loopend
= False
2761 log(" svstate.srcstep loop end (PC to update)")
2762 self
.namespace
['SVSTATE'] = self
.svstate
2764 def update_nia(self
):
2765 self
.pc
.update_nia(self
.is_svp64_mode
)
2766 self
.namespace
['NIA'] = self
.pc
.NIA
    # NOTE(review): the enclosing "def inject():" header was dropped by
    # the extraction; what follows is its docstring and body.
    """Decorator factory.

    this decorator will "inject" variables into the function's namespace,
    from the *dictionary* in self.namespace. it therefore becomes possible
    to make it look like a whole stack of variables which would otherwise
    need "self." inserted in front of them (*and* for those variables to be
    added to the instance) "appear" in the function.

    "self.namespace['SI']" for example becomes accessible as just "SI" but
    *only* inside the function, when decorated.
    """
    def variable_injector(func):
        # NOTE(review): a "@wraps(func)" line was presumably elided here
        # (wraps is imported at file top) — confirm against upstream
        def decorator(*args, **kwargs):
            # NOTE(review): "try:" header elided before the next line
            func_globals = func.__globals__  # Python 2.6+
            except AttributeError:
                func_globals = func.func_globals  # Earlier versions.
            # args[0] is the ISACaller instance ("self" of the wrapped fn)
            context = args[0].namespace  # variables to be injected
            saved_values = func_globals.copy()  # Shallow copy of dict.
            log("globals before", context.keys())
            # overlay the namespace onto the function's module globals so
            # the pseudocode sees e.g. "NIA" as a bare name
            func_globals.update(context)
            result = func(*args, **kwargs)
            log("globals after", func_globals['CIA'], func_globals['NIA'])
            log("args[0]", args[0].namespace['CIA'],
                args[0].namespace['NIA'],
                args[0].namespace['SVSTATE'])
            if 'end_loop' in func_globals:
                log("args[0] end_loop", func_globals['end_loop'])
            # capture the (possibly modified) globals back as the namespace
            args[0].namespace = func_globals
            #exec (func.__code__, func_globals)
            # func_globals = saved_values # Undo changes.
            # NOTE(review): "return result" and "return decorator" lines
            # elided in this extract
    return variable_injector