1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Copyright (C) 2020 Michael Nolan
4 # Funded by NLnet http://nlnet.nl
5 """core of the python-based POWER9 simulator
7 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
8 not speed, it is for both learning and educational purposes, as well as
9 a method of verifying the HDL.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=424
16 from collections
import namedtuple
17 from copy
import deepcopy
18 from functools
import wraps
20 from nmigen
.sim
import Settle
21 from openpower
.consts
import (MSRb
, PIb
, # big-endian (PowerISA versions)
22 SVP64CROffs
, SVP64MODEb
)
23 from openpower
.decoder
.helpers
import (ISACallerHelper
, ISAFPHelpers
, exts
,
25 from openpower
.decoder
.isa
.mem
import Mem
, MemException
26 from openpower
.decoder
.isa
.radixmmu
import RADIX
27 from openpower
.decoder
.isa
.svshape
import SVSHAPE
28 from openpower
.decoder
.isa
.svstate
import SVP64State
29 from openpower
.decoder
.orderedset
import OrderedSet
30 from openpower
.decoder
.power_enums
import (FPTRANS_INSNS
, CRInSel
, CROutSel
,
31 In1Sel
, In2Sel
, In3Sel
, LDSTMode
,
32 MicrOp
, OutSel
, SVMode
,
33 SVP64LDSTmode
, SVP64PredCR
,
34 SVP64PredInt
, SVP64PredMode
,
35 SVP64RMMode
, SVPType
, XER_bits
,
36 insns
, spr_byname
, spr_dict
,
38 from openpower
.insndb
.core
import SVP64Instruction
39 from openpower
.decoder
.power_svp64
import SVP64RM
, decode_extra
40 from openpower
.decoder
.selectable_int
import (FieldSelectableInt
,
41 SelectableInt
, selectconcat
,
42 EFFECTIVELY_UNLIMITED
)
43 from openpower
.fpscr
import FPSCRState
44 from openpower
.xer
import XERState
45 from openpower
.util
import LogKind
, log
47 LDST_UPDATE_INSNS
= ['ldu', 'lwzu', 'lbzu', 'lhzu', 'lhau', 'lfsu', 'lfdu',
48 'stwu', 'stbu', 'sthu', 'stfsu', 'stfdu', 'stdu',
52 instruction_info
= namedtuple('instruction_info',
53 'func read_regs uninit_regs write_regs ' +
54 'special_regs op_fields form asmregs')
64 # rrright. this is here basically because the compiler pywriter returns
65 # results in a specific priority order. to make sure regs match up they
66 # need partial sorting. sigh.
68 # TODO (lkcl): adjust other registers that should be in a particular order
69 # probably CA, CA32, and CR
97 "overflow": 7, # should definitely be last
101 fregs
= ['FRA', 'FRB', 'FRC', 'FRS', 'FRT']
def get_masked_reg(regs, base, offs, ew_bits):
    """Read one element of width *ew_bits* from a bank of 64-bit registers.

    The registers are treated as a flat sequence of ew_bits-wide elements
    starting at 64-bit register index *base*; *offs* selects the element.

    :param regs: indexable register file holding plain-int 64-bit values
    :param base: index of the first 64-bit register of the group
    :param offs: element offset (in ew_bits-sized units) from *base*
    :param ew_bits: element width in bits (must divide 64: 8/16/32/64)
    :return: the selected element as a plain int, masked to ew_bits
    """
    # rrrright. start by breaking down into row/col, based on elwidth:
    # row = which 64-bit register, col = which element within it
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # shift down so element we want is at LSB
    val >>= gpr_col * ew_bits
    # mask so we only return the LSB element
    # (fix: the visible original fell off the end without returning)
    return val & mask
def set_masked_reg(regs, base, offs, ew_bits, value):
    """Write one element of width *ew_bits* into a bank of 64-bit registers.

    Inverse of get_masked_reg: the registers form a flat sequence of
    ew_bits-wide elements starting at 64-bit register index *base*, and
    the element at index *offs* is replaced by *value* (truncated to
    ew_bits) without disturbing neighbouring elements.

    :param regs: mutable indexable register file of plain-int 64-bit values
    :param base: index of the first 64-bit register of the group
    :param offs: element offset (in ew_bits-sized units) from *base*
    :param ew_bits: element width in bits (must divide 64: 8/16/32/64)
    :param value: new element value; bits above ew_bits are discarded
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # now mask out the bits we don't want (clear the target element)
    val = val & ~(mask << (gpr_col * ew_bits))
    # then wipe the bits we don't want from the value
    # (fix: the masking operation itself was missing, so an oversized
    # value would corrupt neighbouring elements)
    value = value & mask
    # OR the new value in, shifted up into position
    val |= value << (gpr_col * ew_bits)
    regs[base + gpr_offs] = val
def create_args(reglist, extra=None):
    """Return *reglist* deduplicated (order-preserving) and partially
    sorted by REG_SORT_ORDER, optionally prefixed with *extra*.

    :param reglist: iterable of register names (may contain duplicates)
    :param extra: optional value prepended to the result (or None)
    :return: list of unique register names in priority order
    """
    retval = list(OrderedSet(reglist))
    # partial sort: names absent from REG_SORT_ORDER keep priority 0
    retval.sort(key=lambda reg: REG_SORT_ORDER.get(reg, 0))
    if extra is not None:
        return [extra] + retval
    # fix: the visible original had no return on the no-extra path
    return retval
144 def __init__(self
, decoder
, isacaller
, svstate
, regfile
):
147 self
.isacaller
= isacaller
148 self
.svstate
= svstate
149 for i
in range(len(regfile
)):
150 self
[i
] = SelectableInt(regfile
[i
], 64)
152 def __call__(self
, ridx
, is_vec
=False, offs
=0, elwidth
=64):
153 if isinstance(ridx
, SelectableInt
):
156 return self
[ridx
+offs
]
157 # rrrright. start by breaking down into row/col, based on elwidth
158 gpr_offs
= offs
// (64//elwidth
)
159 gpr_col
= offs
% (64//elwidth
)
160 # now select the 64-bit register, but get its value (easier)
161 val
= self
[ridx
+gpr_offs
].value
162 # now shift down and mask out
163 val
= val
>> (gpr_col
*elwidth
) & ((1 << elwidth
)-1)
164 # finally, return a SelectableInt at the required elwidth
165 log("GPR call", ridx
, "isvec", is_vec
, "offs", offs
,
166 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
))
167 return SelectableInt(val
, elwidth
)
169 def set_form(self
, form
):
172 def write(self
, rnum
, value
, is_vec
=False, elwidth
=64):
174 if isinstance(rnum
, SelectableInt
):
176 if isinstance(value
, SelectableInt
):
179 if isinstance(rnum
, tuple):
180 rnum
, base
, offs
= rnum
183 # rrrright. start by breaking down into row/col, based on elwidth
184 gpr_offs
= offs
// (64//elwidth
)
185 gpr_col
= offs
% (64//elwidth
)
186 # compute the mask based on elwidth
187 mask
= (1 << elwidth
)-1
188 # now select the 64-bit register, but get its value (easier)
189 val
= self
[base
+gpr_offs
].value
190 # now mask out the bit we don't want
191 val
= val
& ~
(mask
<< (gpr_col
*elwidth
))
192 # then wipe the bit we don't want from the value
194 # OR the new value in, shifted up
195 val |
= value
<< (gpr_col
*elwidth
)
196 # finally put the damn value into the regfile
197 log("GPR write", base
, "isvec", is_vec
, "offs", offs
,
198 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
),
200 dict.__setitem
__(self
, base
+gpr_offs
, SelectableInt(val
, 64))
202 def __setitem__(self
, rnum
, value
):
203 # rnum = rnum.value # only SelectableInt allowed
204 log("GPR setitem", rnum
, value
)
205 if isinstance(rnum
, SelectableInt
):
207 dict.__setitem
__(self
, rnum
, value
)
209 def getz(self
, rnum
):
210 # rnum = rnum.value # only SelectableInt allowed
211 log("GPR getzero?", rnum
)
213 return SelectableInt(0, 64)
216 def _get_regnum(self
, attr
):
217 getform
= self
.sd
.sigforms
[self
.form
]
218 rnum
= getattr(getform
, attr
)
221 def ___getitem__(self
, attr
):
222 """ XXX currently not used
224 rnum
= self
._get
_regnum
(attr
)
225 log("GPR getitem", attr
, rnum
)
226 return self
.regfile
[rnum
]
228 def dump(self
, printout
=True):
230 for i
in range(len(self
)):
231 res
.append(self
[i
].value
)
233 for i
in range(0, len(res
), 8):
236 s
.append("%08x" % res
[i
+j
])
238 print("reg", "%2d" % i
, s
)
243 def __init__(self
, dec2
, initial_sprs
={}):
246 for key
, v
in initial_sprs
.items():
247 if isinstance(key
, SelectableInt
):
249 key
= special_sprs
.get(key
, key
)
250 if isinstance(key
, int):
253 info
= spr_byname
[key
]
254 if not isinstance(v
, SelectableInt
):
255 v
= SelectableInt(v
, info
.length
)
258 def __getitem__(self
, key
):
260 #log("dict", self.items())
261 # if key in special_sprs get the special spr, otherwise return key
262 if isinstance(key
, SelectableInt
):
264 if isinstance(key
, int):
265 key
= spr_dict
[key
].SPR
266 key
= special_sprs
.get(key
, key
)
267 if key
== 'HSRR0': # HACK!
269 if key
== 'HSRR1': # HACK!
272 res
= dict.__getitem
__(self
, key
)
274 if isinstance(key
, int):
277 info
= spr_byname
[key
]
278 self
[key
] = SelectableInt(0, info
.length
)
279 res
= dict.__getitem
__(self
, key
)
280 #log("spr returning", key, res)
283 def __setitem__(self
, key
, value
):
284 if isinstance(key
, SelectableInt
):
286 if isinstance(key
, int):
287 key
= spr_dict
[key
].SPR
289 key
= special_sprs
.get(key
, key
)
290 if key
== 'HSRR0': # HACK!
291 self
.__setitem
__('SRR0', value
)
292 if key
== 'HSRR1': # HACK!
293 self
.__setitem
__('SRR1', value
)
295 value
= XERState(value
)
296 log("setting spr", key
, value
)
297 dict.__setitem
__(self
, key
, value
)
299 def __call__(self
, ridx
):
302 def dump(self
, printout
=True):
304 keys
= list(self
.keys())
307 sprname
= spr_dict
.get(k
, None)
311 sprname
= sprname
.SPR
312 res
.append((sprname
, self
[k
].value
))
314 for sprname
, value
in res
:
315 print(" ", sprname
, hex(value
))
320 def __init__(self
, pc_init
=0):
321 self
.CIA
= SelectableInt(pc_init
, 64)
322 self
.NIA
= self
.CIA
+ SelectableInt(4, 64) # only true for v3.0B!
def update_nia(self, is_svp64):
    """Recompute NIA from CIA: +8 for an SVP64-prefixed (64-bit)
    instruction, +4 for a standard 32-bit one."""
    if is_svp64:
        step = 8
    else:
        step = 4
    self.NIA = self.CIA + SelectableInt(step, 64)
def update(self, namespace, is_svp64):
    """Advance the program counter (PC).

    CIA takes the previous NIA (narrowed to 64 bits), NIA is then
    recomputed (+4 in v3.0B mode, +8 for SVP64), and both values are
    published back into *namespace* under 'CIA' and 'NIA'.
    """
    next_ia = namespace['NIA'].narrow(64)
    self.CIA = next_ia
    self.update_nia(is_svp64)
    namespace['CIA'] = self.CIA
    namespace['NIA'] = self.NIA
338 # See PowerISA Version 3.0 B Book 1
339 # Section 2.3.1 Condition Register pages 30 - 31
341 LT
= FL
= 0 # negative, less than, floating-point less than
342 GT
= FG
= 1 # positive, greater than, floating-point greater than
343 EQ
= FE
= 2 # equal, floating-point equal
344 SO
= FU
= 3 # summary overflow, floating-point unordered
346 def __init__(self
, init
=0):
347 # rev_cr = int('{:016b}'.format(initial_cr)[::-1], 2)
348 # self.cr = FieldSelectableInt(self._cr, list(range(32, 64)))
349 self
.cr
= SelectableInt(init
, 64) # underlying reg
350 # field-selectable versions of Condition Register TODO check bitranges?
353 bits
= tuple(range(i
*4+32, (i
+1)*4+32))
354 _cr
= FieldSelectableInt(self
.cr
, bits
)
358 # decode SVP64 predicate integer to reg number and invert
359 def get_predint(gpr
, mask
):
363 log("get_predint", mask
, SVP64PredInt
.ALWAYS
.value
)
364 if mask
== SVP64PredInt
.ALWAYS
.value
:
365 return 0xffff_ffff_ffff_ffff # 64 bits of 1
366 if mask
== SVP64PredInt
.R3_UNARY
.value
:
367 return 1 << (r3
.value
& 0b111111)
368 if mask
== SVP64PredInt
.R3
.value
:
370 if mask
== SVP64PredInt
.R3_N
.value
:
372 if mask
== SVP64PredInt
.R10
.value
:
374 if mask
== SVP64PredInt
.R10_N
.value
:
376 if mask
== SVP64PredInt
.R30
.value
:
378 if mask
== SVP64PredInt
.R30_N
.value
:
382 # decode SVP64 predicate CR to reg number and invert status
383 def _get_predcr(mask
):
384 if mask
== SVP64PredCR
.LT
.value
:
386 if mask
== SVP64PredCR
.GE
.value
:
388 if mask
== SVP64PredCR
.GT
.value
:
390 if mask
== SVP64PredCR
.LE
.value
:
392 if mask
== SVP64PredCR
.EQ
.value
:
394 if mask
== SVP64PredCR
.NE
.value
:
396 if mask
== SVP64PredCR
.SO
.value
:
398 if mask
== SVP64PredCR
.NS
.value
:
402 # read individual CR fields (0..VL-1), extract the required bit
403 # and construct the mask
404 def get_predcr(crl
, mask
, vl
):
405 idx
, noninv
= _get_predcr(mask
)
408 cr
= crl
[i
+SVP64CROffs
.CRPred
]
409 if cr
[idx
].value
== noninv
:
414 # TODO, really should just be using PowerDecoder2
415 def get_idx_map(dec2
, name
):
417 in1_sel
= yield op
.in1_sel
418 in2_sel
= yield op
.in2_sel
419 in3_sel
= yield op
.in3_sel
420 in1
= yield dec2
.e
.read_reg1
.data
421 # identify which regnames map to in1/2/3
422 if name
== 'RA' or name
== 'RA_OR_ZERO':
423 if (in1_sel
== In1Sel
.RA
.value
or
424 (in1_sel
== In1Sel
.RA_OR_ZERO
.value
and in1
!= 0)):
426 if in1_sel
== In1Sel
.RA_OR_ZERO
.value
:
429 if in2_sel
== In2Sel
.RB
.value
:
431 if in3_sel
== In3Sel
.RB
.value
:
433 # XXX TODO, RC doesn't exist yet!
435 if in3_sel
== In3Sel
.RC
.value
:
437 elif name
in ['EA', 'RS']:
438 if in1_sel
== In1Sel
.RS
.value
:
440 if in2_sel
== In2Sel
.RS
.value
:
442 if in3_sel
== In3Sel
.RS
.value
:
445 if in1_sel
== In1Sel
.FRA
.value
:
447 if in3_sel
== In3Sel
.FRA
.value
:
450 if in2_sel
== In2Sel
.FRB
.value
:
453 if in3_sel
== In3Sel
.FRC
.value
:
456 if in1_sel
== In1Sel
.FRS
.value
:
458 if in3_sel
== In3Sel
.FRS
.value
:
461 if in1_sel
== In1Sel
.FRT
.value
:
464 if in1_sel
== In1Sel
.RT
.value
:
469 # TODO, really should just be using PowerDecoder2
470 def get_idx_in(dec2
, name
, ewmode
=False):
471 idx
= yield from get_idx_map(dec2
, name
)
475 in1_sel
= yield op
.in1_sel
476 in2_sel
= yield op
.in2_sel
477 in3_sel
= yield op
.in3_sel
478 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
479 in1
= yield dec2
.e
.read_reg1
.data
480 in2
= yield dec2
.e
.read_reg2
.data
481 in3
= yield dec2
.e
.read_reg3
.data
483 in1_base
= yield dec2
.e
.read_reg1
.base
484 in2_base
= yield dec2
.e
.read_reg2
.base
485 in3_base
= yield dec2
.e
.read_reg3
.base
486 in1_offs
= yield dec2
.e
.read_reg1
.offs
487 in2_offs
= yield dec2
.e
.read_reg2
.offs
488 in3_offs
= yield dec2
.e
.read_reg3
.offs
489 in1
= (in1
, in1_base
, in1_offs
)
490 in2
= (in2
, in2_base
, in2_offs
)
491 in3
= (in3
, in3_base
, in3_offs
)
493 in1_isvec
= yield dec2
.in1_isvec
494 in2_isvec
= yield dec2
.in2_isvec
495 in3_isvec
= yield dec2
.in3_isvec
496 log("get_idx_in in1", name
, in1_sel
, In1Sel
.RA
.value
,
498 log("get_idx_in in2", name
, in2_sel
, In2Sel
.RB
.value
,
500 log("get_idx_in in3", name
, in3_sel
, In3Sel
.RS
.value
,
502 log("get_idx_in FRS in3", name
, in3_sel
, In3Sel
.FRS
.value
,
504 log("get_idx_in FRB in2", name
, in2_sel
, In2Sel
.FRB
.value
,
506 log("get_idx_in FRC in3", name
, in3_sel
, In3Sel
.FRC
.value
,
509 return in1
, in1_isvec
511 return in2
, in2_isvec
513 return in3
, in3_isvec
517 # TODO, really should just be using PowerDecoder2
518 def get_cr_in(dec2
, name
):
520 in_sel
= yield op
.cr_in
521 in_bitfield
= yield dec2
.dec_cr_in
.cr_bitfield
.data
522 sv_cr_in
= yield op
.sv_cr_in
523 spec
= yield dec2
.crin_svdec
.spec
524 sv_override
= yield dec2
.dec_cr_in
.sv_override
525 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
526 in1
= yield dec2
.e
.read_cr1
.data
527 cr_isvec
= yield dec2
.cr_in_isvec
528 log("get_cr_in", in_sel
, CROutSel
.CR0
.value
, in1
, cr_isvec
)
529 log(" sv_cr_in", sv_cr_in
)
530 log(" cr_bf", in_bitfield
)
532 log(" override", sv_override
)
533 # identify which regnames map to in / o2
535 if in_sel
== CRInSel
.BI
.value
:
537 log("get_cr_in not found", name
)
541 # TODO, really should just be using PowerDecoder2
542 def get_cr_out(dec2
, name
):
544 out_sel
= yield op
.cr_out
545 out_bitfield
= yield dec2
.dec_cr_out
.cr_bitfield
.data
546 sv_cr_out
= yield op
.sv_cr_out
547 spec
= yield dec2
.crout_svdec
.spec
548 sv_override
= yield dec2
.dec_cr_out
.sv_override
549 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
550 out
= yield dec2
.e
.write_cr
.data
551 o_isvec
= yield dec2
.cr_out_isvec
552 log("get_cr_out", out_sel
, CROutSel
.CR0
.value
, out
, o_isvec
)
553 log(" sv_cr_out", sv_cr_out
)
554 log(" cr_bf", out_bitfield
)
556 log(" override", sv_override
)
557 # identify which regnames map to out / o2
559 if out_sel
== CROutSel
.BF
.value
:
562 if out_sel
== CROutSel
.CR0
.value
:
564 if name
== 'CR1': # these are not actually calculated correctly
565 if out_sel
== CROutSel
.CR1
.value
:
567 # check RC1 set? if so return implicit vector, this is a REAL bad hack
568 RC1
= yield dec2
.rm_dec
.RC1
570 log("get_cr_out RC1 mode")
572 return 0, True # XXX TODO: offset CR0 from SVSTATE SPR
574 return 1, True # XXX TODO: offset CR1 from SVSTATE SPR
576 log("get_cr_out not found", name
)
580 # TODO, really should just be using PowerDecoder2
581 def get_out_map(dec2
, name
):
583 out_sel
= yield op
.out_sel
584 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
585 out
= yield dec2
.e
.write_reg
.data
586 # identify which regnames map to out / o2
588 if out_sel
== OutSel
.RA
.value
:
591 if out_sel
== OutSel
.RT
.value
:
593 if out_sel
== OutSel
.RT_OR_ZERO
.value
and out
!= 0:
595 elif name
== 'RT_OR_ZERO':
596 if out_sel
== OutSel
.RT_OR_ZERO
.value
:
599 if out_sel
== OutSel
.FRA
.value
:
602 if out_sel
== OutSel
.FRS
.value
:
605 if out_sel
== OutSel
.FRT
.value
:
610 # TODO, really should just be using PowerDecoder2
611 def get_idx_out(dec2
, name
, ewmode
=False):
613 out_sel
= yield op
.out_sel
614 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
615 out
= yield dec2
.e
.write_reg
.data
616 o_isvec
= yield dec2
.o_isvec
618 offs
= yield dec2
.e
.write_reg
.offs
619 base
= yield dec2
.e
.write_reg
.base
620 out
= (out
, base
, offs
)
621 # identify which regnames map to out / o2
622 ismap
= yield from get_out_map(dec2
, name
)
624 log("get_idx_out", name
, out_sel
, out
, o_isvec
)
626 log("get_idx_out not found", name
, out_sel
, out
, o_isvec
)
630 # TODO, really should just be using PowerDecoder2
631 def get_out2_map(dec2
, name
):
632 # check first if register is activated for write
634 out_sel
= yield op
.out_sel
635 out
= yield dec2
.e
.write_ea
.data
636 out_ok
= yield dec2
.e
.write_ea
.ok
640 if name
in ['EA', 'RA']:
641 if hasattr(op
, "upd"):
642 # update mode LD/ST uses read-reg A also as an output
644 log("get_idx_out2", upd
, LDSTMode
.update
.value
,
645 out_sel
, OutSel
.RA
.value
,
647 if upd
== LDSTMode
.update
.value
:
650 fft_en
= yield dec2
.implicit_rs
652 log("get_idx_out2", out_sel
, OutSel
.RS
.value
,
656 fft_en
= yield dec2
.implicit_rs
658 log("get_idx_out2", out_sel
, OutSel
.FRS
.value
,
664 # TODO, really should just be using PowerDecoder2
665 def get_idx_out2(dec2
, name
, ewmode
=False):
666 # check first if register is activated for write
668 out_sel
= yield op
.out_sel
669 out
= yield dec2
.e
.write_ea
.data
671 offs
= yield dec2
.e
.write_ea
.offs
672 base
= yield dec2
.e
.write_ea
.base
673 out
= (out
, base
, offs
)
674 o_isvec
= yield dec2
.o2_isvec
675 ismap
= yield from get_out2_map(dec2
, name
)
677 log("get_idx_out2", name
, out_sel
, out
, o_isvec
)
683 """deals with svstate looping.
686 def __init__(self
, svstate
):
687 self
.svstate
= svstate
690 def new_iterators(self
):
691 self
.src_it
= self
.src_iterator()
692 self
.dst_it
= self
.dst_iterator()
696 self
.new_ssubstep
= 0
697 self
.new_dsubstep
= 0
698 self
.pred_dst_zero
= 0
699 self
.pred_src_zero
= 0
701 def src_iterator(self
):
702 """source-stepping iterator
704 pack
= self
.svstate
.pack
708 # pack advances subvl in *outer* loop
709 while True: # outer subvl loop
710 while True: # inner vl loop
713 srcmask
= self
.srcmask
714 srcstep
= self
.svstate
.srcstep
715 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
716 if self
.pred_sz
or pred_src_zero
:
717 self
.pred_src_zero
= not pred_src_zero
718 log(" advance src", srcstep
, vl
,
719 self
.svstate
.ssubstep
, subvl
)
720 # yield actual substep/srcstep
721 yield (self
.svstate
.ssubstep
, srcstep
)
722 # the way yield works these could have been modified.
725 srcstep
= self
.svstate
.srcstep
726 log(" advance src check", srcstep
, vl
,
727 self
.svstate
.ssubstep
, subvl
, srcstep
== vl
-1,
728 self
.svstate
.ssubstep
== subvl
)
729 if srcstep
== vl
-1: # end-point
730 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
731 if self
.svstate
.ssubstep
== subvl
: # end-point
732 log(" advance pack stop")
734 break # exit inner loop
735 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance ss
737 if self
.svstate
.ssubstep
== subvl
: # end-point
738 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
739 log(" advance pack stop")
741 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
744 # these cannot be done as for-loops because SVSTATE may change
745 # (srcstep/substep may be modified, interrupted, subvl/vl change)
746 # but they *can* be done as while-loops as long as every SVSTATE
747 # "thing" is re-read every single time a yield gives indices
748 while True: # outer vl loop
749 while True: # inner subvl loop
752 srcmask
= self
.srcmask
753 srcstep
= self
.svstate
.srcstep
754 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
755 if self
.pred_sz
or pred_src_zero
:
756 self
.pred_src_zero
= not pred_src_zero
757 log(" advance src", srcstep
, vl
,
758 self
.svstate
.ssubstep
, subvl
)
759 # yield actual substep/srcstep
760 yield (self
.svstate
.ssubstep
, srcstep
)
761 if self
.svstate
.ssubstep
== subvl
: # end-point
762 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
763 break # exit inner loop
764 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
766 if srcstep
== vl
-1: # end-point
767 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
770 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance srcstep
772 def dst_iterator(self
):
773 """dest-stepping iterator
775 unpack
= self
.svstate
.unpack
779 # pack advances subvl in *outer* loop
780 while True: # outer subvl loop
781 while True: # inner vl loop
784 dstmask
= self
.dstmask
785 dststep
= self
.svstate
.dststep
786 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
787 if self
.pred_dz
or pred_dst_zero
:
788 self
.pred_dst_zero
= not pred_dst_zero
789 log(" advance dst", dststep
, vl
,
790 self
.svstate
.dsubstep
, subvl
)
791 # yield actual substep/dststep
792 yield (self
.svstate
.dsubstep
, dststep
)
793 # the way yield works these could have been modified.
795 dststep
= self
.svstate
.dststep
796 log(" advance dst check", dststep
, vl
,
797 self
.svstate
.ssubstep
, subvl
)
798 if dststep
== vl
-1: # end-point
799 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
800 if self
.svstate
.dsubstep
== subvl
: # end-point
801 log(" advance unpack stop")
804 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance ds
806 if self
.svstate
.dsubstep
== subvl
: # end-point
807 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
808 log(" advance unpack stop")
810 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
812 # these cannot be done as for-loops because SVSTATE may change
813 # (dststep/substep may be modified, interrupted, subvl/vl change)
814 # but they *can* be done as while-loops as long as every SVSTATE
815 # "thing" is re-read every single time a yield gives indices
816 while True: # outer vl loop
817 while True: # inner subvl loop
819 dstmask
= self
.dstmask
820 dststep
= self
.svstate
.dststep
821 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
822 if self
.pred_dz
or pred_dst_zero
:
823 self
.pred_dst_zero
= not pred_dst_zero
824 log(" advance dst", dststep
, self
.svstate
.vl
,
825 self
.svstate
.dsubstep
, subvl
)
826 # yield actual substep/dststep
827 yield (self
.svstate
.dsubstep
, dststep
)
828 if self
.svstate
.dsubstep
== subvl
: # end-point
829 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
831 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
834 if dststep
== vl
-1: # end-point
835 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
837 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance dststep
839 def src_iterate(self
):
840 """source-stepping iterator
844 pack
= self
.svstate
.pack
845 unpack
= self
.svstate
.unpack
846 ssubstep
= self
.svstate
.ssubstep
847 end_ssub
= ssubstep
== subvl
848 end_src
= self
.svstate
.srcstep
== vl
-1
849 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
853 srcstep
= self
.svstate
.srcstep
854 srcmask
= self
.srcmask
856 # pack advances subvl in *outer* loop
858 assert srcstep
<= vl
-1
859 end_src
= srcstep
== vl
-1
864 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
868 srcstep
+= 1 # advance srcstep
869 if not self
.srcstep_skip
:
871 if ((1 << srcstep
) & srcmask
) != 0:
874 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
876 # advance subvl in *inner* loop
879 assert srcstep
<= vl
-1
880 end_src
= srcstep
== vl
-1
881 if end_src
: # end-point
887 if not self
.srcstep_skip
:
889 if ((1 << srcstep
) & srcmask
) != 0:
892 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
893 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
896 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
898 self
.svstate
.srcstep
= SelectableInt(srcstep
, 7)
899 log(" advance src", self
.svstate
.srcstep
, self
.svstate
.ssubstep
,
902 def dst_iterate(self
):
903 """dest step iterator
907 pack
= self
.svstate
.pack
908 unpack
= self
.svstate
.unpack
909 dsubstep
= self
.svstate
.dsubstep
910 end_dsub
= dsubstep
== subvl
911 dststep
= self
.svstate
.dststep
912 end_dst
= dststep
== vl
-1
913 dstmask
= self
.dstmask
914 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
919 # unpack advances subvl in *outer* loop
921 assert dststep
<= vl
-1
922 end_dst
= dststep
== vl
-1
927 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
931 dststep
+= 1 # advance dststep
932 if not self
.dststep_skip
:
934 if ((1 << dststep
) & dstmask
) != 0:
937 log(" dskip", bin(dstmask
), bin(1 << dststep
))
939 # advance subvl in *inner* loop
942 assert dststep
<= vl
-1
943 end_dst
= dststep
== vl
-1
944 if end_dst
: # end-point
950 if not self
.dststep_skip
:
952 if ((1 << dststep
) & dstmask
) != 0:
955 log(" dskip", bin(dstmask
), bin(1 << dststep
))
956 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
959 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
961 self
.svstate
.dststep
= SelectableInt(dststep
, 7)
962 log(" advance dst", self
.svstate
.dststep
, self
.svstate
.dsubstep
,
965 def at_loopend(self
):
966 """tells if this is the last possible element. uses the cached values
967 for src/dst-step and sub-steps
971 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
972 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
973 end_ssub
= ssubstep
== subvl
974 end_dsub
= dsubstep
== subvl
975 if srcstep
== vl
-1 and end_ssub
:
977 if dststep
== vl
-1 and end_dsub
:
981 def advance_svstate_steps(self
):
982 """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
983 TODO when Pack/Unpack is set, substep becomes the *outer* loop
985 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
986 if self
.loopend
: # huhn??
991 def read_src_mask(self
):
992 """read/update pred_sz and src mask
994 # get SVSTATE VL (oh and print out some debug stuff)
996 srcstep
= self
.svstate
.srcstep
997 ssubstep
= self
.svstate
.ssubstep
999 # get predicate mask (all 64 bits)
1000 srcmask
= 0xffff_ffff_ffff_ffff
1002 pmode
= yield self
.dec2
.rm_dec
.predmode
1003 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1004 srcpred
= yield self
.dec2
.rm_dec
.srcpred
1005 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1006 pred_sz
= yield self
.dec2
.rm_dec
.pred_sz
1007 if pmode
== SVP64PredMode
.INT
.value
:
1008 srcmask
= dstmask
= get_predint(self
.gpr
, dstpred
)
1009 if sv_ptype
== SVPType
.P2
.value
:
1010 srcmask
= get_predint(self
.gpr
, srcpred
)
1011 elif pmode
== SVP64PredMode
.CR
.value
:
1012 srcmask
= dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1013 if sv_ptype
== SVPType
.P2
.value
:
1014 srcmask
= get_predcr(self
.crl
, srcpred
, vl
)
1015 # work out if the ssubsteps are completed
1016 ssubstart
= ssubstep
== 0
1017 log(" pmode", pmode
)
1018 log(" ptype", sv_ptype
)
1019 log(" srcpred", bin(srcpred
))
1020 log(" srcmask", bin(srcmask
))
1021 log(" pred_sz", bin(pred_sz
))
1022 log(" ssubstart", ssubstart
)
1024 # store all that above
1025 self
.srcstep_skip
= False
1026 self
.srcmask
= srcmask
1027 self
.pred_sz
= pred_sz
1028 self
.new_ssubstep
= ssubstep
1029 log(" new ssubstep", ssubstep
)
1030 # until the predicate mask has a "1" bit... or we run out of VL
1031 # let srcstep==VL be the indicator to move to next instruction
1033 self
.srcstep_skip
= True
1035 def read_dst_mask(self
):
1036 """same as read_src_mask - check and record everything needed
1038 # get SVSTATE VL (oh and print out some debug stuff)
1039 # yield Delay(1e-10) # make changes visible
1040 vl
= self
.svstate
.vl
1041 dststep
= self
.svstate
.dststep
1042 dsubstep
= self
.svstate
.dsubstep
1044 # get predicate mask (all 64 bits)
1045 dstmask
= 0xffff_ffff_ffff_ffff
1047 pmode
= yield self
.dec2
.rm_dec
.predmode
1048 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
1049 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1050 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1051 pred_dz
= yield self
.dec2
.rm_dec
.pred_dz
1052 if pmode
== SVP64PredMode
.INT
.value
:
1053 dstmask
= get_predint(self
.gpr
, dstpred
)
1054 elif pmode
== SVP64PredMode
.CR
.value
:
1055 dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1056 # work out if the ssubsteps are completed
1057 dsubstart
= dsubstep
== 0
1058 log(" pmode", pmode
)
1059 log(" ptype", sv_ptype
)
1060 log(" dstpred", bin(dstpred
))
1061 log(" dstmask", bin(dstmask
))
1062 log(" pred_dz", bin(pred_dz
))
1063 log(" dsubstart", dsubstart
)
1065 self
.dststep_skip
= False
1066 self
.dstmask
= dstmask
1067 self
.pred_dz
= pred_dz
1068 self
.new_dsubstep
= dsubstep
1069 log(" new dsubstep", dsubstep
)
1071 self
.dststep_skip
= True
1073 def svstate_pre_inc(self
):
1074 """check if srcstep/dststep need to skip over masked-out predicate bits
1075 note that this is not supposed to do anything to substep,
1076 it is purely for skipping masked-out bits
1079 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1080 yield from self
.read_src_mask()
1081 yield from self
.read_dst_mask()
1088 srcstep
= self
.svstate
.srcstep
1089 srcmask
= self
.srcmask
1090 pred_src_zero
= self
.pred_sz
1091 vl
= self
.svstate
.vl
1092 # srcstep-skipping opportunity identified
1093 if self
.srcstep_skip
:
1094 # cannot do this with sv.bc - XXX TODO
1097 while (((1 << srcstep
) & srcmask
) == 0) and (srcstep
!= vl
):
1098 log(" sskip", bin(1 << srcstep
))
1101 # now work out if the relevant mask bits require zeroing
1103 pred_src_zero
= ((1 << srcstep
) & srcmask
) == 0
1105 # store new srcstep / dststep
1106 self
.new_srcstep
= srcstep
1107 self
.pred_src_zero
= pred_src_zero
1108 log(" new srcstep", srcstep
)
1111 # dststep-skipping opportunity identified
1112 dststep
= self
.svstate
.dststep
1113 dstmask
= self
.dstmask
1114 pred_dst_zero
= self
.pred_dz
1115 vl
= self
.svstate
.vl
1116 if self
.dststep_skip
:
1117 # cannot do this with sv.bc - XXX TODO
1120 while (((1 << dststep
) & dstmask
) == 0) and (dststep
!= vl
):
1121 log(" dskip", bin(1 << dststep
))
1124 # now work out if the relevant mask bits require zeroing
1126 pred_dst_zero
= ((1 << dststep
) & dstmask
) == 0
1128 # store new srcstep / dststep
1129 self
.new_dststep
= dststep
1130 self
.pred_dst_zero
= pred_dst_zero
1131 log(" new dststep", dststep
)
1134 class ISACaller(ISACallerHelper
, ISAFPHelpers
, StepLoop
):
1135 # decoder2 - an instance of power_decoder2
1136 # regfile - a list of initial values for the registers
1137 # initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
1138 # respect_pc - tracks the program counter. requires initial_insns
1139 def __init__(self
, decoder2
, regfile
, initial_sprs
=None, initial_cr
=0,
1140 initial_mem
=None, initial_msr
=0,
1153 # trace log file for model output. if None do nothing
1154 self
.insnlog
= insnlog
1155 self
.insnlog_is_file
= hasattr(insnlog
, "write")
1156 if not self
.insnlog_is_file
and self
.insnlog
:
1157 self
.insnlog
= open(self
.insnlog
, "w")
1159 self
.bigendian
= bigendian
1161 self
.is_svp64_mode
= False
1162 self
.respect_pc
= respect_pc
1163 if initial_sprs
is None:
1165 if initial_mem
is None:
1167 if fpregfile
is None:
1168 fpregfile
= [0] * 32
1169 if initial_insns
is None:
1171 assert self
.respect_pc
== False, "instructions required to honor pc"
1173 log("ISACaller insns", respect_pc
, initial_insns
, disassembly
)
1174 log("ISACaller initial_msr", initial_msr
)
1176 # "fake program counter" mode (for unit testing)
1180 if isinstance(initial_mem
, tuple):
1181 self
.fake_pc
= initial_mem
[0]
1182 disasm_start
= self
.fake_pc
1184 disasm_start
= initial_pc
1186 # disassembly: we need this for now (not given from the decoder)
1187 self
.disassembly
= {}
1189 for i
, code
in enumerate(disassembly
):
1190 self
.disassembly
[i
*4 + disasm_start
] = code
1192 # set up registers, instruction memory, data memory, PC, SPRs, MSR, CR
1193 self
.svp64rm
= SVP64RM()
1194 if initial_svstate
is None:
1196 if isinstance(initial_svstate
, int):
1197 initial_svstate
= SVP64State(initial_svstate
)
1198 # SVSTATE, MSR and PC
1199 StepLoop
.__init
__(self
, initial_svstate
)
1200 self
.msr
= SelectableInt(initial_msr
, 64) # underlying reg
1202 # GPR FPR SPR registers
1203 initial_sprs
= deepcopy(initial_sprs
) # so as not to get modified
1204 self
.gpr
= GPR(decoder2
, self
, self
.svstate
, regfile
)
1205 self
.fpr
= GPR(decoder2
, self
, self
.svstate
, fpregfile
)
1206 self
.spr
= SPR(decoder2
, initial_sprs
) # initialise SPRs before MMU
1208 # set up 4 dummy SVSHAPEs if they aren't already set up
1210 sname
= 'SVSHAPE%d' % i
1211 val
= self
.spr
.get(sname
, 0)
1212 # make sure it's an SVSHAPE
1213 self
.spr
[sname
] = SVSHAPE(val
, self
.gpr
)
1214 self
.last_op_svshape
= False
1217 self
.mem
= Mem(row_bytes
=8, initial_mem
=initial_mem
, misaligned_ok
=True)
1218 self
.mem
.log_fancy(kind
=LogKind
.InstrInOuts
)
1219 self
.imem
= Mem(row_bytes
=4, initial_mem
=initial_insns
)
1220 # MMU mode, redirect underlying Mem through RADIX
1222 self
.mem
= RADIX(self
.mem
, self
)
1224 self
.imem
= RADIX(self
.imem
, self
)
1226 # TODO, needed here:
1227 # FPR (same as GPR except for FP nums)
1228 # 4.2.2 p124 FPSCR (definitely "separate" - not in SPR)
1229 # note that mffs, mcrfs, mtfsf "manage" this FPSCR
1230 self
.fpscr
= FPSCRState(initial_fpscr
)
1232 # 2.3.1 CR (and sub-fields CR0..CR6 - CR0 SO comes from XER.SO)
1233 # note that mfocrf, mfcr, mtcr, mtocrf, mcrxrx "manage" CRs
1235 # 2.3.2 LR (actually SPR #8) -- Done
1236 # 2.3.3 CTR (actually SPR #9) -- Done
1237 # 2.3.4 TAR (actually SPR #815)
1238 # 3.2.2 p45 XER (actually SPR #1) -- Done
1239 # 3.2.3 p46 p232 VRSAVE (actually SPR #256)
1241 # create CR then allow portions of it to be "selectable" (below)
1242 self
.cr_fields
= CRFields(initial_cr
)
1243 self
.cr
= self
.cr_fields
.cr
1244 self
.cr_backup
= 0 # sigh, dreadful hack: for fail-first (VLi)
1246 # "undefined", just set to variable-bit-width int (use exts "max")
1247 # self.undefined = SelectableInt(0, EFFECTIVELY_UNLIMITED)
1250 self
.namespace
.update(self
.spr
)
1251 self
.namespace
.update({'GPR': self
.gpr
,
1255 'memassign': self
.memassign
,
1258 'SVSTATE': self
.svstate
,
1259 'SVSHAPE0': self
.spr
['SVSHAPE0'],
1260 'SVSHAPE1': self
.spr
['SVSHAPE1'],
1261 'SVSHAPE2': self
.spr
['SVSHAPE2'],
1262 'SVSHAPE3': self
.spr
['SVSHAPE3'],
1265 'FPSCR': self
.fpscr
,
1266 'undefined': undefined
,
1267 'mode_is_64bit': True,
1268 'SO': XER_bits
['SO'],
1269 'XLEN': 64 # elwidth overrides
1272 for name
in BFP_FLAG_NAMES
:
1273 setattr(self
, name
, 0)
1275 # update pc to requested start point
1276 self
.set_pc(initial_pc
)
1278 # field-selectable versions of Condition Register
1279 self
.crl
= self
.cr_fields
.crl
1281 self
.namespace
["CR%d" % i
] = self
.crl
[i
]
1283 self
.decoder
= decoder2
.dec
1284 self
.dec2
= decoder2
1286 super().__init
__(XLEN
=self
.namespace
["XLEN"], FPSCR
=self
.fpscr
)
def trace(self, out):
    """Write *out* to the per-instruction trace log, when one is active.

    A log of None means tracing is disabled and the call is a no-op.
    """
    log_file = self.insnlog
    if log_file is not None:
        log_file.write(out)
1294 return self
.namespace
["XLEN"]
def call_trap(self, trap_addr, trap_bit):
    """Enter a trap handler and redirect execution.

    Runs TRAP (which computes self.trap_nia from trap_addr), then
    forces NIA to that address so the next instruction executed is
    the first one of the handler at trap_addr.
    """
    self.TRAP(trap_addr, trap_bit)
    self.namespace['NIA'] = self.trap_nia
    self.pc.update(self.namespace, self.is_svp64_mode)
def TRAP(self, trap_addr=0x700, trap_bit=PIb.TRAP):
    """TRAP> saves PC, MSR (and TODO SVSTATE), and updates MSR

    TRAP function is callable from inside the pseudocode itself,
    hence the default arguments. when calling from inside ISACaller
    it is best to use call_trap()
    """
    # https://bugs.libre-soc.org/show_bug.cgi?id=859
    kaivb = self.spr['KAIVB'].value
    msr = self.namespace['MSR'].value
    log("TRAP:", hex(trap_addr), hex(msr), "kaivb", hex(kaivb))
    # store CIA(+4?) in SRR0, set NIA to 0x700
    # store MSR in SRR1, set MSR to um errr something, have to check spec
    # store SVSTATE (if enabled) in SVSRR0
    self.spr['SRR0'].value = self.pc.CIA.value
    self.spr['SRR1'].value = msr
    if self.is_svp64_mode:
        self.spr['SVSRR0'] = self.namespace['SVSTATE'].value
    self.trap_nia = SelectableInt(trap_addr | (kaivb & ~0x1fff), 64)
    self.spr['SRR1'][trap_bit] = 1  # change *copy* of MSR in SRR1

    # set exception bits. TODO: this should, based on the address
    # in figure 66 p1065 V3.0B and the table figure 65 p1063 set these
    # bits appropriately. however it turns out that *for now* in all
    # cases (all trap_addrs) the exact same thing is needed.
    # clear this set of MSR bits on trap entry...
    for bit in (MSRb.IR, MSRb.DR, MSRb.FE0, MSRb.FE1, MSRb.EE,
                MSRb.RI, MSRb.TM, MSRb.VEC, MSRb.VSX, MSRb.PR,
                MSRb.FP, MSRb.PMM, MSRb.TEs, MSRb.TEe, MSRb.UND):
        self.msr[bit] = 0
    # ... and force 64-bit little-endian handler mode
    self.msr[MSRb.SF] = 1
    self.msr[MSRb.LE] = 1
def memassign(self, ea, sz, val):
    """Store *val* (*sz* bytes wide) at effective address *ea*.

    Thin delegation to the underlying memory model; exposed into the
    pseudocode namespace under the same name.
    """
    self.mem.memassign(ea, sz, val)
1354 def prep_namespace(self
, insn_name
, formname
, op_fields
, xlen
):
1355 # TODO: get field names from form in decoder*1* (not decoder2)
1356 # decoder2 is hand-created, and decoder1.sigform is auto-generated
1358 # then "yield" fields only from op_fields rather than hard-coded
1360 fields
= self
.decoder
.sigforms
[formname
]
1361 log("prep_namespace", formname
, op_fields
, insn_name
)
1362 for name
in op_fields
:
1363 # CR immediates. deal with separately. needs modifying
1365 if self
.is_svp64_mode
and name
in ['BI']: # TODO, more CRs
1366 # BI is a 5-bit, must reconstruct the value
1367 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1368 sig
= getattr(fields
, name
)
1370 # low 2 LSBs (CR field selector) remain same, CR num extended
1371 assert regnum
<= 7, "sigh, TODO, 128 CR fields"
1372 val
= (val
& 0b11) |
(regnum
<< 2)
1373 elif self
.is_svp64_mode
and name
in ['BF']: # TODO, more CRs
1374 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, "BF")
1375 log('hack %s' % name
, regnum
, is_vec
)
1378 sig
= getattr(fields
, name
)
1380 # these are all opcode fields involved in index-selection of CR,
1381 # and need to do "standard" arithmetic. CR[BA+32] for example
1382 # would, if using SelectableInt, only be 5-bit.
1383 if name
in ['BF', 'BFA', 'BC', 'BA', 'BB', 'BT', 'BI']:
1384 self
.namespace
[name
] = val
1386 self
.namespace
[name
] = SelectableInt(val
, sig
.width
)
1388 self
.namespace
['XER'] = self
.spr
['XER']
1389 self
.namespace
['CA'] = self
.spr
['XER'][XER_bits
['CA']].value
1390 self
.namespace
['CA32'] = self
.spr
['XER'][XER_bits
['CA32']].value
1391 self
.namespace
['OV'] = self
.spr
['XER'][XER_bits
['OV']].value
1392 self
.namespace
['OV32'] = self
.spr
['XER'][XER_bits
['OV32']].value
1393 self
.namespace
['XLEN'] = xlen
1395 # add some SVSTATE convenience variables
1396 vl
= self
.svstate
.vl
1397 srcstep
= self
.svstate
.srcstep
1398 self
.namespace
['VL'] = vl
1399 self
.namespace
['srcstep'] = srcstep
1401 # take a copy of the CR field value: if non-VLi fail-first fails
1402 # this is because the pseudocode writes *directly* to CR. sigh
1403 self
.cr_backup
= self
.cr
.value
1405 # sv.bc* need some extra fields
1406 if self
.is_svp64_mode
and insn_name
.startswith("sv.bc"):
1407 # blegh grab bits manually
1408 mode
= yield self
.dec2
.rm_dec
.rm_in
.mode
1409 # convert to SelectableInt before test
1410 mode
= SelectableInt(mode
, 5)
1411 bc_vlset
= mode
[SVP64MODEb
.BC_VLSET
] != 0
1412 bc_vli
= mode
[SVP64MODEb
.BC_VLI
] != 0
1413 bc_snz
= mode
[SVP64MODEb
.BC_SNZ
] != 0
1414 bc_vsb
= yield self
.dec2
.rm_dec
.bc_vsb
1415 bc_lru
= yield self
.dec2
.rm_dec
.bc_lru
1416 bc_gate
= yield self
.dec2
.rm_dec
.bc_gate
1417 sz
= yield self
.dec2
.rm_dec
.pred_sz
1418 self
.namespace
['mode'] = SelectableInt(mode
, 5)
1419 self
.namespace
['ALL'] = SelectableInt(bc_gate
, 1)
1420 self
.namespace
['VSb'] = SelectableInt(bc_vsb
, 1)
1421 self
.namespace
['LRu'] = SelectableInt(bc_lru
, 1)
1422 self
.namespace
['VLSET'] = SelectableInt(bc_vlset
, 1)
1423 self
.namespace
['VLI'] = SelectableInt(bc_vli
, 1)
1424 self
.namespace
['sz'] = SelectableInt(sz
, 1)
1425 self
.namespace
['SNZ'] = SelectableInt(bc_snz
, 1)
1427 def get_kludged_op_add_ca_ov(self
, inputs
, inp_ca_ov
):
1428 """ this was not at all necessary to do. this function massively
1429 duplicates - in a laborious and complex fashion - the contents of
1430 the CSV files that were extracted two years ago from microwatt's
1431 source code. A-inversion is the "inv A" column, output inversion
1432 is the "inv out" column, carry-in equal to 0 or 1 or CA is the
1435 all of that information is available in
1436 self.instrs[ins_name].op_fields
1437 where info is usually assigned to self.instrs[ins_name]
1439 https://git.libre-soc.org/?p=openpower-isa.git;a=blob;f=openpower/isatables/minor_31.csv;hb=HEAD
1441 the immediate constants are *also* decoded correctly and placed
1442 usually by DecodeIn2Imm into operand2, as part of power_decoder2.py
1444 def ca(a
, b
, ca_in
, width
):
1445 mask
= (1 << width
) - 1
1446 y
= (a
& mask
) + (b
& mask
) + ca_in
1449 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1450 insn
= insns
.get(asmcode
)
1451 SI
= yield self
.dec2
.dec
.SI
1454 inputs
= [i
.value
for i
in inputs
]
1457 if insn
in ("add", "addo", "addc", "addco"):
1461 elif insn
== "addic" or insn
== "addic.":
1465 elif insn
in ("subf", "subfo", "subfc", "subfco"):
1469 elif insn
== "subfic":
1473 elif insn
== "adde" or insn
== "addeo":
1477 elif insn
== "subfe" or insn
== "subfeo":
1481 elif insn
== "addme" or insn
== "addmeo":
1485 elif insn
== "addze" or insn
== "addzeo":
1489 elif insn
== "subfme" or insn
== "subfmeo":
1493 elif insn
== "subfze" or insn
== "subfzeo":
1497 elif insn
== "addex":
1498 # CA[32] aren't actually written, just generate so we have
1499 # something to return
1500 ca64
= ov64
= ca(inputs
[0], inputs
[1], OV
, 64)
1501 ca32
= ov32
= ca(inputs
[0], inputs
[1], OV
, 32)
1502 return ca64
, ca32
, ov64
, ov32
1503 elif insn
== "neg" or insn
== "nego":
1508 raise NotImplementedError(
1509 "op_add kludge unimplemented instruction: ", asmcode
, insn
)
1511 ca64
= ca(a
, b
, ca_in
, 64)
1512 ca32
= ca(a
, b
, ca_in
, 32)
1513 ov64
= ca64
!= ca(a
, b
, ca_in
, 63)
1514 ov32
= ca32
!= ca(a
, b
, ca_in
, 31)
1515 return ca64
, ca32
, ov64
, ov32
1517 def handle_carry_(self
, inputs
, output
, ca
, ca32
, inp_ca_ov
):
1518 op
= yield self
.dec2
.e
.do
.insn_type
1519 if op
== MicrOp
.OP_ADD
.value
and ca
is None and ca32
is None:
1520 retval
= yield from self
.get_kludged_op_add_ca_ov(
1522 ca
, ca32
, ov
, ov32
= retval
1523 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1524 if insns
.get(asmcode
) == 'addex':
1525 # TODO: if 32-bit mode, set ov to ov32
1526 self
.spr
['XER'][XER_bits
['OV']] = ov
1527 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1529 # TODO: if 32-bit mode, set ca to ca32
1530 self
.spr
['XER'][XER_bits
['CA']] = ca
1531 self
.spr
['XER'][XER_bits
['CA32']] = ca32
1533 inv_a
= yield self
.dec2
.e
.do
.invert_in
1535 inputs
[0] = ~inputs
[0]
1537 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1539 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1540 inputs
.append(SelectableInt(imm
, 64))
1543 log("gt input", x
, output
)
1544 gt
= (gtu(x
, output
))
1547 cy
= 1 if any(gts
) else 0
1549 if ca
is None: # already written
1550 self
.spr
['XER'][XER_bits
['CA']] = cy
1553 # ARGH... different for OP_ADD... *sigh*...
1554 op
= yield self
.dec2
.e
.do
.insn_type
1555 if op
== MicrOp
.OP_ADD
.value
:
1556 res32
= (output
.value
& (1 << 32)) != 0
1557 a32
= (inputs
[0].value
& (1 << 32)) != 0
1558 if len(inputs
) >= 2:
1559 b32
= (inputs
[1].value
& (1 << 32)) != 0
1562 cy32
= res32 ^ a32 ^ b32
1563 log("CA32 ADD", cy32
)
1567 log("input", x
, output
)
1568 log(" x[32:64]", x
, x
[32:64])
1569 log(" o[32:64]", output
, output
[32:64])
1570 gt
= (gtu(x
[32:64], output
[32:64])) == SelectableInt(1, 1)
1572 cy32
= 1 if any(gts
) else 0
1573 log("CA32", cy32
, gts
)
1574 if ca32
is None: # already written
1575 self
.spr
['XER'][XER_bits
['CA32']] = cy32
1577 def handle_overflow(self
, inputs
, output
, div_overflow
, inp_ca_ov
):
1578 op
= yield self
.dec2
.e
.do
.insn_type
1579 if op
== MicrOp
.OP_ADD
.value
:
1580 retval
= yield from self
.get_kludged_op_add_ca_ov(
1582 ca
, ca32
, ov
, ov32
= retval
1583 # TODO: if 32-bit mode, set ov to ov32
1584 self
.spr
['XER'][XER_bits
['OV']] = ov
1585 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1586 self
.spr
['XER'][XER_bits
['SO']] |
= ov
1588 if hasattr(self
.dec2
.e
.do
, "invert_in"):
1589 inv_a
= yield self
.dec2
.e
.do
.invert_in
1591 inputs
[0] = ~inputs
[0]
1593 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1595 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1596 inputs
.append(SelectableInt(imm
, 64))
1597 log("handle_overflow", inputs
, output
, div_overflow
)
1598 if len(inputs
) < 2 and div_overflow
is None:
1601 # div overflow is different: it's returned by the pseudo-code
1602 # because it's more complex than can be done by analysing the output
1603 if div_overflow
is not None:
1604 ov
, ov32
= div_overflow
, div_overflow
1605 # arithmetic overflow can be done by analysing the input and output
1606 elif len(inputs
) >= 2:
1608 input_sgn
= [exts(x
.value
, x
.bits
) < 0 for x
in inputs
]
1609 output_sgn
= exts(output
.value
, output
.bits
) < 0
1610 ov
= 1 if input_sgn
[0] == input_sgn
[1] and \
1611 output_sgn
!= input_sgn
[0] else 0
1614 input32_sgn
= [exts(x
.value
, 32) < 0 for x
in inputs
]
1615 output32_sgn
= exts(output
.value
, 32) < 0
1616 ov32
= 1 if input32_sgn
[0] == input32_sgn
[1] and \
1617 output32_sgn
!= input32_sgn
[0] else 0
1619 # now update XER OV/OV32/SO
1620 so
= self
.spr
['XER'][XER_bits
['SO']]
1621 new_so
= so | ov
# sticky overflow ORs in old with new
1622 self
.spr
['XER'][XER_bits
['OV']] = ov
1623 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1624 self
.spr
['XER'][XER_bits
['SO']] = new_so
1625 log(" set overflow", ov
, ov32
, so
, new_so
)
1627 def handle_comparison(self
, out
, cr_idx
=0, overflow
=None, no_so
=False):
1628 assert isinstance(out
, SelectableInt
), \
1629 "out zero not a SelectableInt %s" % repr(outputs
)
1630 log("handle_comparison", out
.bits
, hex(out
.value
))
1631 # TODO - XXX *processor* in 32-bit mode
1632 # https://bugs.libre-soc.org/show_bug.cgi?id=424
1634 # o32 = exts(out.value, 32)
1635 # print ("handle_comparison exts 32 bit", hex(o32))
1636 out
= exts(out
.value
, out
.bits
)
1637 log("handle_comparison exts", hex(out
))
1638 # create the three main CR flags, EQ GT LT
1639 zero
= SelectableInt(out
== 0, 1)
1640 positive
= SelectableInt(out
> 0, 1)
1641 negative
= SelectableInt(out
< 0, 1)
1642 # get (or not) XER.SO. for setvl this is important *not* to read SO
1644 SO
= SelectableInt(1, 0)
1646 SO
= self
.spr
['XER'][XER_bits
['SO']]
1647 log("handle_comparison SO", SO
.value
,
1648 "overflow", overflow
,
1650 "+ve", positive
.value
,
1651 "-ve", negative
.value
)
1652 # alternative overflow checking (setvl mainly at the moment)
1653 if overflow
is not None and overflow
== 1:
1654 SO
= SelectableInt(1, 1)
1655 # create the four CR field values and set the required CR field
1656 cr_field
= selectconcat(negative
, positive
, zero
, SO
)
1657 log("handle_comparison cr_field", self
.cr
, cr_idx
, cr_field
)
1658 self
.crl
[cr_idx
].eq(cr_field
)
def set_pc(self, pc_val):
    """Force the next-instruction address (NIA) to *pc_val*.

    Places a 64-bit SelectableInt into the namespace and then lets
    the PC state machine pick it up (CIA/NIA update).
    """
    self.namespace['NIA'] = SelectableInt(pc_val, 64)
    self.pc.update(self.namespace, self.is_svp64_mode)
1664 def get_next_insn(self
):
1665 """check instruction
1668 pc
= self
.pc
.CIA
.value
1671 ins
= self
.imem
.ld(pc
, 4, False, True, instr_fetch
=True)
1673 raise KeyError("no instruction at 0x%x" % pc
)
def setup_one(self):
    """Fetch the instruction at the current PC and stage it for decode.

    Generator: get_next_insn supplies (pc, insn), setup_next_insn
    then drives the decoder via yield.
    """
    pc, insn = self.get_next_insn()
    yield from self.setup_next_insn(pc, insn)
1682 # cache since it's really slow to construct
1683 __PREFIX_CACHE
= SVP64Instruction
.Prefix(SelectableInt(value
=0, bits
=32))
1685 def __decode_prefix(self
, opcode
):
1686 pfx
= self
.__PREFIX
_CACHE
1687 pfx
.storage
.eq(opcode
)
1690 def setup_next_insn(self
, pc
, ins
):
1691 """set up next instruction
1694 log("setup: 0x%x 0x%x %s" % (pc
, ins
& 0xffffffff, bin(ins
)))
1695 log("CIA NIA", self
.respect_pc
, self
.pc
.CIA
.value
, self
.pc
.NIA
.value
)
1697 yield self
.dec2
.sv_rm
.eq(0)
1698 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff)
1699 yield self
.dec2
.dec
.bigendian
.eq(self
.bigendian
)
1700 yield self
.dec2
.state
.msr
.eq(self
.msr
.value
)
1701 yield self
.dec2
.state
.pc
.eq(pc
)
1702 if self
.svstate
is not None:
1703 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
1705 # SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
1707 opcode
= yield self
.dec2
.dec
.opcode_in
1708 opcode
= SelectableInt(value
=opcode
, bits
=32)
1709 pfx
= self
.__decode
_prefix
(opcode
)
1710 log("prefix test: opcode:", pfx
.PO
, bin(pfx
.PO
), pfx
.id)
1711 self
.is_svp64_mode
= bool((pfx
.PO
== 0b000001) and (pfx
.id == 0b11))
1712 self
.pc
.update_nia(self
.is_svp64_mode
)
1714 yield self
.dec2
.is_svp64_mode
.eq(self
.is_svp64_mode
)
1715 self
.namespace
['NIA'] = self
.pc
.NIA
1716 self
.namespace
['SVSTATE'] = self
.svstate
1717 if not self
.is_svp64_mode
:
1720 # in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
1721 log("svp64.rm", bin(pfx
.rm
))
1722 log(" svstate.vl", self
.svstate
.vl
)
1723 log(" svstate.mvl", self
.svstate
.maxvl
)
1724 ins
= self
.imem
.ld(pc
+4, 4, False, True, instr_fetch
=True)
1725 log(" svsetup: 0x%x 0x%x %s" % (pc
+4, ins
& 0xffffffff, bin(ins
)))
1726 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff) # v3.0B suffix
1727 yield self
.dec2
.sv_rm
.eq(int(pfx
.rm
)) # svp64 prefix
1730 def execute_one(self
):
1731 """execute one instruction
1733 # get the disassembly code for this instruction
1734 if not self
.disassembly
:
1735 code
= yield from self
.get_assembly_name()
1738 if self
.is_svp64_mode
:
1739 offs
, dbg
= 4, "svp64 "
1740 code
= self
.disassembly
[self
._pc
+offs
]
1741 log(" %s sim-execute" % dbg
, hex(self
._pc
), code
)
1742 opname
= code
.split(' ')[0]
1744 yield from self
.call(opname
) # execute the instruction
1745 except MemException
as e
: # check for memory errors
1746 if e
.args
[0] == 'unaligned': # alignment error
1747 # run a Trap but set DAR first
1748 print("memory unaligned exception, DAR", e
.dar
, repr(e
))
1749 self
.spr
['DAR'] = SelectableInt(e
.dar
, 64)
1750 self
.call_trap(0x600, PIb
.PRIV
) # 0x600, privileged
1752 elif e
.args
[0] == 'invalid': # invalid
1753 # run a Trap but set DAR first
1754 log("RADIX MMU memory invalid error, mode %s" % e
.mode
)
1755 if e
.mode
== 'EXECUTE':
1756 # XXX TODO: must set a few bits in SRR1,
1757 # see microwatt loadstore1.vhdl
1758 # if m_in.segerr = '0' then
1759 # v.srr1(47 - 33) := m_in.invalid;
1760 # v.srr1(47 - 35) := m_in.perm_error; -- noexec fault
1761 # v.srr1(47 - 44) := m_in.badtree;
1762 # v.srr1(47 - 45) := m_in.rc_error;
1763 # v.intr_vec := 16#400#;
1765 # v.intr_vec := 16#480#;
1766 self
.call_trap(0x400, PIb
.PRIV
) # 0x400, privileged
1768 self
.call_trap(0x300, PIb
.PRIV
) # 0x300, privileged
1770 # not supported yet:
1771 raise e
# ... re-raise
1773 # append to the trace log file
1774 self
.trace(" # %s\n" % code
)
1776 log("gprs after code", code
)
1779 for i
in range(len(self
.crl
)):
1780 crs
.append(bin(self
.crl
[i
].asint()))
1781 log("crs", " ".join(crs
))
1782 log("vl,maxvl", self
.svstate
.vl
, self
.svstate
.maxvl
)
1784 # don't use this except in special circumstances
1785 if not self
.respect_pc
:
1788 log("execute one, CIA NIA", hex(self
.pc
.CIA
.value
),
1789 hex(self
.pc
.NIA
.value
))
1791 def get_assembly_name(self
):
1792 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1793 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1794 dec_insn
= yield self
.dec2
.e
.do
.insn
1795 insn_1_11
= yield self
.dec2
.e
.do
.insn
[1:11]
1796 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1797 int_op
= yield self
.dec2
.dec
.op
.internal_op
1798 log("get assembly name asmcode", asmcode
, int_op
,
1799 hex(dec_insn
), bin(insn_1_11
))
1800 asmop
= insns
.get(asmcode
, None)
1802 # sigh reconstruct the assembly instruction name
1803 if hasattr(self
.dec2
.e
.do
, "oe"):
1804 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
1805 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
1809 if hasattr(self
.dec2
.e
.do
, "rc"):
1810 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
1811 rc_ok
= yield self
.dec2
.e
.do
.rc
.ok
1815 # annoying: ignore rc_ok if RC1 is set (for creating *assembly name*)
1816 RC1
= yield self
.dec2
.rm_dec
.RC1
1820 # grrrr have to special-case MUL op (see DecodeOE)
1821 log("ov %d en %d rc %d en %d op %d" %
1822 (ov_ok
, ov_en
, rc_ok
, rc_en
, int_op
))
1823 if int_op
in [MicrOp
.OP_MUL_H64
.value
, MicrOp
.OP_MUL_H32
.value
]:
1828 if not asmop
.endswith("."): # don't add "." to "andis."
1831 if hasattr(self
.dec2
.e
.do
, "lk"):
1832 lk
= yield self
.dec2
.e
.do
.lk
1835 log("int_op", int_op
)
1836 if int_op
in [MicrOp
.OP_B
.value
, MicrOp
.OP_BC
.value
]:
1837 AA
= yield self
.dec2
.dec
.fields
.FormI
.AA
[0:-1]
1841 spr_msb
= yield from self
.get_spr_msb()
1842 if int_op
== MicrOp
.OP_MFCR
.value
:
1847 # XXX TODO: for whatever weird reason this doesn't work
1848 # https://bugs.libre-soc.org/show_bug.cgi?id=390
1849 if int_op
== MicrOp
.OP_MTCRF
.value
:
def reset_remaps(self):
    """Reset REMAP iteration state: clear the four per-SVSHAPE
    loop-end flags and restore the identity remap index mapping.
    """
    self.remap_loopends = [0, 0, 0, 0]
    self.remap_idxs = [0, 1, 2, 3]
1860 def get_remap_indices(self
):
1861 """WARNING, this function stores remap_idxs and remap_loopends
1862 in the class for later use. this to avoid problems with yield
1864 # go through all iterators in lock-step, advance to next remap_idx
1865 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
1866 # get four SVSHAPEs. here we are hard-coding
1868 SVSHAPE0
= self
.spr
['SVSHAPE0']
1869 SVSHAPE1
= self
.spr
['SVSHAPE1']
1870 SVSHAPE2
= self
.spr
['SVSHAPE2']
1871 SVSHAPE3
= self
.spr
['SVSHAPE3']
1872 # set up the iterators
1873 remaps
= [(SVSHAPE0
, SVSHAPE0
.get_iterator()),
1874 (SVSHAPE1
, SVSHAPE1
.get_iterator()),
1875 (SVSHAPE2
, SVSHAPE2
.get_iterator()),
1876 (SVSHAPE3
, SVSHAPE3
.get_iterator()),
1880 for i
, (shape
, remap
) in enumerate(remaps
):
1881 # zero is "disabled"
1882 if shape
.value
== 0x0:
1883 self
.remap_idxs
[i
] = 0
1884 # pick src or dststep depending on reg num (0-2=in, 3-4=out)
1885 step
= dststep
if (i
in [3, 4]) else srcstep
1886 # this is terrible. O(N^2) looking for the match. but hey.
1887 for idx
, (remap_idx
, loopends
) in enumerate(remap
):
1890 self
.remap_idxs
[i
] = remap_idx
1891 self
.remap_loopends
[i
] = loopends
1892 dbg
.append((i
, step
, remap_idx
, loopends
))
1893 for (i
, step
, remap_idx
, loopends
) in dbg
:
1894 log("SVSHAPE %d idx, end" % i
, step
, remap_idx
, bin(loopends
))
def get_spr_msb(self):
    """(generator) Return True when bit 20 of the raw instruction is set.

    sigh - XFF.spr[-1]?  used by call() to tell the privileged half of
    the mfspr/mtspr SPR number space apart from the unprivileged half.
    """
    dec_insn = yield self.dec2.e.do.insn
    return bool(dec_insn & (1 << 20))
1901 def call(self
, name
):
1902 """call(opcode) - the primary execution point for instructions
1904 self
.last_st_addr
= None # reset the last known store address
1905 self
.last_ld_addr
= None # etc.
1907 ins_name
= name
.strip() # remove spaces if not already done so
1909 log("halted - not executing", ins_name
)
1912 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1913 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1914 asmop
= yield from self
.get_assembly_name()
1915 log("call", ins_name
, asmop
)
1917 # sv.setvl is *not* a loop-function. sigh
1918 log("is_svp64_mode", self
.is_svp64_mode
, asmop
)
1921 int_op
= yield self
.dec2
.dec
.op
.internal_op
1922 spr_msb
= yield from self
.get_spr_msb()
1924 instr_is_privileged
= False
1925 if int_op
in [MicrOp
.OP_ATTN
.value
,
1926 MicrOp
.OP_MFMSR
.value
,
1927 MicrOp
.OP_MTMSR
.value
,
1928 MicrOp
.OP_MTMSRD
.value
,
1930 MicrOp
.OP_RFID
.value
]:
1931 instr_is_privileged
= True
1932 if int_op
in [MicrOp
.OP_MFSPR
.value
,
1933 MicrOp
.OP_MTSPR
.value
] and spr_msb
:
1934 instr_is_privileged
= True
1936 log("is priv", instr_is_privileged
, hex(self
.msr
.value
),
1938 # check MSR priv bit and whether op is privileged: if so, throw trap
1939 if instr_is_privileged
and self
.msr
[MSRb
.PR
] == 1:
1940 self
.call_trap(0x700, PIb
.PRIV
)
1943 # check halted condition
1944 if ins_name
== 'attn':
1948 # check illegal instruction
1950 if ins_name
not in ['mtcrf', 'mtocrf']:
1951 illegal
= ins_name
!= asmop
1953 # list of instructions not being supported by binutils (.long)
1954 dotstrp
= asmop
[:-1] if asmop
[-1] == '.' else asmop
1955 if dotstrp
in [*FPTRANS_INSNS
,
1957 'ffmadds', 'fdmadds', 'ffadds',
1959 "brh", "brw", "brd",
1960 'setvl', 'svindex', 'svremap', 'svstep',
1961 'svshape', 'svshape2',
1962 'ternlogi', 'bmask', 'cprop',
1963 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
1964 'fmvis', 'fishmv', 'pcdec', "maddedu", "divmod2du",
1965 "dsld", "dsrd", "maddedus",
1966 "sadd", "saddw", "sadduw",
1971 "maddsubrs", "maddrs", "msubrs",
1972 "cfuged", "cntlzdm", "cnttzdm", "pdepd", "pextd",
1973 "setbc", "setbcr", "setnbc", "setnbcr",
1978 # branch-conditional redirects to sv.bc
1979 if asmop
.startswith('bc') and self
.is_svp64_mode
:
1980 ins_name
= 'sv.%s' % ins_name
1982 # ld-immediate-with-pi mode redirects to ld-with-postinc
1983 ldst_imm_postinc
= False
1984 if 'u' in ins_name
and self
.is_svp64_mode
:
1985 ldst_pi
= yield self
.dec2
.rm_dec
.ldst_postinc
1987 ins_name
= ins_name
.replace("u", "up")
1988 ldst_imm_postinc
= True
1989 log(" enable ld/st postinc", ins_name
)
1991 log(" post-processed name", dotstrp
, ins_name
, asmop
)
1993 # illegal instructions call TRAP at 0x700
1995 print("illegal", ins_name
, asmop
)
1996 self
.call_trap(0x700, PIb
.ILLEG
)
1997 print("name %s != %s - calling ILLEGAL trap, PC: %x" %
1998 (ins_name
, asmop
, self
.pc
.CIA
.value
))
2001 # this is for setvl "Vertical" mode: if set true,
2002 # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
2003 # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
2004 self
.allow_next_step_inc
= False
2005 self
.svstate_next_mode
= 0
2007 # nop has to be supported, we could let the actual op calculate
2008 # but PowerDecoder has a pattern for nop
2009 if ins_name
== 'nop':
2010 self
.update_pc_next()
2013 # get elwidths, defaults to 64
2017 if self
.is_svp64_mode
:
2018 ew_src
= yield self
.dec2
.rm_dec
.ew_src
2019 ew_dst
= yield self
.dec2
.rm_dec
.ew_dst
2020 ew_src
= 8 << (3-int(ew_src
)) # convert to bitlength
2021 ew_dst
= 8 << (3-int(ew_dst
)) # convert to bitlength
2022 xlen
= max(ew_src
, ew_dst
)
2023 log("elwdith", ew_src
, ew_dst
)
2024 log("XLEN:", self
.is_svp64_mode
, xlen
)
2026 # look up instruction in ISA.instrs, prepare namespace
2027 if ins_name
== 'pcdec': # grrrr yes there are others ("stbcx." etc.)
2028 info
= self
.instrs
[ins_name
+"."]
2029 elif asmop
[-1] == '.' and asmop
in self
.instrs
:
2030 info
= self
.instrs
[asmop
]
2032 info
= self
.instrs
[ins_name
]
2033 yield from self
.prep_namespace(ins_name
, info
.form
, info
.op_fields
,
2036 # preserve order of register names
2037 input_names
= create_args(list(info
.read_regs
) +
2038 list(info
.uninit_regs
))
2039 log("input names", input_names
)
2041 # get SVP64 entry for the current instruction
2042 sv_rm
= self
.svp64rm
.instrs
.get(ins_name
)
2043 if sv_rm
is not None:
2044 dest_cr
, src_cr
, src_byname
, dest_byname
= decode_extra(sv_rm
)
2046 dest_cr
, src_cr
, src_byname
, dest_byname
= False, False, {}, {}
2047 log("sv rm", sv_rm
, dest_cr
, src_cr
, src_byname
, dest_byname
)
2049 # see if srcstep/dststep need skipping over masked-out predicate bits
2050 # svstep also needs advancement because it calls SVSTATE_NEXT.
2051 # bit the remaps get computed just after pre_inc moves them on
2052 # with remap_set_steps substituting for PowerDecider2 not doing it,
2053 # and SVSTATE_NEXT not being able to.use yield, the preinc on
2054 # svstep is necessary for now.
2056 if (self
.is_svp64_mode
or ins_name
in ['svstep']):
2057 yield from self
.svstate_pre_inc()
2058 if self
.is_svp64_mode
:
2059 pre
= yield from self
.update_new_svstate_steps()
2061 self
.svp64_reset_loop()
2063 self
.update_pc_next()
2065 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2066 pred_dst_zero
= self
.pred_dst_zero
2067 pred_src_zero
= self
.pred_src_zero
2068 vl
= self
.svstate
.vl
2069 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2071 # VL=0 in SVP64 mode means "do nothing: skip instruction"
2072 if self
.is_svp64_mode
and vl
== 0:
2073 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
2074 log("SVP64: VL=0, end of call", self
.namespace
['CIA'],
2075 self
.namespace
['NIA'], kind
=LogKind
.InstrInOuts
)
2078 # for when SVREMAP is active, using pre-arranged schedule.
2079 # note: modifying PowerDecoder2 needs to "settle"
2080 remap_en
= self
.svstate
.SVme
2081 persist
= self
.svstate
.RMpst
2082 active
= (persist
or self
.last_op_svshape
) and remap_en
!= 0
2083 if self
.is_svp64_mode
:
2084 yield self
.dec2
.remap_active
.eq(remap_en
if active
else 0)
2086 if persist
or self
.last_op_svshape
:
2087 remaps
= self
.get_remap_indices()
2088 if self
.is_svp64_mode
and (persist
or self
.last_op_svshape
):
2089 yield from self
.remap_set_steps(remaps
)
2090 # after that, settle down (combinatorial) to let Vector reg numbers
2091 # work themselves out
2093 if self
.is_svp64_mode
:
2094 remap_active
= yield self
.dec2
.remap_active
2096 remap_active
= False
2097 log("remap active", bin(remap_active
))
2099 # main input registers (RT, RA ...)
2101 for name
in input_names
:
2102 regval
= (yield from self
.get_input(name
, ew_src
))
2103 log("regval name", name
, regval
)
2104 inputs
.append(regval
)
2106 # arrrrgh, awful hack, to get _RT into namespace
2107 if ins_name
in ['setvl', 'svstep']:
2109 RT
= yield self
.dec2
.dec
.RT
2110 self
.namespace
[regname
] = SelectableInt(RT
, 5)
2112 self
.namespace
["RT"] = SelectableInt(0, 5)
2113 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, "RT")
2114 log('hack input reg %s %s' % (name
, str(regnum
)), is_vec
)
2116 # in SVP64 mode for LD/ST work out immediate
2117 # XXX TODO: replace_ds for DS-Form rather than D-Form.
2118 # use info.form to detect
2119 if self
.is_svp64_mode
and not ldst_imm_postinc
:
2120 yield from self
.check_replace_d(info
, remap_active
)
2122 # "special" registers
2123 for special
in info
.special_regs
:
2124 if special
in special_sprs
:
2125 inputs
.append(self
.spr
[special
])
2127 inputs
.append(self
.namespace
[special
])
2129 # clear trap (trap) NIA
2130 self
.trap_nia
= None
2132 # check if this was an sv.bc* and create an indicator that
2133 # this is the last check to be made as a loop. combined with
2134 # the ALL/ANY mode we can early-exit
2135 if self
.is_svp64_mode
and ins_name
.startswith("sv.bc"):
2136 no_in_vec
= yield self
.dec2
.no_in_vec
# BI is scalar
2137 end_loop
= no_in_vec
or srcstep
== vl
-1 or dststep
== vl
-1
2138 self
.namespace
['end_loop'] = SelectableInt(end_loop
, 1)
2140 inp_ca_ov
= (self
.spr
['XER'][XER_bits
['CA']].value
,
2141 self
.spr
['XER'][XER_bits
['OV']].value
)
2143 # execute actual instruction here (finally)
2144 log("inputs", inputs
)
2145 results
= info
.func(self
, *inputs
)
2146 output_names
= create_args(info
.write_regs
)
2148 for out
, n
in zip(results
or [], output_names
):
2150 log("results", outs
)
2152 # "inject" decorator takes namespace from function locals: we need to
2153 # overwrite NIA being overwritten (sigh)
2154 if self
.trap_nia
is not None:
2155 self
.namespace
['NIA'] = self
.trap_nia
2157 log("after func", self
.namespace
['CIA'], self
.namespace
['NIA'])
2159 # check if op was a LD/ST so that debugging can check the
2161 if int_op
in [MicrOp
.OP_STORE
.value
,
2163 self
.last_st_addr
= self
.mem
.last_st_addr
2164 if int_op
in [MicrOp
.OP_LOAD
.value
,
2166 self
.last_ld_addr
= self
.mem
.last_ld_addr
2167 log("op", int_op
, MicrOp
.OP_STORE
.value
, MicrOp
.OP_LOAD
.value
,
2168 self
.last_st_addr
, self
.last_ld_addr
)
2170 # detect if CA/CA32 already in outputs (sra*, basically)
2172 ca32
= outs
.get("CA32")
2174 log("carry already done?", ca
, ca32
, output_names
)
2175 # soc test_pipe_caller tests don't have output_carry
2176 has_output_carry
= hasattr(self
.dec2
.e
.do
, "output_carry")
2177 carry_en
= has_output_carry
and (yield self
.dec2
.e
.do
.output_carry
)
2179 yield from self
.handle_carry_(
2180 inputs
, results
[0], ca
, ca32
, inp_ca_ov
=inp_ca_ov
)
2182 # get output named "overflow" and "CR0"
2183 overflow
= outs
.get('overflow')
2184 cr0
= outs
.get('CR0')
2185 cr1
= outs
.get('CR1')
2187 # soc test_pipe_caller tests don't have oe
2188 has_oe
= hasattr(self
.dec2
.e
.do
, "oe")
2189 # yeah just no. not in parallel processing
2190 if has_oe
and not self
.is_svp64_mode
:
2191 # detect if overflow was in return result
2192 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2193 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2194 log("internal overflow", ins_name
, overflow
, "en?", ov_en
, ov_ok
)
2196 yield from self
.handle_overflow(
2197 inputs
, results
[0], overflow
, inp_ca_ov
=inp_ca_ov
)
2199 # only do SVP64 dest predicated Rc=1 if dest-pred is not enabled
2201 if not self
.is_svp64_mode
or not pred_dst_zero
:
2202 if hasattr(self
.dec2
.e
.do
, "rc"):
2203 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2204 # don't do Rc=1 for svstep it is handled explicitly.
2205 # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
2206 # to write directly to CR0 instead of in ISACaller. hooyahh.
2207 if rc_en
and ins_name
not in ['svstep']:
2208 yield from self
.do_rc_ov(
2209 ins_name
, results
[0], overflow
, cr0
, cr1
, output_names
)
2212 ffirst_hit
= False, False
2213 if self
.is_svp64_mode
:
2214 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2215 is_cr
= sv_mode
== SVMode
.CROP
.value
2216 chk
= rc_en
or is_cr
2217 ffirst_hit
= (yield from self
.check_ffirst(info
, chk
, srcstep
))
2219 # check if a FP Exception occurred. TODO for DD-FFirst, check VLi
2220 # and raise the exception *after* if VLi=1 but if VLi=0 then
2221 # truncate and make the exception "disappear".
2222 if self
.FPSCR
.FEX
and (self
.msr
[MSRb
.FE0
] or self
.msr
[MSRb
.FE1
]):
2223 self
.call_trap(0x700, PIb
.FP
)
2226 # any modified return results?
2227 yield from self
.do_outregs_nia(asmop
, ins_name
, info
, outs
,
2228 carry_en
, rc_en
, ffirst_hit
, ew_dst
)
2230 def check_ffirst(self
, info
, rc_en
, srcstep
):
2231 """fail-first mode: checks a bit of Rc Vector, truncates VL
2233 rm_mode
= yield self
.dec2
.rm_dec
.mode
2234 ff_inv
= yield self
.dec2
.rm_dec
.inv
2235 cr_bit
= yield self
.dec2
.rm_dec
.cr_sel
2236 RC1
= yield self
.dec2
.rm_dec
.RC1
2237 vli_
= yield self
.dec2
.rm_dec
.vli
# VL inclusive if truncated
2238 log(" ff rm_mode", rc_en
, rm_mode
, SVP64RMMode
.FFIRST
.value
)
2242 log(" cr_bit", cr_bit
)
2243 log(" rc_en", rc_en
)
2244 if not rc_en
or rm_mode
!= SVP64RMMode
.FFIRST
.value
:
2246 # get the CR vevtor, do BO-test
2248 log("asmregs", info
.asmregs
[0], info
.write_regs
)
2249 if 'CR' in info
.write_regs
and 'BF' in info
.asmregs
[0]:
2251 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, crf
)
2252 crtest
= self
.crl
[regnum
]
2253 ffirst_hit
= crtest
[cr_bit
] != ff_inv
2254 log("cr test", crf
, regnum
, int(crtest
), crtest
, cr_bit
, ff_inv
)
2255 log("cr test?", ffirst_hit
)
2258 # Fail-first activated, truncate VL
2259 vli
= SelectableInt(int(vli_
), 7)
2260 self
.svstate
.vl
= srcstep
+ vli
2261 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
2262 yield Settle() # let decoder update
2265 def do_rc_ov(self
, ins_name
, result
, overflow
, cr0
, cr1
, output_names
):
2266 cr_out
= yield self
.dec2
.op
.cr_out
2267 if cr_out
== CROutSel
.CR1
.value
:
2271 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, rc_reg
)
2272 # hang on... for `setvl` actually you want to test SVSTATE.VL
2273 is_setvl
= ins_name
in ('svstep', 'setvl')
2275 result
= SelectableInt(result
.vl
, 64)
2277 # overflow = None # do not override overflow except in setvl
2281 cr1
= int(self
.FPSCR
.FX
) << 3
2282 cr1 |
= int(self
.FPSCR
.FEX
) << 2
2283 cr1 |
= int(self
.FPSCR
.VX
) << 1
2284 cr1 |
= int(self
.FPSCR
.OX
)
2285 log("default fp cr1", cr1
)
2287 log("explicit cr1", cr1
)
2288 self
.crl
[regnum
].eq(cr1
)
2290 # if there was not an explicit CR0 in the pseudocode,
2292 self
.handle_comparison(result
, regnum
, overflow
, no_so
=is_setvl
)
2294 # otherwise we just blat CR0 into the required regnum
2295 log("explicit rc0", cr0
)
2296 self
.crl
[regnum
].eq(cr0
)
2298 def do_outregs_nia(self
, asmop
, ins_name
, info
, outs
,
2299 ca_en
, rc_en
, ffirst_hit
, ew_dst
):
2300 ffirst_hit
, vli
= ffirst_hit
2301 # write out any regs for this instruction, but only if fail-first is ok
2302 # XXX TODO: allow CR-vector to be written out even if ffirst fails
2303 if not ffirst_hit
or vli
:
2304 for name
, output
in outs
.items():
2305 yield from self
.check_write(info
, name
, output
, ca_en
, ew_dst
)
2306 # restore the CR value on non-VLI failfirst (from sv.cmp and others
2307 # which write directly to CR in the pseudocode (gah, what a mess)
2308 # if ffirst_hit and not vli:
2309 # self.cr.value = self.cr_backup
2312 self
.svp64_reset_loop()
2315 # check advancement of src/dst/sub-steps and if PC needs updating
2316 nia_update
= (yield from self
.check_step_increment(rc_en
,
2319 self
.update_pc_next()
2321 def check_replace_d(self
, info
, remap_active
):
2322 replace_d
= False # update / replace constant in pseudocode
2323 ldstmode
= yield self
.dec2
.rm_dec
.ldstmode
2324 vl
= self
.svstate
.vl
2325 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2326 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
2327 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
2328 if info
.form
== 'DS':
2329 # DS-Form, multiply by 4 then knock 2 bits off after
2330 imm
= yield self
.dec2
.dec
.fields
.FormDS
.DS
[0:14] * 4
2332 imm
= yield self
.dec2
.dec
.fields
.FormD
.D
[0:16]
2333 imm
= exts(imm
, 16) # sign-extend to integer
2334 # get the right step. LD is from srcstep, ST is dststep
2335 op
= yield self
.dec2
.e
.do
.insn_type
2337 if op
== MicrOp
.OP_LOAD
.value
:
2339 offsmul
= yield self
.dec2
.in1_step
2340 log("D-field REMAP src", imm
, offsmul
, ldstmode
)
2342 offsmul
= (srcstep
* (subvl
+1)) + ssubstep
2343 log("D-field src", imm
, offsmul
, ldstmode
)
2344 elif op
== MicrOp
.OP_STORE
.value
:
2345 # XXX NOTE! no bit-reversed STORE! this should not ever be used
2346 offsmul
= (dststep
* (subvl
+1)) + dsubstep
2347 log("D-field dst", imm
, offsmul
, ldstmode
)
2348 # Unit-Strided LD/ST adds offset*width to immediate
2349 if ldstmode
== SVP64LDSTmode
.UNITSTRIDE
.value
:
2350 ldst_len
= yield self
.dec2
.e
.do
.data_len
2351 imm
= SelectableInt(imm
+ offsmul
* ldst_len
, 32)
2353 # Element-strided multiplies the immediate by element step
2354 elif ldstmode
== SVP64LDSTmode
.ELSTRIDE
.value
:
2355 imm
= SelectableInt(imm
* offsmul
, 32)
2358 ldst_ra_vec
= yield self
.dec2
.rm_dec
.ldst_ra_vec
2359 ldst_imz_in
= yield self
.dec2
.rm_dec
.ldst_imz_in
2360 log("LDSTmode", SVP64LDSTmode(ldstmode
),
2361 offsmul
, imm
, ldst_ra_vec
, ldst_imz_in
)
2362 # new replacement D... errr.. DS
2364 if info
.form
== 'DS':
2365 # TODO: assert 2 LSBs are zero?
2366 log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm
.value
))
2367 imm
.value
= imm
.value
>> 2
2368 self
.namespace
['DS'] = imm
2370 self
.namespace
['D'] = imm
2372 def get_input(self
, name
, ew_src
):
2373 # using PowerDecoder2, first, find the decoder index.
2374 # (mapping name RA RB RC RS to in1, in2, in3)
2375 regnum
, is_vec
= yield from get_idx_in(self
.dec2
, name
, True)
2377 # doing this is not part of svp64, it's because output
2378 # registers, to be modified, need to be in the namespace.
2379 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, name
, True)
2381 regnum
, is_vec
= yield from get_idx_out2(self
.dec2
, name
, True)
2383 if isinstance(regnum
, tuple):
2384 (regnum
, base
, offs
) = regnum
2386 base
, offs
= regnum
, 0 # temporary HACK
2388 # in case getting the register number is needed, _RA, _RB
2389 # (HACK: only in straight non-svp64-mode for now, or elwidth == 64)
2390 regname
= "_" + name
2391 if not self
.is_svp64_mode
or ew_src
== 64:
2392 self
.namespace
[regname
] = regnum
2393 elif regname
in self
.namespace
:
2394 del self
.namespace
[regname
]
2396 if not self
.is_svp64_mode
or not self
.pred_src_zero
:
2397 log('reading reg %s %s' % (name
, str(regnum
)), is_vec
)
2399 reg_val
= SelectableInt(self
.fpr(base
, is_vec
, offs
, ew_src
))
2400 log("read reg %d/%d: 0x%x" % (base
, offs
, reg_val
.value
))
2401 self
.trace("r:FPR:%d:%d:%d " % (base
, offs
, ew_src
))
2402 elif name
is not None:
2403 reg_val
= SelectableInt(self
.gpr(base
, is_vec
, offs
, ew_src
))
2404 self
.trace("r:GPR:%d:%d:%d " % (base
, offs
, ew_src
))
2405 log("read reg %d/%d: 0x%x" % (base
, offs
, reg_val
.value
))
2407 log('zero input reg %s %s' % (name
, str(regnum
)), is_vec
)
2408 reg_val
= SelectableInt(0, ew_src
)
2411 def remap_set_steps(self
, remaps
):
2412 """remap_set_steps sets up the in1/2/3 and out1/2 steps.
2413 they work in concert with PowerDecoder2 at the moment,
2414 there is no HDL implementation of REMAP. therefore this
2415 function, because ISACaller still uses PowerDecoder2,
2416 will *explicitly* write the dec2.XX_step values. this has
2419 # just some convenient debug info
2421 sname
= 'SVSHAPE%d' % i
2422 shape
= self
.spr
[sname
]
2423 log(sname
, bin(shape
.value
))
2424 log(" lims", shape
.lims
)
2425 log(" mode", shape
.mode
)
2426 log(" skip", shape
.skip
)
2428 # set up the list of steps to remap
2429 mi0
= self
.svstate
.mi0
2430 mi1
= self
.svstate
.mi1
2431 mi2
= self
.svstate
.mi2
2432 mo0
= self
.svstate
.mo0
2433 mo1
= self
.svstate
.mo1
2434 steps
= [[self
.dec2
.in1_step
, mi0
], # RA
2435 [self
.dec2
.in2_step
, mi1
], # RB
2436 [self
.dec2
.in3_step
, mi2
], # RC
2437 [self
.dec2
.o_step
, mo0
], # RT
2438 [self
.dec2
.o2_step
, mo1
], # EA
2441 rnames
= ['RA', 'RB', 'RC', 'RT', 'RS']
2442 for i
, reg
in enumerate(rnames
):
2443 idx
= yield from get_idx_map(self
.dec2
, reg
)
2445 idx
= yield from get_idx_map(self
.dec2
, "F"+reg
)
2447 steps
[i
][0] = self
.dec2
.in1_step
2449 steps
[i
][0] = self
.dec2
.in2_step
2451 steps
[i
][0] = self
.dec2
.in3_step
2452 log("remap step", i
, reg
, idx
, steps
[i
][1])
2453 remap_idxs
= self
.remap_idxs
2455 # now cross-index the required SHAPE for each of 3-in 2-out regs
2456 rnames
= ['RA', 'RB', 'RC', 'RT', 'EA']
2457 for i
, (dstep
, shape_idx
) in enumerate(steps
):
2458 (shape
, remap
) = remaps
[shape_idx
]
2459 remap_idx
= remap_idxs
[shape_idx
]
2460 # zero is "disabled"
2461 if shape
.value
== 0x0:
2463 # now set the actual requested step to the current index
2464 if dstep
is not None:
2465 yield dstep
.eq(remap_idx
)
2467 # debug printout info
2468 rremaps
.append((shape
.mode
, hex(shape
.value
), dstep
,
2469 i
, rnames
[i
], shape_idx
, remap_idx
))
2471 log("shape remap", x
)
2473 def check_write(self
, info
, name
, output
, carry_en
, ew_dst
):
2474 if name
== 'overflow': # ignore, done already (above)
2476 if name
== 'CR0': # ignore, done already (above)
2478 if isinstance(output
, int):
2479 output
= SelectableInt(output
, EFFECTIVELY_UNLIMITED
)
2481 if name
in ['FPSCR', ]:
2482 log("write FPSCR 0x%x" % (output
.value
))
2483 self
.FPSCR
.eq(output
)
2486 if name
in ['CA', 'CA32']:
2488 log("writing %s to XER" % name
, output
)
2489 log("write XER %s 0x%x" % (name
, output
.value
))
2490 self
.spr
['XER'][XER_bits
[name
]] = output
.value
2492 log("NOT writing %s to XER" % name
, output
)
2494 # write special SPRs
2495 if name
in info
.special_regs
:
2496 log('writing special %s' % name
, output
, special_sprs
)
2497 log("write reg %s 0x%x" % (name
, output
.value
))
2498 if name
in special_sprs
:
2499 self
.spr
[name
] = output
2501 self
.namespace
[name
].eq(output
)
2503 log('msr written', hex(self
.msr
.value
))
2505 # find out1/out2 PR/FPR
2506 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, name
, True)
2508 regnum
, is_vec
= yield from get_idx_out2(self
.dec2
, name
, True)
2510 # temporary hack for not having 2nd output
2511 regnum
= yield getattr(self
.decoder
, name
)
2513 # convenient debug prefix
2518 # check zeroing due to predicate bit being zero
2519 if self
.is_svp64_mode
and self
.pred_dst_zero
:
2520 log('zeroing reg %s %s' % (str(regnum
), str(output
)), is_vec
)
2521 output
= SelectableInt(0, EFFECTIVELY_UNLIMITED
)
2522 log("write reg %s%s 0x%x ew %d" % (reg_prefix
, str(regnum
),
2523 output
.value
, ew_dst
),
2524 kind
=LogKind
.InstrInOuts
)
2525 # zero-extend tov64 bit begore storing (should use EXT oh well)
2526 if output
.bits
> 64:
2527 output
= SelectableInt(output
.value
, 64)
2528 rnum
, base
, offset
= regnum
2530 self
.fpr
.write(regnum
, output
, is_vec
, ew_dst
)
2531 self
.trace("w:FPR:%d:%d:%d " % (rnum
, offset
, ew_dst
))
2533 self
.gpr
.write(regnum
, output
, is_vec
, ew_dst
)
2534 self
.trace("w:GPR:%d:%d:%d " % (rnum
, offset
, ew_dst
))
2536 def check_step_increment(self
, rc_en
, asmop
, ins_name
):
2537 # check if it is the SVSTATE.src/dest step that needs incrementing
2538 # this is our Sub-Program-Counter loop from 0 to VL-1
2539 if not self
.allow_next_step_inc
:
2540 if self
.is_svp64_mode
:
2541 return (yield from self
.svstate_post_inc(ins_name
))
2543 # XXX only in non-SVP64 mode!
2544 # record state of whether the current operation was an svshape,
2546 # to be able to know if it should apply in the next instruction.
2547 # also (if going to use this instruction) should disable ability
2548 # to interrupt in between. sigh.
2549 self
.last_op_svshape
= asmop
in ['svremap', 'svindex',
2556 log("SVSTATE_NEXT: inc requested, mode",
2557 self
.svstate_next_mode
, self
.allow_next_step_inc
)
2558 yield from self
.svstate_pre_inc()
2559 pre
= yield from self
.update_new_svstate_steps()
2561 # reset at end of loop including exit Vertical Mode
2562 log("SVSTATE_NEXT: end of loop, reset")
2563 self
.svp64_reset_loop()
2564 self
.svstate
.vfirst
= 0
2568 self
.handle_comparison(SelectableInt(0, 64)) # CR0
2570 if self
.allow_next_step_inc
== 2:
2571 log("SVSTATE_NEXT: read")
2572 nia_update
= (yield from self
.svstate_post_inc(ins_name
))
2574 log("SVSTATE_NEXT: post-inc")
2575 # use actual (cached) src/dst-step here to check end
2576 remaps
= self
.get_remap_indices()
2577 remap_idxs
= self
.remap_idxs
2578 vl
= self
.svstate
.vl
2579 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2580 if self
.allow_next_step_inc
!= 2:
2581 yield from self
.advance_svstate_steps()
2582 #self.namespace['SVSTATE'] = self.svstate.spr
2583 # set CR0 (if Rc=1) based on end
2584 endtest
= 1 if self
.at_loopend() else 0
2586 #results = [SelectableInt(endtest, 64)]
2587 # self.handle_comparison(results) # CR0
2589 # see if svstep was requested, if so, which SVSTATE
2591 if self
.svstate_next_mode
> 0:
2592 shape_idx
= self
.svstate_next_mode
.value
-1
2593 endings
= self
.remap_loopends
[shape_idx
]
2594 cr_field
= SelectableInt((~endings
) << 1 | endtest
, 4)
2595 log("svstep Rc=1, CR0", cr_field
, endtest
)
2596 self
.crl
[0].eq(cr_field
) # CR0
2598 # reset at end of loop including exit Vertical Mode
2599 log("SVSTATE_NEXT: after increments, reset")
2600 self
.svp64_reset_loop()
2601 self
.svstate
.vfirst
= 0
    def SVSTATE_NEXT(self, mode, submode):
        """explicitly moves srcstep/dststep on to next element, for
        "Vertical-First" mode. this function is called from
        setvl pseudo-code, as a pseudo-op "svstep"

        WARNING: this function uses information that was created EARLIER
        due to it being in the middle of a yield, but this function is
        *NOT* called from yield (it's called from compiled pseudocode).

        mode selects what is reported back (see branches below); submode
        is recorded (+1) so the post-instruction phase knows an explicit
        step-increment was requested.
        """
        # +1 distinguishes "increment requested" (non-zero) from the
        # default "no increment" (zero) in allow_next_step_inc
        self.allow_next_step_inc = submode.value + 1
        log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
        self.svstate_next_mode = mode
        # modes 1-4: report the cached REMAP index for the corresponding
        # shape (shape_idx = mode-1); mode is left set for later phases
        if self.svstate_next_mode > 0 and self.svstate_next_mode < 5:
            shape_idx = self.svstate_next_mode.value-1
            return SelectableInt(self.remap_idxs[shape_idx], 7)
        # modes 5-8: report one of the four SVSTATE step counters,
        # resetting the mode back to zero (one-shot query)
        if self.svstate_next_mode == 5:
            self.svstate_next_mode = 0
            return SelectableInt(self.svstate.srcstep, 7)
        if self.svstate_next_mode == 6:
            self.svstate_next_mode = 0
            return SelectableInt(self.svstate.dststep, 7)
        if self.svstate_next_mode == 7:
            self.svstate_next_mode = 0
            return SelectableInt(self.svstate.ssubstep, 7)
        if self.svstate_next_mode == 8:
            self.svstate_next_mode = 0
            return SelectableInt(self.svstate.dsubstep, 7)
        # any other mode (including 0): nothing to report
        return SelectableInt(0, 7)
2633 def get_src_dststeps(self
):
2634 """gets srcstep, dststep, and ssubstep, dsubstep
2636 return (self
.new_srcstep
, self
.new_dststep
,
2637 self
.new_ssubstep
, self
.new_dsubstep
)
    def update_svstate_namespace(self, overwrite_svstate=True):
        """sync the cached "new" SVSTATE steps into self.svstate, mirror
        SVSTATE into the pseudocode namespace, then push the value into
        dec2 and Settle so the decoder re-evaluates.

        this is a generator: callers must "yield from" it so the nmigen
        simulator actually processes the dec2 state update.
        """
        if overwrite_svstate:
            # note, do not get the bit-reversed srcstep here!
            srcstep, dststep = self.new_srcstep, self.new_dststep
            ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep

            # update SVSTATE with new srcstep
            self.svstate.srcstep = srcstep
            self.svstate.dststep = dststep
            self.svstate.ssubstep = ssubstep
            self.svstate.dsubstep = dsubstep
        self.namespace['SVSTATE'] = self.svstate
        yield self.dec2.state.svstate.eq(self.svstate.value)
        yield Settle()  # let decoder update
2654 def update_new_svstate_steps(self
, overwrite_svstate
=True):
2655 yield from self
.update_svstate_namespace(overwrite_svstate
)
2656 srcstep
= self
.svstate
.srcstep
2657 dststep
= self
.svstate
.dststep
2658 ssubstep
= self
.svstate
.ssubstep
2659 dsubstep
= self
.svstate
.dsubstep
2660 pack
= self
.svstate
.pack
2661 unpack
= self
.svstate
.unpack
2662 vl
= self
.svstate
.vl
2663 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2664 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2665 rm_mode
= yield self
.dec2
.rm_dec
.mode
2666 ff_inv
= yield self
.dec2
.rm_dec
.inv
2667 cr_bit
= yield self
.dec2
.rm_dec
.cr_sel
2668 log(" srcstep", srcstep
)
2669 log(" dststep", dststep
)
2671 log(" unpack", unpack
)
2672 log(" ssubstep", ssubstep
)
2673 log(" dsubstep", dsubstep
)
2675 log(" subvl", subvl
)
2676 log(" rm_mode", rm_mode
)
2677 log(" sv_mode", sv_mode
)
2679 log(" cr_bit", cr_bit
)
2681 # check if end reached (we let srcstep overrun, above)
2682 # nothing needs doing (TODO zeroing): just do next instruction
2685 return ((ssubstep
== subvl
and srcstep
== vl
) or
2686 (dsubstep
== subvl
and dststep
== vl
))
2688 def svstate_post_inc(self
, insn_name
, vf
=0):
2689 # check if SV "Vertical First" mode is enabled
2690 vfirst
= self
.svstate
.vfirst
2691 log(" SV Vertical First", vf
, vfirst
)
2692 if not vf
and vfirst
== 1:
2696 # check if it is the SVSTATE.src/dest step that needs incrementing
2697 # this is our Sub-Program-Counter loop from 0 to VL-1
2698 # XXX twin predication TODO
2699 vl
= self
.svstate
.vl
2700 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2701 mvl
= self
.svstate
.maxvl
2702 srcstep
= self
.svstate
.srcstep
2703 dststep
= self
.svstate
.dststep
2704 ssubstep
= self
.svstate
.ssubstep
2705 dsubstep
= self
.svstate
.dsubstep
2706 pack
= self
.svstate
.pack
2707 unpack
= self
.svstate
.unpack
2708 rm_mode
= yield self
.dec2
.rm_dec
.mode
2709 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
2710 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
2711 out_vec
= not (yield self
.dec2
.no_out_vec
)
2712 in_vec
= not (yield self
.dec2
.no_in_vec
)
2713 log(" svstate.vl", vl
)
2714 log(" svstate.mvl", mvl
)
2715 log(" rm.subvl", subvl
)
2716 log(" svstate.srcstep", srcstep
)
2717 log(" svstate.dststep", dststep
)
2718 log(" svstate.ssubstep", ssubstep
)
2719 log(" svstate.dsubstep", dsubstep
)
2720 log(" svstate.pack", pack
)
2721 log(" svstate.unpack", unpack
)
2722 log(" mode", rm_mode
)
2723 log(" reverse", reverse_gear
)
2724 log(" out_vec", out_vec
)
2725 log(" in_vec", in_vec
)
2726 log(" sv_ptype", sv_ptype
, sv_ptype
== SVPType
.P2
.value
)
2727 # check if this was an sv.bc* and if so did it succeed
2728 if self
.is_svp64_mode
and insn_name
.startswith("sv.bc"):
2729 end_loop
= self
.namespace
['end_loop']
2730 log("branch %s end_loop" % insn_name
, end_loop
)
2732 self
.svp64_reset_loop()
2733 self
.update_pc_next()
2735 # check if srcstep needs incrementing by one, stop PC advancing
2736 # but for 2-pred both src/dest have to be checked.
2737 # XXX this might not be true! it may just be LD/ST
2738 if sv_ptype
== SVPType
.P2
.value
:
2739 svp64_is_vector
= (out_vec
or in_vec
)
2741 svp64_is_vector
= out_vec
2742 # loops end at the first "hit" (source or dest)
2743 yield from self
.advance_svstate_steps()
2744 loopend
= self
.loopend
2745 log("loopend", svp64_is_vector
, loopend
)
2746 if not svp64_is_vector
or loopend
:
2747 # reset loop to zero and update NIA
2748 self
.svp64_reset_loop()
2753 # still looping, advance and update NIA
2754 self
.namespace
['SVSTATE'] = self
.svstate
2756 # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
2757 # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
2758 # this way we keep repeating the same instruction (with new steps)
2759 self
.pc
.NIA
.value
= self
.pc
.CIA
.value
2760 self
.namespace
['NIA'] = self
.pc
.NIA
2761 log("end of sub-pc call", self
.namespace
['CIA'], self
.namespace
['NIA'])
2762 return False # DO NOT allow PC update whilst Sub-PC loop running
    def update_pc_next(self):
        """advance the program counter at end of instruction.

        delegates to self.pc.update() with the pseudocode namespace
        (presumably committing NIA into CIA — confirm in the PC state
        class), then logs the resulting CIA/NIA/SVSTATE.
        """
        # UPDATE program counter
        self.pc.update(self.namespace, self.is_svp64_mode)
        #self.svstate.spr = self.namespace['SVSTATE']
        log("end of call", self.namespace['CIA'],
            self.namespace['NIA'],
            self.namespace['SVSTATE'])
2772 def svp64_reset_loop(self
):
2773 self
.svstate
.srcstep
= 0
2774 self
.svstate
.dststep
= 0
2775 self
.svstate
.ssubstep
= 0
2776 self
.svstate
.dsubstep
= 0
2777 self
.loopend
= False
2778 log(" svstate.srcstep loop end (PC to update)")
2779 self
.namespace
['SVSTATE'] = self
.svstate
2781 def update_nia(self
):
2782 self
.pc
.update_nia(self
.is_svp64_mode
)
2783 self
.namespace
['NIA'] = self
.pc
.NIA
2787 """Decorator factory.
2789 this decorator will "inject" variables into the function's namespace,
2790 from the *dictionary* in self.namespace. it therefore becomes possible
2791 to make it look like a whole stack of variables which would otherwise
2792 need "self." inserted in front of them (*and* for those variables to be
2793 added to the instance) "appear" in the function.
2795 "self.namespace['SI']" for example becomes accessible as just "SI" but
2796 *only* inside the function, when decorated.
2798 def variable_injector(func
):
2800 def decorator(*args
, **kwargs
):
2802 func_globals
= func
.__globals
__ # Python 2.6+
2803 except AttributeError:
2804 func_globals
= func
.func_globals
# Earlier versions.
2806 context
= args
[0].namespace
# variables to be injected
2807 saved_values
= func_globals
.copy() # Shallow copy of dict.
2808 log("globals before", context
.keys())
2809 func_globals
.update(context
)
2810 result
= func(*args
, **kwargs
)
2811 log("globals after", func_globals
['CIA'], func_globals
['NIA'])
2812 log("args[0]", args
[0].namespace
['CIA'],
2813 args
[0].namespace
['NIA'],
2814 args
[0].namespace
['SVSTATE'])
2815 if 'end_loop' in func_globals
:
2816 log("args[0] end_loop", func_globals
['end_loop'])
2817 args
[0].namespace
= func_globals
2818 #exec (func.__code__, func_globals)
2821 # func_globals = saved_values # Undo changes.
2827 return variable_injector