c070337b96677f05cc34e0f8c47759a1fda4d218
1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Copyright (C) 2020 Michael Nolan
4 # Funded by NLnet http://nlnet.nl
5 """core of the python-based POWER9 simulator
7 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
8 not speed, it is for both learning and educational purposes, as well as
9 a method of verifying the HDL.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=424
16 from collections
import namedtuple
17 from copy
import deepcopy
18 from functools
import wraps
20 from nmigen
.sim
import Settle
21 from openpower
.consts
import (MSRb
, PIb
, # big-endian (PowerISA versions)
22 SVP64CROffs
, SVP64MODEb
)
23 from openpower
.decoder
.helpers
import (ISACallerHelper
, ISAFPHelpers
, exts
,
25 from openpower
.decoder
.isa
.mem
import Mem
, MemMMap
, MemException
26 from openpower
.decoder
.isa
.radixmmu
import RADIX
27 from openpower
.decoder
.isa
.svshape
import SVSHAPE
28 from openpower
.decoder
.isa
.svstate
import SVP64State
29 from openpower
.decoder
.orderedset
import OrderedSet
30 from openpower
.decoder
.power_enums
import (FPTRANS_INSNS
, CRInSel
, CROutSel
,
31 In1Sel
, In2Sel
, In3Sel
, LDSTMode
,
32 MicrOp
, OutSel
, SVMode
,
33 SVP64LDSTmode
, SVP64PredCR
,
34 SVP64PredInt
, SVP64PredMode
,
35 SVP64RMMode
, SVPType
, XER_bits
,
36 insns
, spr_byname
, spr_dict
,
38 from openpower
.insndb
.core
import SVP64Instruction
39 from openpower
.decoder
.power_svp64
import SVP64RM
, decode_extra
40 from openpower
.decoder
.selectable_int
import (FieldSelectableInt
,
41 SelectableInt
, selectconcat
,
42 EFFECTIVELY_UNLIMITED
)
43 from openpower
.fpscr
import FPSCRState
44 from openpower
.xer
import XERState
45 from openpower
.util
import LogKind
, log
47 LDST_UPDATE_INSNS
= ['ldu', 'lwzu', 'lbzu', 'lhzu', 'lhau', 'lfsu', 'lfdu',
48 'stwu', 'stbu', 'sthu', 'stfsu', 'stfdu', 'stdu',
# lightweight record describing one decoded instruction: the python function
# implementing it plus its register/field metadata (produced by pywriter)
instruction_info = namedtuple('instruction_info',
                              'func read_regs uninit_regs write_regs ' +
                              'special_regs op_fields form asmregs')
64 # rrright. this is here basically because the compiler pywriter returns
65 # results in a specific priority order. to make sure regs match up they
66 # need partial sorting. sigh.
68 # TODO (lkcl): adjust other registers that should be in a particular order
69 # probably CA, CA32, and CR
97 "overflow": 7, # should definitely be last
# names of the floating-point register operands used by the decoder
fregs = ['FRA', 'FRB', 'FRC', 'FRS', 'FRT']
def get_masked_reg(regs, base, offs, ew_bits):
    """Read one element of width *ew_bits* from a packed 64-bit register file.

    *regs* is indexable by 64-bit register number; elements of *ew_bits*
    bits are packed LSB-first into successive 64-bit registers starting
    at *base*, and *offs* is the element index.  Returns the element as
    a plain int.
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # shift down so element we want is at LSB
    val >>= gpr_col * ew_bits
    # mask so we only return the LSB element
    return val & mask
def set_masked_reg(regs, base, offs, ew_bits, value):
    """Write one element of width *ew_bits* into a packed 64-bit register
    file (inverse of get_masked_reg).

    Only the element at index *offs* is modified; all other elements
    sharing the same 64-bit register are preserved.  *value* is truncated
    to *ew_bits* bits before insertion.
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # now mask out the bits we don't want (clear the target element slot)
    val = val & ~(mask << (gpr_col * ew_bits))
    # then wipe the bits we don't want from the value
    value = value & mask
    # OR the new value in, shifted up
    val |= value << (gpr_col * ew_bits)
    regs[base + gpr_offs] = val
def create_args(reglist, extra=None):
    """Deduplicate *reglist* (preserving first-seen order), sort it into
    the priority order expected by the pywriter-compiled pseudocode
    (via REG_SORT_ORDER), and optionally prepend *extra*.
    """
    retval = list(OrderedSet(reglist))
    retval.sort(key=lambda reg: REG_SORT_ORDER.get(reg, 0))
    if extra is not None:
        return [extra] + retval
    return retval
144 def __init__(self
, decoder
, isacaller
, svstate
, regfile
):
147 self
.isacaller
= isacaller
148 self
.svstate
= svstate
149 for i
in range(len(regfile
)):
150 self
[i
] = SelectableInt(regfile
[i
], 64)
152 def __call__(self
, ridx
, is_vec
=False, offs
=0, elwidth
=64):
153 if isinstance(ridx
, SelectableInt
):
156 return self
[ridx
+offs
]
157 # rrrright. start by breaking down into row/col, based on elwidth
158 gpr_offs
= offs
// (64//elwidth
)
159 gpr_col
= offs
% (64//elwidth
)
160 # now select the 64-bit register, but get its value (easier)
161 val
= self
[ridx
+gpr_offs
].value
162 # now shift down and mask out
163 val
= val
>> (gpr_col
*elwidth
) & ((1 << elwidth
)-1)
164 # finally, return a SelectableInt at the required elwidth
165 log("GPR call", ridx
, "isvec", is_vec
, "offs", offs
,
166 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
))
167 return SelectableInt(val
, elwidth
)
169 def set_form(self
, form
):
172 def write(self
, rnum
, value
, is_vec
=False, elwidth
=64):
174 if isinstance(rnum
, SelectableInt
):
176 if isinstance(value
, SelectableInt
):
179 if isinstance(rnum
, tuple):
180 rnum
, base
, offs
= rnum
183 # rrrright. start by breaking down into row/col, based on elwidth
184 gpr_offs
= offs
// (64//elwidth
)
185 gpr_col
= offs
% (64//elwidth
)
186 # compute the mask based on elwidth
187 mask
= (1 << elwidth
)-1
188 # now select the 64-bit register, but get its value (easier)
189 val
= self
[base
+gpr_offs
].value
190 # now mask out the bit we don't want
191 val
= val
& ~
(mask
<< (gpr_col
*elwidth
))
192 # then wipe the bit we don't want from the value
194 # OR the new value in, shifted up
195 val |
= value
<< (gpr_col
*elwidth
)
196 # finally put the damn value into the regfile
197 log("GPR write", base
, "isvec", is_vec
, "offs", offs
,
198 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
),
200 dict.__setitem
__(self
, base
+gpr_offs
, SelectableInt(val
, 64))
202 def __setitem__(self
, rnum
, value
):
203 # rnum = rnum.value # only SelectableInt allowed
204 log("GPR setitem", rnum
, value
)
205 if isinstance(rnum
, SelectableInt
):
207 dict.__setitem
__(self
, rnum
, value
)
209 def getz(self
, rnum
):
210 # rnum = rnum.value # only SelectableInt allowed
211 log("GPR getzero?", rnum
)
213 return SelectableInt(0, 64)
216 def _get_regnum(self
, attr
):
217 getform
= self
.sd
.sigforms
[self
.form
]
218 rnum
= getattr(getform
, attr
)
221 def ___getitem__(self
, attr
):
222 """ XXX currently not used
224 rnum
= self
._get
_regnum
(attr
)
225 log("GPR getitem", attr
, rnum
)
226 return self
.regfile
[rnum
]
228 def dump(self
, printout
=True):
230 for i
in range(len(self
)):
231 res
.append(self
[i
].value
)
233 for i
in range(0, len(res
), 8):
236 s
.append("%08x" % res
[i
+j
])
238 print("reg", "%2d" % i
, s
)
243 def __init__(self
, dec2
, initial_sprs
={}):
246 for key
, v
in initial_sprs
.items():
247 if isinstance(key
, SelectableInt
):
249 key
= special_sprs
.get(key
, key
)
250 if isinstance(key
, int):
253 info
= spr_byname
[key
]
254 if not isinstance(v
, SelectableInt
):
255 v
= SelectableInt(v
, info
.length
)
258 def __getitem__(self
, key
):
260 #log("dict", self.items())
261 # if key in special_sprs get the special spr, otherwise return key
262 if isinstance(key
, SelectableInt
):
264 if isinstance(key
, int):
265 key
= spr_dict
[key
].SPR
266 key
= special_sprs
.get(key
, key
)
267 if key
== 'HSRR0': # HACK!
269 if key
== 'HSRR1': # HACK!
272 res
= dict.__getitem
__(self
, key
)
274 if isinstance(key
, int):
277 info
= spr_byname
[key
]
278 self
[key
] = SelectableInt(0, info
.length
)
279 res
= dict.__getitem
__(self
, key
)
280 #log("spr returning", key, res)
283 def __setitem__(self
, key
, value
):
284 if isinstance(key
, SelectableInt
):
286 if isinstance(key
, int):
287 key
= spr_dict
[key
].SPR
289 key
= special_sprs
.get(key
, key
)
290 if key
== 'HSRR0': # HACK!
291 self
.__setitem
__('SRR0', value
)
292 if key
== 'HSRR1': # HACK!
293 self
.__setitem
__('SRR1', value
)
295 value
= XERState(value
)
296 log("setting spr", key
, value
)
297 dict.__setitem
__(self
, key
, value
)
299 def __call__(self
, ridx
):
302 def dump(self
, printout
=True):
304 keys
= list(self
.keys())
307 sprname
= spr_dict
.get(k
, None)
311 sprname
= sprname
.SPR
312 res
.append((sprname
, self
[k
].value
))
314 for sprname
, value
in res
:
315 print(" ", sprname
, hex(value
))
320 def __init__(self
, pc_init
=0):
321 self
.CIA
= SelectableInt(pc_init
, 64)
322 self
.NIA
= self
.CIA
+ SelectableInt(4, 64) # only true for v3.0B!
324 def update_nia(self
, is_svp64
):
325 increment
= 8 if is_svp64
else 4
326 self
.NIA
= self
.CIA
+ SelectableInt(increment
, 64)
328 def update(self
, namespace
, is_svp64
):
329 """updates the program counter (PC) by 4 if v3.0B mode or 8 if SVP64
331 self
.CIA
= namespace
['NIA'].narrow(64)
332 self
.update_nia(is_svp64
)
333 namespace
['CIA'] = self
.CIA
334 namespace
['NIA'] = self
.NIA
338 # See PowerISA Version 3.0 B Book 1
339 # Section 2.3.1 Condition Register pages 30 - 31
341 LT
= FL
= 0 # negative, less than, floating-point less than
342 GT
= FG
= 1 # positive, greater than, floating-point greater than
343 EQ
= FE
= 2 # equal, floating-point equal
344 SO
= FU
= 3 # summary overflow, floating-point unordered
346 def __init__(self
, init
=0):
347 # rev_cr = int('{:016b}'.format(initial_cr)[::-1], 2)
348 # self.cr = FieldSelectableInt(self._cr, list(range(32, 64)))
349 self
.cr
= SelectableInt(init
, 64) # underlying reg
350 # field-selectable versions of Condition Register TODO check bitranges?
353 bits
= tuple(range(i
*4+32, (i
+1)*4+32))
354 _cr
= FieldSelectableInt(self
.cr
, bits
)
358 # decode SVP64 predicate integer to reg number and invert
359 def get_predint(gpr
, mask
):
363 log("get_predint", mask
, SVP64PredInt
.ALWAYS
.value
)
364 if mask
== SVP64PredInt
.ALWAYS
.value
:
365 return 0xffff_ffff_ffff_ffff # 64 bits of 1
366 if mask
== SVP64PredInt
.R3_UNARY
.value
:
367 return 1 << (r3
.value
& 0b111111)
368 if mask
== SVP64PredInt
.R3
.value
:
370 if mask
== SVP64PredInt
.R3_N
.value
:
372 if mask
== SVP64PredInt
.R10
.value
:
374 if mask
== SVP64PredInt
.R10_N
.value
:
376 if mask
== SVP64PredInt
.R30
.value
:
378 if mask
== SVP64PredInt
.R30_N
.value
:
382 # decode SVP64 predicate CR to reg number and invert status
383 def _get_predcr(mask
):
384 if mask
== SVP64PredCR
.LT
.value
:
386 if mask
== SVP64PredCR
.GE
.value
:
388 if mask
== SVP64PredCR
.GT
.value
:
390 if mask
== SVP64PredCR
.LE
.value
:
392 if mask
== SVP64PredCR
.EQ
.value
:
394 if mask
== SVP64PredCR
.NE
.value
:
396 if mask
== SVP64PredCR
.SO
.value
:
398 if mask
== SVP64PredCR
.NS
.value
:
402 # read individual CR fields (0..VL-1), extract the required bit
403 # and construct the mask
404 def get_predcr(crl
, mask
, vl
):
405 idx
, noninv
= _get_predcr(mask
)
408 cr
= crl
[i
+SVP64CROffs
.CRPred
]
409 if cr
[idx
].value
== noninv
:
414 # TODO, really should just be using PowerDecoder2
415 def get_idx_map(dec2
, name
):
417 in1_sel
= yield op
.in1_sel
418 in2_sel
= yield op
.in2_sel
419 in3_sel
= yield op
.in3_sel
420 in1
= yield dec2
.e
.read_reg1
.data
421 # identify which regnames map to in1/2/3
422 if name
== 'RA' or name
== 'RA_OR_ZERO':
423 if (in1_sel
== In1Sel
.RA
.value
or
424 (in1_sel
== In1Sel
.RA_OR_ZERO
.value
and in1
!= 0)):
426 if in1_sel
== In1Sel
.RA_OR_ZERO
.value
:
429 if in2_sel
== In2Sel
.RB
.value
:
431 if in3_sel
== In3Sel
.RB
.value
:
433 # XXX TODO, RC doesn't exist yet!
435 if in3_sel
== In3Sel
.RC
.value
:
437 elif name
in ['EA', 'RS']:
438 if in1_sel
== In1Sel
.RS
.value
:
440 if in2_sel
== In2Sel
.RS
.value
:
442 if in3_sel
== In3Sel
.RS
.value
:
445 if in1_sel
== In1Sel
.FRA
.value
:
447 if in3_sel
== In3Sel
.FRA
.value
:
450 if in2_sel
== In2Sel
.FRB
.value
:
453 if in3_sel
== In3Sel
.FRC
.value
:
456 if in1_sel
== In1Sel
.FRS
.value
:
458 if in3_sel
== In3Sel
.FRS
.value
:
461 if in1_sel
== In1Sel
.FRT
.value
:
464 if in1_sel
== In1Sel
.RT
.value
:
469 # TODO, really should just be using PowerDecoder2
470 def get_idx_in(dec2
, name
, ewmode
=False):
471 idx
= yield from get_idx_map(dec2
, name
)
475 in1_sel
= yield op
.in1_sel
476 in2_sel
= yield op
.in2_sel
477 in3_sel
= yield op
.in3_sel
478 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
479 in1
= yield dec2
.e
.read_reg1
.data
480 in2
= yield dec2
.e
.read_reg2
.data
481 in3
= yield dec2
.e
.read_reg3
.data
483 in1_base
= yield dec2
.e
.read_reg1
.base
484 in2_base
= yield dec2
.e
.read_reg2
.base
485 in3_base
= yield dec2
.e
.read_reg3
.base
486 in1_offs
= yield dec2
.e
.read_reg1
.offs
487 in2_offs
= yield dec2
.e
.read_reg2
.offs
488 in3_offs
= yield dec2
.e
.read_reg3
.offs
489 in1
= (in1
, in1_base
, in1_offs
)
490 in2
= (in2
, in2_base
, in2_offs
)
491 in3
= (in3
, in3_base
, in3_offs
)
493 in1_isvec
= yield dec2
.in1_isvec
494 in2_isvec
= yield dec2
.in2_isvec
495 in3_isvec
= yield dec2
.in3_isvec
496 log("get_idx_in in1", name
, in1_sel
, In1Sel
.RA
.value
,
498 log("get_idx_in in2", name
, in2_sel
, In2Sel
.RB
.value
,
500 log("get_idx_in in3", name
, in3_sel
, In3Sel
.RS
.value
,
502 log("get_idx_in FRS in3", name
, in3_sel
, In3Sel
.FRS
.value
,
504 log("get_idx_in FRB in2", name
, in2_sel
, In2Sel
.FRB
.value
,
506 log("get_idx_in FRC in3", name
, in3_sel
, In3Sel
.FRC
.value
,
509 return in1
, in1_isvec
511 return in2
, in2_isvec
513 return in3
, in3_isvec
517 # TODO, really should just be using PowerDecoder2
518 def get_cr_in(dec2
, name
):
520 in_sel
= yield op
.cr_in
521 in_bitfield
= yield dec2
.dec_cr_in
.cr_bitfield
.data
522 sv_cr_in
= yield op
.sv_cr_in
523 spec
= yield dec2
.crin_svdec
.spec
524 sv_override
= yield dec2
.dec_cr_in
.sv_override
525 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
526 in1
= yield dec2
.e
.read_cr1
.data
527 cr_isvec
= yield dec2
.cr_in_isvec
528 log("get_cr_in", in_sel
, CROutSel
.CR0
.value
, in1
, cr_isvec
)
529 log(" sv_cr_in", sv_cr_in
)
530 log(" cr_bf", in_bitfield
)
532 log(" override", sv_override
)
533 # identify which regnames map to in / o2
535 if in_sel
== CRInSel
.BI
.value
:
537 log("get_cr_in not found", name
)
541 # TODO, really should just be using PowerDecoder2
542 def get_cr_out(dec2
, name
):
544 out_sel
= yield op
.cr_out
545 out_bitfield
= yield dec2
.dec_cr_out
.cr_bitfield
.data
546 sv_cr_out
= yield op
.sv_cr_out
547 spec
= yield dec2
.crout_svdec
.spec
548 sv_override
= yield dec2
.dec_cr_out
.sv_override
549 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
550 out
= yield dec2
.e
.write_cr
.data
551 o_isvec
= yield dec2
.cr_out_isvec
552 log("get_cr_out", out_sel
, CROutSel
.CR0
.value
, out
, o_isvec
)
553 log(" sv_cr_out", sv_cr_out
)
554 log(" cr_bf", out_bitfield
)
556 log(" override", sv_override
)
557 # identify which regnames map to out / o2
559 if out_sel
== CROutSel
.BF
.value
:
562 if out_sel
== CROutSel
.CR0
.value
:
564 if name
== 'CR1': # these are not actually calculated correctly
565 if out_sel
== CROutSel
.CR1
.value
:
567 # check RC1 set? if so return implicit vector, this is a REAL bad hack
568 RC1
= yield dec2
.rm_dec
.RC1
570 log("get_cr_out RC1 mode")
572 return 0, True # XXX TODO: offset CR0 from SVSTATE SPR
574 return 1, True # XXX TODO: offset CR1 from SVSTATE SPR
576 log("get_cr_out not found", name
)
580 # TODO, really should just be using PowerDecoder2
581 def get_out_map(dec2
, name
):
583 out_sel
= yield op
.out_sel
584 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
585 out
= yield dec2
.e
.write_reg
.data
586 # identify which regnames map to out / o2
588 if out_sel
== OutSel
.RA
.value
:
591 if out_sel
== OutSel
.RT
.value
:
593 if out_sel
== OutSel
.RT_OR_ZERO
.value
and out
!= 0:
595 elif name
== 'RT_OR_ZERO':
596 if out_sel
== OutSel
.RT_OR_ZERO
.value
:
599 if out_sel
== OutSel
.FRA
.value
:
602 if out_sel
== OutSel
.FRS
.value
:
605 if out_sel
== OutSel
.FRT
.value
:
610 # TODO, really should just be using PowerDecoder2
611 def get_idx_out(dec2
, name
, ewmode
=False):
613 out_sel
= yield op
.out_sel
614 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
615 out
= yield dec2
.e
.write_reg
.data
616 o_isvec
= yield dec2
.o_isvec
618 offs
= yield dec2
.e
.write_reg
.offs
619 base
= yield dec2
.e
.write_reg
.base
620 out
= (out
, base
, offs
)
621 # identify which regnames map to out / o2
622 ismap
= yield from get_out_map(dec2
, name
)
624 log("get_idx_out", name
, out_sel
, out
, o_isvec
)
626 log("get_idx_out not found", name
, out_sel
, out
, o_isvec
)
630 # TODO, really should just be using PowerDecoder2
631 def get_out2_map(dec2
, name
):
632 # check first if register is activated for write
634 out_sel
= yield op
.out_sel
635 out
= yield dec2
.e
.write_ea
.data
636 out_ok
= yield dec2
.e
.write_ea
.ok
640 if name
in ['EA', 'RA']:
641 if hasattr(op
, "upd"):
642 # update mode LD/ST uses read-reg A also as an output
644 log("get_idx_out2", upd
, LDSTMode
.update
.value
,
645 out_sel
, OutSel
.RA
.value
,
647 if upd
== LDSTMode
.update
.value
:
650 fft_en
= yield dec2
.implicit_rs
652 log("get_idx_out2", out_sel
, OutSel
.RS
.value
,
656 fft_en
= yield dec2
.implicit_rs
658 log("get_idx_out2", out_sel
, OutSel
.FRS
.value
,
664 # TODO, really should just be using PowerDecoder2
665 def get_idx_out2(dec2
, name
, ewmode
=False):
666 # check first if register is activated for write
668 out_sel
= yield op
.out_sel
669 out
= yield dec2
.e
.write_ea
.data
671 offs
= yield dec2
.e
.write_ea
.offs
672 base
= yield dec2
.e
.write_ea
.base
673 out
= (out
, base
, offs
)
674 o_isvec
= yield dec2
.o2_isvec
675 ismap
= yield from get_out2_map(dec2
, name
)
677 log("get_idx_out2", name
, out_sel
, out
, o_isvec
)
683 """deals with svstate looping.
686 def __init__(self
, svstate
):
687 self
.svstate
= svstate
690 def new_iterators(self
):
691 self
.src_it
= self
.src_iterator()
692 self
.dst_it
= self
.dst_iterator()
696 self
.new_ssubstep
= 0
697 self
.new_dsubstep
= 0
698 self
.pred_dst_zero
= 0
699 self
.pred_src_zero
= 0
701 def src_iterator(self
):
702 """source-stepping iterator
704 pack
= self
.svstate
.pack
708 # pack advances subvl in *outer* loop
709 while True: # outer subvl loop
710 while True: # inner vl loop
713 srcmask
= self
.srcmask
714 srcstep
= self
.svstate
.srcstep
715 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
716 if self
.pred_sz
or pred_src_zero
:
717 self
.pred_src_zero
= not pred_src_zero
718 log(" advance src", srcstep
, vl
,
719 self
.svstate
.ssubstep
, subvl
)
720 # yield actual substep/srcstep
721 yield (self
.svstate
.ssubstep
, srcstep
)
722 # the way yield works these could have been modified.
725 srcstep
= self
.svstate
.srcstep
726 log(" advance src check", srcstep
, vl
,
727 self
.svstate
.ssubstep
, subvl
, srcstep
== vl
-1,
728 self
.svstate
.ssubstep
== subvl
)
729 if srcstep
== vl
-1: # end-point
730 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
731 if self
.svstate
.ssubstep
== subvl
: # end-point
732 log(" advance pack stop")
734 break # exit inner loop
735 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance ss
737 if self
.svstate
.ssubstep
== subvl
: # end-point
738 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
739 log(" advance pack stop")
741 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
744 # these cannot be done as for-loops because SVSTATE may change
745 # (srcstep/substep may be modified, interrupted, subvl/vl change)
746 # but they *can* be done as while-loops as long as every SVSTATE
747 # "thing" is re-read every single time a yield gives indices
748 while True: # outer vl loop
749 while True: # inner subvl loop
752 srcmask
= self
.srcmask
753 srcstep
= self
.svstate
.srcstep
754 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
755 if self
.pred_sz
or pred_src_zero
:
756 self
.pred_src_zero
= not pred_src_zero
757 log(" advance src", srcstep
, vl
,
758 self
.svstate
.ssubstep
, subvl
)
759 # yield actual substep/srcstep
760 yield (self
.svstate
.ssubstep
, srcstep
)
761 if self
.svstate
.ssubstep
== subvl
: # end-point
762 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
763 break # exit inner loop
764 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
766 if srcstep
== vl
-1: # end-point
767 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
770 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance srcstep
772 def dst_iterator(self
):
773 """dest-stepping iterator
775 unpack
= self
.svstate
.unpack
779 # pack advances subvl in *outer* loop
780 while True: # outer subvl loop
781 while True: # inner vl loop
784 dstmask
= self
.dstmask
785 dststep
= self
.svstate
.dststep
786 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
787 if self
.pred_dz
or pred_dst_zero
:
788 self
.pred_dst_zero
= not pred_dst_zero
789 log(" advance dst", dststep
, vl
,
790 self
.svstate
.dsubstep
, subvl
)
791 # yield actual substep/dststep
792 yield (self
.svstate
.dsubstep
, dststep
)
793 # the way yield works these could have been modified.
795 dststep
= self
.svstate
.dststep
796 log(" advance dst check", dststep
, vl
,
797 self
.svstate
.ssubstep
, subvl
)
798 if dststep
== vl
-1: # end-point
799 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
800 if self
.svstate
.dsubstep
== subvl
: # end-point
801 log(" advance unpack stop")
804 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance ds
806 if self
.svstate
.dsubstep
== subvl
: # end-point
807 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
808 log(" advance unpack stop")
810 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
812 # these cannot be done as for-loops because SVSTATE may change
813 # (dststep/substep may be modified, interrupted, subvl/vl change)
814 # but they *can* be done as while-loops as long as every SVSTATE
815 # "thing" is re-read every single time a yield gives indices
816 while True: # outer vl loop
817 while True: # inner subvl loop
819 dstmask
= self
.dstmask
820 dststep
= self
.svstate
.dststep
821 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
822 if self
.pred_dz
or pred_dst_zero
:
823 self
.pred_dst_zero
= not pred_dst_zero
824 log(" advance dst", dststep
, self
.svstate
.vl
,
825 self
.svstate
.dsubstep
, subvl
)
826 # yield actual substep/dststep
827 yield (self
.svstate
.dsubstep
, dststep
)
828 if self
.svstate
.dsubstep
== subvl
: # end-point
829 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
831 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
834 if dststep
== vl
-1: # end-point
835 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
837 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance dststep
839 def src_iterate(self
):
840 """source-stepping iterator
844 pack
= self
.svstate
.pack
845 unpack
= self
.svstate
.unpack
846 ssubstep
= self
.svstate
.ssubstep
847 end_ssub
= ssubstep
== subvl
848 end_src
= self
.svstate
.srcstep
== vl
-1
849 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
853 srcstep
= self
.svstate
.srcstep
854 srcmask
= self
.srcmask
856 # pack advances subvl in *outer* loop
858 assert srcstep
<= vl
-1
859 end_src
= srcstep
== vl
-1
864 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
868 srcstep
+= 1 # advance srcstep
869 if not self
.srcstep_skip
:
871 if ((1 << srcstep
) & srcmask
) != 0:
874 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
876 # advance subvl in *inner* loop
879 assert srcstep
<= vl
-1
880 end_src
= srcstep
== vl
-1
881 if end_src
: # end-point
887 if not self
.srcstep_skip
:
889 if ((1 << srcstep
) & srcmask
) != 0:
892 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
893 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
896 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
898 self
.svstate
.srcstep
= SelectableInt(srcstep
, 7)
899 log(" advance src", self
.svstate
.srcstep
, self
.svstate
.ssubstep
,
902 def dst_iterate(self
):
903 """dest step iterator
907 pack
= self
.svstate
.pack
908 unpack
= self
.svstate
.unpack
909 dsubstep
= self
.svstate
.dsubstep
910 end_dsub
= dsubstep
== subvl
911 dststep
= self
.svstate
.dststep
912 end_dst
= dststep
== vl
-1
913 dstmask
= self
.dstmask
914 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
919 # unpack advances subvl in *outer* loop
921 assert dststep
<= vl
-1
922 end_dst
= dststep
== vl
-1
927 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
931 dststep
+= 1 # advance dststep
932 if not self
.dststep_skip
:
934 if ((1 << dststep
) & dstmask
) != 0:
937 log(" dskip", bin(dstmask
), bin(1 << dststep
))
939 # advance subvl in *inner* loop
942 assert dststep
<= vl
-1
943 end_dst
= dststep
== vl
-1
944 if end_dst
: # end-point
950 if not self
.dststep_skip
:
952 if ((1 << dststep
) & dstmask
) != 0:
955 log(" dskip", bin(dstmask
), bin(1 << dststep
))
956 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
959 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
961 self
.svstate
.dststep
= SelectableInt(dststep
, 7)
962 log(" advance dst", self
.svstate
.dststep
, self
.svstate
.dsubstep
,
965 def at_loopend(self
):
966 """tells if this is the last possible element. uses the cached values
967 for src/dst-step and sub-steps
971 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
972 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
973 end_ssub
= ssubstep
== subvl
974 end_dsub
= dsubstep
== subvl
975 if srcstep
== vl
-1 and end_ssub
:
977 if dststep
== vl
-1 and end_dsub
:
981 def advance_svstate_steps(self
):
982 """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
983 TODO when Pack/Unpack is set, substep becomes the *outer* loop
985 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
986 if self
.loopend
: # huhn??
991 def read_src_mask(self
):
992 """read/update pred_sz and src mask
994 # get SVSTATE VL (oh and print out some debug stuff)
996 srcstep
= self
.svstate
.srcstep
997 ssubstep
= self
.svstate
.ssubstep
999 # get predicate mask (all 64 bits)
1000 srcmask
= 0xffff_ffff_ffff_ffff
1002 pmode
= yield self
.dec2
.rm_dec
.predmode
1003 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1004 srcpred
= yield self
.dec2
.rm_dec
.srcpred
1005 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1006 pred_sz
= yield self
.dec2
.rm_dec
.pred_sz
1007 if pmode
== SVP64PredMode
.INT
.value
:
1008 srcmask
= dstmask
= get_predint(self
.gpr
, dstpred
)
1009 if sv_ptype
== SVPType
.P2
.value
:
1010 srcmask
= get_predint(self
.gpr
, srcpred
)
1011 elif pmode
== SVP64PredMode
.CR
.value
:
1012 srcmask
= dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1013 if sv_ptype
== SVPType
.P2
.value
:
1014 srcmask
= get_predcr(self
.crl
, srcpred
, vl
)
1015 # work out if the ssubsteps are completed
1016 ssubstart
= ssubstep
== 0
1017 log(" pmode", pmode
)
1018 log(" ptype", sv_ptype
)
1019 log(" srcpred", bin(srcpred
))
1020 log(" srcmask", bin(srcmask
))
1021 log(" pred_sz", bin(pred_sz
))
1022 log(" ssubstart", ssubstart
)
1024 # store all that above
1025 self
.srcstep_skip
= False
1026 self
.srcmask
= srcmask
1027 self
.pred_sz
= pred_sz
1028 self
.new_ssubstep
= ssubstep
1029 log(" new ssubstep", ssubstep
)
1030 # until the predicate mask has a "1" bit... or we run out of VL
1031 # let srcstep==VL be the indicator to move to next instruction
1033 self
.srcstep_skip
= True
1035 def read_dst_mask(self
):
1036 """same as read_src_mask - check and record everything needed
1038 # get SVSTATE VL (oh and print out some debug stuff)
1039 # yield Delay(1e-10) # make changes visible
1040 vl
= self
.svstate
.vl
1041 dststep
= self
.svstate
.dststep
1042 dsubstep
= self
.svstate
.dsubstep
1044 # get predicate mask (all 64 bits)
1045 dstmask
= 0xffff_ffff_ffff_ffff
1047 pmode
= yield self
.dec2
.rm_dec
.predmode
1048 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
1049 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1050 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1051 pred_dz
= yield self
.dec2
.rm_dec
.pred_dz
1052 if pmode
== SVP64PredMode
.INT
.value
:
1053 dstmask
= get_predint(self
.gpr
, dstpred
)
1054 elif pmode
== SVP64PredMode
.CR
.value
:
1055 dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1056 # work out if the ssubsteps are completed
1057 dsubstart
= dsubstep
== 0
1058 log(" pmode", pmode
)
1059 log(" ptype", sv_ptype
)
1060 log(" dstpred", bin(dstpred
))
1061 log(" dstmask", bin(dstmask
))
1062 log(" pred_dz", bin(pred_dz
))
1063 log(" dsubstart", dsubstart
)
1065 self
.dststep_skip
= False
1066 self
.dstmask
= dstmask
1067 self
.pred_dz
= pred_dz
1068 self
.new_dsubstep
= dsubstep
1069 log(" new dsubstep", dsubstep
)
1071 self
.dststep_skip
= True
1073 def svstate_pre_inc(self
):
1074 """check if srcstep/dststep need to skip over masked-out predicate bits
1075 note that this is not supposed to do anything to substep,
1076 it is purely for skipping masked-out bits
1079 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1080 yield from self
.read_src_mask()
1081 yield from self
.read_dst_mask()
1088 srcstep
= self
.svstate
.srcstep
1089 srcmask
= self
.srcmask
1090 pred_src_zero
= self
.pred_sz
1091 vl
= self
.svstate
.vl
1092 # srcstep-skipping opportunity identified
1093 if self
.srcstep_skip
:
1094 # cannot do this with sv.bc - XXX TODO
1097 while (((1 << srcstep
) & srcmask
) == 0) and (srcstep
!= vl
):
1098 log(" sskip", bin(1 << srcstep
))
1101 # now work out if the relevant mask bits require zeroing
1103 pred_src_zero
= ((1 << srcstep
) & srcmask
) == 0
1105 # store new srcstep / dststep
1106 self
.new_srcstep
= srcstep
1107 self
.pred_src_zero
= pred_src_zero
1108 log(" new srcstep", srcstep
)
1111 # dststep-skipping opportunity identified
1112 dststep
= self
.svstate
.dststep
1113 dstmask
= self
.dstmask
1114 pred_dst_zero
= self
.pred_dz
1115 vl
= self
.svstate
.vl
1116 if self
.dststep_skip
:
1117 # cannot do this with sv.bc - XXX TODO
1120 while (((1 << dststep
) & dstmask
) == 0) and (dststep
!= vl
):
1121 log(" dskip", bin(1 << dststep
))
1124 # now work out if the relevant mask bits require zeroing
1126 pred_dst_zero
= ((1 << dststep
) & dstmask
) == 0
1128 # store new srcstep / dststep
1129 self
.new_dststep
= dststep
1130 self
.pred_dst_zero
= pred_dst_zero
1131 log(" new dststep", dststep
)
1134 class ISACaller(ISACallerHelper
, ISAFPHelpers
, StepLoop
):
1135 # decoder2 - an instance of power_decoder2
1136 # regfile - a list of initial values for the registers
1137 # initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
1138 # respect_pc - tracks the program counter. requires initial_insns
1139 def __init__(self
, decoder2
, regfile
, initial_sprs
=None, initial_cr
=0,
1140 initial_mem
=None, initial_msr
=0,
1152 use_mmap_mem
=False):
1154 # trace log file for model output. if None do nothing
1155 self
.insnlog
= insnlog
1156 self
.insnlog_is_file
= hasattr(insnlog
, "write")
1157 if not self
.insnlog_is_file
and self
.insnlog
:
1158 self
.insnlog
= open(self
.insnlog
, "w")
1160 self
.bigendian
= bigendian
1162 self
.is_svp64_mode
= False
1163 self
.respect_pc
= respect_pc
1164 if initial_sprs
is None:
1166 if initial_mem
is None:
1168 if fpregfile
is None:
1169 fpregfile
= [0] * 32
1170 if initial_insns
is None:
1172 assert self
.respect_pc
== False, "instructions required to honor pc"
1174 log("ISACaller insns", respect_pc
, initial_insns
, disassembly
)
1175 log("ISACaller initial_msr", initial_msr
)
1177 # "fake program counter" mode (for unit testing)
1181 if isinstance(initial_mem
, tuple):
1182 self
.fake_pc
= initial_mem
[0]
1183 disasm_start
= self
.fake_pc
1185 disasm_start
= initial_pc
1187 # disassembly: we need this for now (not given from the decoder)
1188 self
.disassembly
= {}
1190 for i
, code
in enumerate(disassembly
):
1191 self
.disassembly
[i
*4 + disasm_start
] = code
1193 # set up registers, instruction memory, data memory, PC, SPRs, MSR, CR
1194 self
.svp64rm
= SVP64RM()
1195 if initial_svstate
is None:
1197 if isinstance(initial_svstate
, int):
1198 initial_svstate
= SVP64State(initial_svstate
)
1199 # SVSTATE, MSR and PC
1200 StepLoop
.__init
__(self
, initial_svstate
)
1201 self
.msr
= SelectableInt(initial_msr
, 64) # underlying reg
1203 # GPR FPR SPR registers
1204 initial_sprs
= deepcopy(initial_sprs
) # so as not to get modified
1205 self
.gpr
= GPR(decoder2
, self
, self
.svstate
, regfile
)
1206 self
.fpr
= GPR(decoder2
, self
, self
.svstate
, fpregfile
)
1207 self
.spr
= SPR(decoder2
, initial_sprs
) # initialise SPRs before MMU
1209 # set up 4 dummy SVSHAPEs if they aren't already set up
1211 sname
= 'SVSHAPE%d' % i
1212 val
= self
.spr
.get(sname
, 0)
1213 # make sure it's an SVSHAPE
1214 self
.spr
[sname
] = SVSHAPE(val
, self
.gpr
)
1215 self
.last_op_svshape
= False
1219 self
.mem
= MemMMap(row_bytes
=8,
1220 initial_mem
=initial_mem
,
1222 self
.imem
= self
.mem
1223 self
.mem
.initialize(row_bytes
=4, initial_mem
=initial_insns
)
1224 self
.mem
.log_fancy(kind
=LogKind
.InstrInOuts
)
1226 self
.mem
= Mem(row_bytes
=8, initial_mem
=initial_mem
,
1228 self
.mem
.log_fancy(kind
=LogKind
.InstrInOuts
)
1229 self
.imem
= Mem(row_bytes
=4, initial_mem
=initial_insns
)
1230 # MMU mode, redirect underlying Mem through RADIX
1232 self
.mem
= RADIX(self
.mem
, self
)
1234 self
.imem
= RADIX(self
.imem
, self
)
1236 # TODO, needed here:
1237 # FPR (same as GPR except for FP nums)
1238 # 4.2.2 p124 FPSCR (definitely "separate" - not in SPR)
1239 # note that mffs, mcrfs, mtfsf "manage" this FPSCR
1240 self
.fpscr
= FPSCRState(initial_fpscr
)
1242 # 2.3.1 CR (and sub-fields CR0..CR6 - CR0 SO comes from XER.SO)
1243 # note that mfocrf, mfcr, mtcr, mtocrf, mcrxrx "manage" CRs
1245 # 2.3.2 LR (actually SPR #8) -- Done
1246 # 2.3.3 CTR (actually SPR #9) -- Done
1247 # 2.3.4 TAR (actually SPR #815)
1248 # 3.2.2 p45 XER (actually SPR #1) -- Done
1249 # 3.2.3 p46 p232 VRSAVE (actually SPR #256)
1251 # create CR then allow portions of it to be "selectable" (below)
1252 self
.cr_fields
= CRFields(initial_cr
)
1253 self
.cr
= self
.cr_fields
.cr
1254 self
.cr_backup
= 0 # sigh, dreadful hack: for fail-first (VLi)
1256 # "undefined", just set to variable-bit-width int (use exts "max")
1257 # self.undefined = SelectableInt(0, EFFECTIVELY_UNLIMITED)
1260 self
.namespace
.update(self
.spr
)
1261 self
.namespace
.update({'GPR': self
.gpr
,
1265 'memassign': self
.memassign
,
1268 'SVSTATE': self
.svstate
,
1269 'SVSHAPE0': self
.spr
['SVSHAPE0'],
1270 'SVSHAPE1': self
.spr
['SVSHAPE1'],
1271 'SVSHAPE2': self
.spr
['SVSHAPE2'],
1272 'SVSHAPE3': self
.spr
['SVSHAPE3'],
1275 'FPSCR': self
.fpscr
,
1276 'undefined': undefined
,
1277 'mode_is_64bit': True,
1278 'SO': XER_bits
['SO'],
1279 'XLEN': 64 # elwidth overrides
1282 for name
in BFP_FLAG_NAMES
:
1283 setattr(self
, name
, 0)
1285 # update pc to requested start point
1286 self
.set_pc(initial_pc
)
1288 # field-selectable versions of Condition Register
1289 self
.crl
= self
.cr_fields
.crl
1291 self
.namespace
["CR%d" % i
] = self
.crl
[i
]
1293 self
.decoder
= decoder2
.dec
1294 self
.dec2
= decoder2
1296 super().__init
__(XLEN
=self
.namespace
["XLEN"], FPSCR
=self
.fpscr
)
def trace(self, out):
    """Append *out* to the instruction-trace log, if tracing is enabled.

    self.insnlog is either None (tracing disabled) or a writable
    file-like object set up in __init__.
    """
    if self.insnlog is not None:
        self.insnlog.write(out)
1304 return self
.namespace
["XLEN"]
def call_trap(self, trap_addr, trap_bit):
    """Invoke TRAP and redirect execution to the new location.

    the next instruction will begin at trap_addr: TRAP computes
    self.trap_nia, which is then committed to NIA via pc.update.
    """
    self.TRAP(trap_addr, trap_bit)
    # TRAP stored the redirect target in self.trap_nia: commit it
    self.namespace['NIA'] = self.trap_nia
    self.pc.update(self.namespace, self.is_svp64_mode)
def TRAP(self, trap_addr=0x700, trap_bit=PIb.TRAP):
    """TRAP: saves PC and MSR (and TODO SVSTATE), and updates MSR.

    the TRAP function is callable from inside the pseudocode itself,
    hence the default arguments.  when calling from inside ISACaller
    it is best to use call_trap().
    """
    # https://bugs.libre-soc.org/show_bug.cgi?id=859
    kaivb = self.spr['KAIVB'].value
    msr = self.namespace['MSR'].value
    log("TRAP:", hex(trap_addr), hex(msr), "kaivb", hex(kaivb))
    # store CIA(+4?) in SRR0, store MSR in SRR1, and (if enabled)
    # SVSTATE in SVSRR0; NIA is redirected to trap_addr combined with
    # the upper (non-offset) bits of KAIVB
    self.spr['SRR0'].value = self.pc.CIA.value
    self.spr['SRR1'].value = msr
    if self.is_svp64_mode:
        self.spr['SVSRR0'] = self.namespace['SVSTATE'].value
    self.trap_nia = SelectableInt(trap_addr | (kaivb & ~0x1fff), 64)
    self.spr['SRR1'][trap_bit] = 1  # change *copy* of MSR in SRR1

    # set exception bits. TODO: this should, based on the address
    # in figure 66 p1065 V3.0B and the table figure 65 p1063 set these
    # bits appropriately. however it turns out that *for now* in all
    # cases (all trap_addrs) the exact same thing is needed.
    for bit in (MSRb.IR, MSRb.DR, MSRb.FE0, MSRb.FE1, MSRb.EE, MSRb.RI,
                MSRb.TM, MSRb.VEC, MSRb.VSX, MSRb.PR, MSRb.FP, MSRb.PMM,
                MSRb.TEs, MSRb.TEe, MSRb.UND):
        self.msr[bit] = 0
    self.msr[MSRb.SF] = 1
    self.msr[MSRb.LE] = 1
def memassign(self, ea, sz, val):
    """Delegate assignment of *sz* bytes of *val* at effective address *ea*
    to the underlying memory model."""
    self.mem.memassign(ea, sz, val)
1364 def prep_namespace(self
, insn_name
, formname
, op_fields
, xlen
):
1365 # TODO: get field names from form in decoder*1* (not decoder2)
1366 # decoder2 is hand-created, and decoder1.sigform is auto-generated
1368 # then "yield" fields only from op_fields rather than hard-coded
1370 fields
= self
.decoder
.sigforms
[formname
]
1371 log("prep_namespace", formname
, op_fields
, insn_name
)
1372 for name
in op_fields
:
1373 # CR immediates. deal with separately. needs modifying
1375 if self
.is_svp64_mode
and name
in ['BI']: # TODO, more CRs
1376 # BI is a 5-bit, must reconstruct the value
1377 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1378 sig
= getattr(fields
, name
)
1380 # low 2 LSBs (CR field selector) remain same, CR num extended
1381 assert regnum
<= 7, "sigh, TODO, 128 CR fields"
1382 val
= (val
& 0b11) |
(regnum
<< 2)
1383 elif self
.is_svp64_mode
and name
in ['BF']: # TODO, more CRs
1384 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, "BF")
1385 log('hack %s' % name
, regnum
, is_vec
)
1388 sig
= getattr(fields
, name
)
1390 # these are all opcode fields involved in index-selection of CR,
1391 # and need to do "standard" arithmetic. CR[BA+32] for example
1392 # would, if using SelectableInt, only be 5-bit.
1393 if name
in ['BF', 'BFA', 'BC', 'BA', 'BB', 'BT', 'BI']:
1394 self
.namespace
[name
] = val
1396 self
.namespace
[name
] = SelectableInt(val
, sig
.width
)
1398 self
.namespace
['XER'] = self
.spr
['XER']
1399 self
.namespace
['CA'] = self
.spr
['XER'][XER_bits
['CA']].value
1400 self
.namespace
['CA32'] = self
.spr
['XER'][XER_bits
['CA32']].value
1401 self
.namespace
['OV'] = self
.spr
['XER'][XER_bits
['OV']].value
1402 self
.namespace
['OV32'] = self
.spr
['XER'][XER_bits
['OV32']].value
1403 self
.namespace
['XLEN'] = xlen
1405 # add some SVSTATE convenience variables
1406 vl
= self
.svstate
.vl
1407 srcstep
= self
.svstate
.srcstep
1408 self
.namespace
['VL'] = vl
1409 self
.namespace
['srcstep'] = srcstep
1411 # take a copy of the CR field value: if non-VLi fail-first fails
1412 # this is because the pseudocode writes *directly* to CR. sigh
1413 self
.cr_backup
= self
.cr
.value
1415 # sv.bc* need some extra fields
1416 if self
.is_svp64_mode
and insn_name
.startswith("sv.bc"):
1417 # blegh grab bits manually
1418 mode
= yield self
.dec2
.rm_dec
.rm_in
.mode
1419 # convert to SelectableInt before test
1420 mode
= SelectableInt(mode
, 5)
1421 bc_vlset
= mode
[SVP64MODEb
.BC_VLSET
] != 0
1422 bc_vli
= mode
[SVP64MODEb
.BC_VLI
] != 0
1423 bc_snz
= mode
[SVP64MODEb
.BC_SNZ
] != 0
1424 bc_vsb
= yield self
.dec2
.rm_dec
.bc_vsb
1425 bc_lru
= yield self
.dec2
.rm_dec
.bc_lru
1426 bc_gate
= yield self
.dec2
.rm_dec
.bc_gate
1427 sz
= yield self
.dec2
.rm_dec
.pred_sz
1428 self
.namespace
['mode'] = SelectableInt(mode
, 5)
1429 self
.namespace
['ALL'] = SelectableInt(bc_gate
, 1)
1430 self
.namespace
['VSb'] = SelectableInt(bc_vsb
, 1)
1431 self
.namespace
['LRu'] = SelectableInt(bc_lru
, 1)
1432 self
.namespace
['VLSET'] = SelectableInt(bc_vlset
, 1)
1433 self
.namespace
['VLI'] = SelectableInt(bc_vli
, 1)
1434 self
.namespace
['sz'] = SelectableInt(sz
, 1)
1435 self
.namespace
['SNZ'] = SelectableInt(bc_snz
, 1)
1437 def get_kludged_op_add_ca_ov(self
, inputs
, inp_ca_ov
):
1438 """ this was not at all necessary to do. this function massively
1439 duplicates - in a laborious and complex fashion - the contents of
1440 the CSV files that were extracted two years ago from microwatt's
1441 source code. A-inversion is the "inv A" column, output inversion
1442 is the "inv out" column, carry-in equal to 0 or 1 or CA is the
1445 all of that information is available in
1446 self.instrs[ins_name].op_fields
1447 where info is usually assigned to self.instrs[ins_name]
1449 https://git.libre-soc.org/?p=openpower-isa.git;a=blob;f=openpower/isatables/minor_31.csv;hb=HEAD
1451 the immediate constants are *also* decoded correctly and placed
1452 usually by DecodeIn2Imm into operand2, as part of power_decoder2.py
1454 def ca(a
, b
, ca_in
, width
):
1455 mask
= (1 << width
) - 1
1456 y
= (a
& mask
) + (b
& mask
) + ca_in
1459 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1460 insn
= insns
.get(asmcode
)
1461 SI
= yield self
.dec2
.dec
.SI
1464 inputs
= [i
.value
for i
in inputs
]
1467 if insn
in ("add", "addo", "addc", "addco"):
1471 elif insn
== "addic" or insn
== "addic.":
1475 elif insn
in ("subf", "subfo", "subfc", "subfco"):
1479 elif insn
== "subfic":
1483 elif insn
== "adde" or insn
== "addeo":
1487 elif insn
== "subfe" or insn
== "subfeo":
1491 elif insn
== "addme" or insn
== "addmeo":
1495 elif insn
== "addze" or insn
== "addzeo":
1499 elif insn
== "subfme" or insn
== "subfmeo":
1503 elif insn
== "subfze" or insn
== "subfzeo":
1507 elif insn
== "addex":
1508 # CA[32] aren't actually written, just generate so we have
1509 # something to return
1510 ca64
= ov64
= ca(inputs
[0], inputs
[1], OV
, 64)
1511 ca32
= ov32
= ca(inputs
[0], inputs
[1], OV
, 32)
1512 return ca64
, ca32
, ov64
, ov32
1513 elif insn
== "neg" or insn
== "nego":
1518 raise NotImplementedError(
1519 "op_add kludge unimplemented instruction: ", asmcode
, insn
)
1521 ca64
= ca(a
, b
, ca_in
, 64)
1522 ca32
= ca(a
, b
, ca_in
, 32)
1523 ov64
= ca64
!= ca(a
, b
, ca_in
, 63)
1524 ov32
= ca32
!= ca(a
, b
, ca_in
, 31)
1525 return ca64
, ca32
, ov64
, ov32
1527 def handle_carry_(self
, inputs
, output
, ca
, ca32
, inp_ca_ov
):
1528 op
= yield self
.dec2
.e
.do
.insn_type
1529 if op
== MicrOp
.OP_ADD
.value
and ca
is None and ca32
is None:
1530 retval
= yield from self
.get_kludged_op_add_ca_ov(
1532 ca
, ca32
, ov
, ov32
= retval
1533 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1534 if insns
.get(asmcode
) == 'addex':
1535 # TODO: if 32-bit mode, set ov to ov32
1536 self
.spr
['XER'][XER_bits
['OV']] = ov
1537 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1538 log(f
"write OV/OV32 OV={ov} OV32={ov32}",
1539 kind
=LogKind
.InstrInOuts
)
1541 # TODO: if 32-bit mode, set ca to ca32
1542 self
.spr
['XER'][XER_bits
['CA']] = ca
1543 self
.spr
['XER'][XER_bits
['CA32']] = ca32
1544 log(f
"write CA/CA32 CA={ca} CA32={ca32}",
1545 kind
=LogKind
.InstrInOuts
)
1547 inv_a
= yield self
.dec2
.e
.do
.invert_in
1549 inputs
[0] = ~inputs
[0]
1551 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1553 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1554 inputs
.append(SelectableInt(imm
, 64))
1557 log("gt input", x
, output
)
1558 gt
= (gtu(x
, output
))
1561 cy
= 1 if any(gts
) else 0
1563 if ca
is None: # already written
1564 self
.spr
['XER'][XER_bits
['CA']] = cy
1567 # ARGH... different for OP_ADD... *sigh*...
1568 op
= yield self
.dec2
.e
.do
.insn_type
1569 if op
== MicrOp
.OP_ADD
.value
:
1570 res32
= (output
.value
& (1 << 32)) != 0
1571 a32
= (inputs
[0].value
& (1 << 32)) != 0
1572 if len(inputs
) >= 2:
1573 b32
= (inputs
[1].value
& (1 << 32)) != 0
1576 cy32
= res32 ^ a32 ^ b32
1577 log("CA32 ADD", cy32
)
1581 log("input", x
, output
)
1582 log(" x[32:64]", x
, x
[32:64])
1583 log(" o[32:64]", output
, output
[32:64])
1584 gt
= (gtu(x
[32:64], output
[32:64])) == SelectableInt(1, 1)
1586 cy32
= 1 if any(gts
) else 0
1587 log("CA32", cy32
, gts
)
1588 if ca32
is None: # already written
1589 self
.spr
['XER'][XER_bits
['CA32']] = cy32
1591 def handle_overflow(self
, inputs
, output
, div_overflow
, inp_ca_ov
):
1592 op
= yield self
.dec2
.e
.do
.insn_type
1593 if op
== MicrOp
.OP_ADD
.value
:
1594 retval
= yield from self
.get_kludged_op_add_ca_ov(
1596 ca
, ca32
, ov
, ov32
= retval
1597 # TODO: if 32-bit mode, set ov to ov32
1598 self
.spr
['XER'][XER_bits
['OV']] = ov
1599 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1600 self
.spr
['XER'][XER_bits
['SO']] |
= ov
1602 if hasattr(self
.dec2
.e
.do
, "invert_in"):
1603 inv_a
= yield self
.dec2
.e
.do
.invert_in
1605 inputs
[0] = ~inputs
[0]
1607 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1609 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1610 inputs
.append(SelectableInt(imm
, 64))
1611 log("handle_overflow", inputs
, output
, div_overflow
)
1612 if len(inputs
) < 2 and div_overflow
is None:
1615 # div overflow is different: it's returned by the pseudo-code
1616 # because it's more complex than can be done by analysing the output
1617 if div_overflow
is not None:
1618 ov
, ov32
= div_overflow
, div_overflow
1619 # arithmetic overflow can be done by analysing the input and output
1620 elif len(inputs
) >= 2:
1622 input_sgn
= [exts(x
.value
, x
.bits
) < 0 for x
in inputs
]
1623 output_sgn
= exts(output
.value
, output
.bits
) < 0
1624 ov
= 1 if input_sgn
[0] == input_sgn
[1] and \
1625 output_sgn
!= input_sgn
[0] else 0
1628 input32_sgn
= [exts(x
.value
, 32) < 0 for x
in inputs
]
1629 output32_sgn
= exts(output
.value
, 32) < 0
1630 ov32
= 1 if input32_sgn
[0] == input32_sgn
[1] and \
1631 output32_sgn
!= input32_sgn
[0] else 0
1633 # now update XER OV/OV32/SO
1634 so
= self
.spr
['XER'][XER_bits
['SO']]
1635 new_so
= so | ov
# sticky overflow ORs in old with new
1636 self
.spr
['XER'][XER_bits
['OV']] = ov
1637 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1638 self
.spr
['XER'][XER_bits
['SO']] = new_so
1639 log(" set overflow", ov
, ov32
, so
, new_so
)
1641 def handle_comparison(self
, out
, cr_idx
=0, overflow
=None, no_so
=False):
1642 assert isinstance(out
, SelectableInt
), \
1643 "out zero not a SelectableInt %s" % repr(outputs
)
1644 log("handle_comparison", out
.bits
, hex(out
.value
))
1645 # TODO - XXX *processor* in 32-bit mode
1646 # https://bugs.libre-soc.org/show_bug.cgi?id=424
1648 # o32 = exts(out.value, 32)
1649 # print ("handle_comparison exts 32 bit", hex(o32))
1650 out
= exts(out
.value
, out
.bits
)
1651 log("handle_comparison exts", hex(out
))
1652 # create the three main CR flags, EQ GT LT
1653 zero
= SelectableInt(out
== 0, 1)
1654 positive
= SelectableInt(out
> 0, 1)
1655 negative
= SelectableInt(out
< 0, 1)
1656 # get (or not) XER.SO. for setvl this is important *not* to read SO
1658 SO
= SelectableInt(1, 0)
1660 SO
= self
.spr
['XER'][XER_bits
['SO']]
1661 log("handle_comparison SO", SO
.value
,
1662 "overflow", overflow
,
1664 "+ve", positive
.value
,
1665 "-ve", negative
.value
)
1666 # alternative overflow checking (setvl mainly at the moment)
1667 if overflow
is not None and overflow
== 1:
1668 SO
= SelectableInt(1, 1)
1669 # create the four CR field values and set the required CR field
1670 cr_field
= selectconcat(negative
, positive
, zero
, SO
)
1671 log("handle_comparison cr_field", self
.cr
, cr_idx
, cr_field
)
1672 self
.crl
[cr_idx
].eq(cr_field
)
def set_pc(self, pc_val):
    """Force the program counter: write NIA into the namespace then
    commit it through pc.update."""
    self.namespace['NIA'] = SelectableInt(pc_val, 64)
    self.pc.update(self.namespace, self.is_svp64_mode)
1678 def get_next_insn(self
):
1679 """check instruction
1682 pc
= self
.pc
.CIA
.value
1685 ins
= self
.imem
.ld(pc
, 4, False, True, instr_fetch
=True)
1687 raise KeyError("no instruction at 0x%x" % pc
)
def setup_one(self):
    """set up one instruction: fetch at the current PC, then hand the
    (pc, opcode) pair on to the decoder set-up."""
    fetch_pc, opcode = self.get_next_insn()
    yield from self.setup_next_insn(fetch_pc, opcode)
1696 # cache since it's really slow to construct
1697 __PREFIX_CACHE
= SVP64Instruction
.Prefix(SelectableInt(value
=0, bits
=32))
1699 def __decode_prefix(self
, opcode
):
1700 pfx
= self
.__PREFIX
_CACHE
1701 pfx
.storage
.eq(opcode
)
1704 def setup_next_insn(self
, pc
, ins
):
1705 """set up next instruction
1708 log("setup: 0x%x 0x%x %s" % (pc
, ins
& 0xffffffff, bin(ins
)))
1709 log("CIA NIA", self
.respect_pc
, self
.pc
.CIA
.value
, self
.pc
.NIA
.value
)
1711 yield self
.dec2
.sv_rm
.eq(0)
1712 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff)
1713 yield self
.dec2
.dec
.bigendian
.eq(self
.bigendian
)
1714 yield self
.dec2
.state
.msr
.eq(self
.msr
.value
)
1715 yield self
.dec2
.state
.pc
.eq(pc
)
1716 if self
.svstate
is not None:
1717 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
1719 # SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
1721 opcode
= yield self
.dec2
.dec
.opcode_in
1722 opcode
= SelectableInt(value
=opcode
, bits
=32)
1723 pfx
= self
.__decode
_prefix
(opcode
)
1724 log("prefix test: opcode:", pfx
.PO
, bin(pfx
.PO
), pfx
.id)
1725 self
.is_svp64_mode
= bool((pfx
.PO
== 0b000001) and (pfx
.id == 0b11))
1726 self
.pc
.update_nia(self
.is_svp64_mode
)
1728 yield self
.dec2
.is_svp64_mode
.eq(self
.is_svp64_mode
)
1729 self
.namespace
['NIA'] = self
.pc
.NIA
1730 self
.namespace
['SVSTATE'] = self
.svstate
1731 if not self
.is_svp64_mode
:
1734 # in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
1735 log("svp64.rm", bin(pfx
.rm
))
1736 log(" svstate.vl", self
.svstate
.vl
)
1737 log(" svstate.mvl", self
.svstate
.maxvl
)
1738 ins
= self
.imem
.ld(pc
+4, 4, False, True, instr_fetch
=True)
1739 log(" svsetup: 0x%x 0x%x %s" % (pc
+4, ins
& 0xffffffff, bin(ins
)))
1740 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff) # v3.0B suffix
1741 yield self
.dec2
.sv_rm
.eq(int(pfx
.rm
)) # svp64 prefix
1744 def execute_one(self
):
1745 """execute one instruction
1747 # get the disassembly code for this instruction
1748 if not self
.disassembly
:
1749 code
= yield from self
.get_assembly_name()
1752 if self
.is_svp64_mode
:
1753 offs
, dbg
= 4, "svp64 "
1754 code
= self
.disassembly
[self
._pc
+offs
]
1755 log(" %s sim-execute" % dbg
, hex(self
._pc
), code
)
1756 opname
= code
.split(' ')[0]
1758 yield from self
.call(opname
) # execute the instruction
1759 except MemException
as e
: # check for memory errors
1760 if e
.args
[0] == 'unaligned': # alignment error
1761 # run a Trap but set DAR first
1762 print("memory unaligned exception, DAR", e
.dar
, repr(e
))
1763 self
.spr
['DAR'] = SelectableInt(e
.dar
, 64)
1764 self
.call_trap(0x600, PIb
.PRIV
) # 0x600, privileged
1766 elif e
.args
[0] == 'invalid': # invalid
1767 # run a Trap but set DAR first
1768 log("RADIX MMU memory invalid error, mode %s" % e
.mode
)
1769 if e
.mode
== 'EXECUTE':
1770 # XXX TODO: must set a few bits in SRR1,
1771 # see microwatt loadstore1.vhdl
1772 # if m_in.segerr = '0' then
1773 # v.srr1(47 - 33) := m_in.invalid;
1774 # v.srr1(47 - 35) := m_in.perm_error; -- noexec fault
1775 # v.srr1(47 - 44) := m_in.badtree;
1776 # v.srr1(47 - 45) := m_in.rc_error;
1777 # v.intr_vec := 16#400#;
1779 # v.intr_vec := 16#480#;
1780 self
.call_trap(0x400, PIb
.PRIV
) # 0x400, privileged
1782 self
.call_trap(0x300, PIb
.PRIV
) # 0x300, privileged
1784 # not supported yet:
1785 raise e
# ... re-raise
1787 # append to the trace log file
1788 self
.trace(" # %s\n" % code
)
1790 log("gprs after code", code
)
1793 for i
in range(len(self
.crl
)):
1794 crs
.append(bin(self
.crl
[i
].asint()))
1795 log("crs", " ".join(crs
))
1796 log("vl,maxvl", self
.svstate
.vl
, self
.svstate
.maxvl
)
1798 # don't use this except in special circumstances
1799 if not self
.respect_pc
:
1802 log("execute one, CIA NIA", hex(self
.pc
.CIA
.value
),
1803 hex(self
.pc
.NIA
.value
))
1805 def get_assembly_name(self
):
1806 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1807 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1808 dec_insn
= yield self
.dec2
.e
.do
.insn
1809 insn_1_11
= yield self
.dec2
.e
.do
.insn
[1:11]
1810 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1811 int_op
= yield self
.dec2
.dec
.op
.internal_op
1812 log("get assembly name asmcode", asmcode
, int_op
,
1813 hex(dec_insn
), bin(insn_1_11
))
1814 asmop
= insns
.get(asmcode
, None)
1816 # sigh reconstruct the assembly instruction name
1817 if hasattr(self
.dec2
.e
.do
, "oe"):
1818 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
1819 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
1823 if hasattr(self
.dec2
.e
.do
, "rc"):
1824 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
1825 rc_ok
= yield self
.dec2
.e
.do
.rc
.ok
1829 # annoying: ignore rc_ok if RC1 is set (for creating *assembly name*)
1830 RC1
= yield self
.dec2
.rm_dec
.RC1
1834 # grrrr have to special-case MUL op (see DecodeOE)
1835 log("ov %d en %d rc %d en %d op %d" %
1836 (ov_ok
, ov_en
, rc_ok
, rc_en
, int_op
))
1837 if int_op
in [MicrOp
.OP_MUL_H64
.value
, MicrOp
.OP_MUL_H32
.value
]:
1842 if not asmop
.endswith("."): # don't add "." to "andis."
1845 if hasattr(self
.dec2
.e
.do
, "lk"):
1846 lk
= yield self
.dec2
.e
.do
.lk
1849 log("int_op", int_op
)
1850 if int_op
in [MicrOp
.OP_B
.value
, MicrOp
.OP_BC
.value
]:
1851 AA
= yield self
.dec2
.dec
.fields
.FormI
.AA
[0:-1]
1855 spr_msb
= yield from self
.get_spr_msb()
1856 if int_op
== MicrOp
.OP_MFCR
.value
:
1861 # XXX TODO: for whatever weird reason this doesn't work
1862 # https://bugs.libre-soc.org/show_bug.cgi?id=390
1863 if int_op
== MicrOp
.OP_MTCRF
.value
:
def reset_remaps(self):
    """Reset REMAP state: clear the four loop-end markers and restore
    the identity remap indices."""
    self.remap_loopends = [0, 0, 0, 0]
    self.remap_idxs = list(range(4))
1874 def get_remap_indices(self
):
1875 """WARNING, this function stores remap_idxs and remap_loopends
1876 in the class for later use. this to avoid problems with yield
1878 # go through all iterators in lock-step, advance to next remap_idx
1879 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
1880 # get four SVSHAPEs. here we are hard-coding
1882 SVSHAPE0
= self
.spr
['SVSHAPE0']
1883 SVSHAPE1
= self
.spr
['SVSHAPE1']
1884 SVSHAPE2
= self
.spr
['SVSHAPE2']
1885 SVSHAPE3
= self
.spr
['SVSHAPE3']
1886 # set up the iterators
1887 remaps
= [(SVSHAPE0
, SVSHAPE0
.get_iterator()),
1888 (SVSHAPE1
, SVSHAPE1
.get_iterator()),
1889 (SVSHAPE2
, SVSHAPE2
.get_iterator()),
1890 (SVSHAPE3
, SVSHAPE3
.get_iterator()),
1894 for i
, (shape
, remap
) in enumerate(remaps
):
1895 # zero is "disabled"
1896 if shape
.value
== 0x0:
1897 self
.remap_idxs
[i
] = 0
1898 # pick src or dststep depending on reg num (0-2=in, 3-4=out)
1899 step
= dststep
if (i
in [3, 4]) else srcstep
1900 # this is terrible. O(N^2) looking for the match. but hey.
1901 for idx
, (remap_idx
, loopends
) in enumerate(remap
):
1904 self
.remap_idxs
[i
] = remap_idx
1905 self
.remap_loopends
[i
] = loopends
1906 dbg
.append((i
, step
, remap_idx
, loopends
))
1907 for (i
, step
, remap_idx
, loopends
) in dbg
:
1908 log("SVSHAPE %d idx, end" % i
, step
, remap_idx
, bin(loopends
))
def get_spr_msb(self):
    """yield-based read of the raw instruction word; report whether
    bit 20 is set (the MSB of the split SPR number encoding)."""
    dec_insn = yield self.dec2.e.do.insn
    return (dec_insn & (1 << 20)) != 0  # sigh - XFF.spr[-1]?
1915 def call(self
, name
):
1916 """call(opcode) - the primary execution point for instructions
1918 self
.last_st_addr
= None # reset the last known store address
1919 self
.last_ld_addr
= None # etc.
1921 ins_name
= name
.strip() # remove spaces if not already done so
1923 log("halted - not executing", ins_name
)
1926 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1927 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1928 asmop
= yield from self
.get_assembly_name()
1929 log("call", ins_name
, asmop
)
1931 # sv.setvl is *not* a loop-function. sigh
1932 log("is_svp64_mode", self
.is_svp64_mode
, asmop
)
1935 int_op
= yield self
.dec2
.dec
.op
.internal_op
1936 spr_msb
= yield from self
.get_spr_msb()
1938 instr_is_privileged
= False
1939 if int_op
in [MicrOp
.OP_ATTN
.value
,
1940 MicrOp
.OP_MFMSR
.value
,
1941 MicrOp
.OP_MTMSR
.value
,
1942 MicrOp
.OP_MTMSRD
.value
,
1944 MicrOp
.OP_RFID
.value
]:
1945 instr_is_privileged
= True
1946 if int_op
in [MicrOp
.OP_MFSPR
.value
,
1947 MicrOp
.OP_MTSPR
.value
] and spr_msb
:
1948 instr_is_privileged
= True
1950 log("is priv", instr_is_privileged
, hex(self
.msr
.value
),
1952 # check MSR priv bit and whether op is privileged: if so, throw trap
1953 if instr_is_privileged
and self
.msr
[MSRb
.PR
] == 1:
1954 self
.call_trap(0x700, PIb
.PRIV
)
1957 # check halted condition
1958 if ins_name
== 'attn':
1962 # check illegal instruction
1964 if ins_name
not in ['mtcrf', 'mtocrf']:
1965 illegal
= ins_name
!= asmop
1967 # list of instructions not being supported by binutils (.long)
1968 dotstrp
= asmop
[:-1] if asmop
[-1] == '.' else asmop
1969 if dotstrp
in [*FPTRANS_INSNS
,
1971 'ffmadds', 'fdmadds', 'ffadds',
1973 "brh", "brw", "brd",
1974 'setvl', 'svindex', 'svremap', 'svstep',
1975 'svshape', 'svshape2',
1976 'ternlogi', 'bmask', 'cprop',
1977 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
1978 'fmvis', 'fishmv', 'pcdec', "maddedu", "divmod2du",
1979 "dsld", "dsrd", "maddedus",
1980 "sadd", "saddw", "sadduw",
1985 "maddsubrs", "maddrs", "msubrs",
1986 "cfuged", "cntlzdm", "cnttzdm", "pdepd", "pextd",
1987 "setbc", "setbcr", "setnbc", "setnbcr",
1992 # branch-conditional redirects to sv.bc
1993 if asmop
.startswith('bc') and self
.is_svp64_mode
:
1994 ins_name
= 'sv.%s' % ins_name
1996 # ld-immediate-with-pi mode redirects to ld-with-postinc
1997 ldst_imm_postinc
= False
1998 if 'u' in ins_name
and self
.is_svp64_mode
:
1999 ldst_pi
= yield self
.dec2
.rm_dec
.ldst_postinc
2001 ins_name
= ins_name
.replace("u", "up")
2002 ldst_imm_postinc
= True
2003 log(" enable ld/st postinc", ins_name
)
2005 log(" post-processed name", dotstrp
, ins_name
, asmop
)
2007 # illegal instructions call TRAP at 0x700
2009 print("illegal", ins_name
, asmop
)
2010 self
.call_trap(0x700, PIb
.ILLEG
)
2011 print("name %s != %s - calling ILLEGAL trap, PC: %x" %
2012 (ins_name
, asmop
, self
.pc
.CIA
.value
))
2015 # this is for setvl "Vertical" mode: if set true,
2016 # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
2017 # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
2018 self
.allow_next_step_inc
= False
2019 self
.svstate_next_mode
= 0
2021 # nop has to be supported, we could let the actual op calculate
2022 # but PowerDecoder has a pattern for nop
2023 if ins_name
== 'nop':
2024 self
.update_pc_next()
2027 # get elwidths, defaults to 64
2031 if self
.is_svp64_mode
:
2032 ew_src
= yield self
.dec2
.rm_dec
.ew_src
2033 ew_dst
= yield self
.dec2
.rm_dec
.ew_dst
2034 ew_src
= 8 << (3-int(ew_src
)) # convert to bitlength
2035 ew_dst
= 8 << (3-int(ew_dst
)) # convert to bitlength
2036 xlen
= max(ew_src
, ew_dst
)
2037 log("elwdith", ew_src
, ew_dst
)
2038 log("XLEN:", self
.is_svp64_mode
, xlen
)
2040 # look up instruction in ISA.instrs, prepare namespace
2041 if ins_name
== 'pcdec': # grrrr yes there are others ("stbcx." etc.)
2042 info
= self
.instrs
[ins_name
+"."]
2043 elif asmop
[-1] == '.' and asmop
in self
.instrs
:
2044 info
= self
.instrs
[asmop
]
2046 info
= self
.instrs
[ins_name
]
2047 yield from self
.prep_namespace(ins_name
, info
.form
, info
.op_fields
,
2050 # preserve order of register names
2051 input_names
= create_args(list(info
.read_regs
) +
2052 list(info
.uninit_regs
))
2053 log("input names", input_names
)
2055 # get SVP64 entry for the current instruction
2056 sv_rm
= self
.svp64rm
.instrs
.get(ins_name
)
2057 if sv_rm
is not None:
2058 dest_cr
, src_cr
, src_byname
, dest_byname
= decode_extra(sv_rm
)
2060 dest_cr
, src_cr
, src_byname
, dest_byname
= False, False, {}, {}
2061 log("sv rm", sv_rm
, dest_cr
, src_cr
, src_byname
, dest_byname
)
2063 # see if srcstep/dststep need skipping over masked-out predicate bits
2064 # svstep also needs advancement because it calls SVSTATE_NEXT.
2065 # bit the remaps get computed just after pre_inc moves them on
2066 # with remap_set_steps substituting for PowerDecider2 not doing it,
2067 # and SVSTATE_NEXT not being able to.use yield, the preinc on
2068 # svstep is necessary for now.
2070 if (self
.is_svp64_mode
or ins_name
in ['svstep']):
2071 yield from self
.svstate_pre_inc()
2072 if self
.is_svp64_mode
:
2073 pre
= yield from self
.update_new_svstate_steps()
2075 self
.svp64_reset_loop()
2077 self
.update_pc_next()
2079 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2080 pred_dst_zero
= self
.pred_dst_zero
2081 pred_src_zero
= self
.pred_src_zero
2082 vl
= self
.svstate
.vl
2083 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2085 # VL=0 in SVP64 mode means "do nothing: skip instruction"
2086 if self
.is_svp64_mode
and vl
== 0:
2087 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
2088 log("SVP64: VL=0, end of call", self
.namespace
['CIA'],
2089 self
.namespace
['NIA'], kind
=LogKind
.InstrInOuts
)
2092 # for when SVREMAP is active, using pre-arranged schedule.
2093 # note: modifying PowerDecoder2 needs to "settle"
2094 remap_en
= self
.svstate
.SVme
2095 persist
= self
.svstate
.RMpst
2096 active
= (persist
or self
.last_op_svshape
) and remap_en
!= 0
2097 if self
.is_svp64_mode
:
2098 yield self
.dec2
.remap_active
.eq(remap_en
if active
else 0)
2100 if persist
or self
.last_op_svshape
:
2101 remaps
= self
.get_remap_indices()
2102 if self
.is_svp64_mode
and (persist
or self
.last_op_svshape
):
2103 yield from self
.remap_set_steps(remaps
)
2104 # after that, settle down (combinatorial) to let Vector reg numbers
2105 # work themselves out
2107 if self
.is_svp64_mode
:
2108 remap_active
= yield self
.dec2
.remap_active
2110 remap_active
= False
2111 log("remap active", bin(remap_active
))
2113 # main input registers (RT, RA ...)
2115 for name
in input_names
:
2116 regval
= (yield from self
.get_input(name
, ew_src
))
2117 log("regval name", name
, regval
)
2118 inputs
.append(regval
)
2120 # arrrrgh, awful hack, to get _RT into namespace
2121 if ins_name
in ['setvl', 'svstep']:
2123 RT
= yield self
.dec2
.dec
.RT
2124 self
.namespace
[regname
] = SelectableInt(RT
, 5)
2126 self
.namespace
["RT"] = SelectableInt(0, 5)
2127 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, "RT")
2128 log('hack input reg %s %s' % (name
, str(regnum
)), is_vec
)
2130 # in SVP64 mode for LD/ST work out immediate
2131 # XXX TODO: replace_ds for DS-Form rather than D-Form.
2132 # use info.form to detect
2133 if self
.is_svp64_mode
and not ldst_imm_postinc
:
2134 yield from self
.check_replace_d(info
, remap_active
)
2136 # "special" registers
2137 for special
in info
.special_regs
:
2138 if special
in special_sprs
:
2139 inputs
.append(self
.spr
[special
])
2141 inputs
.append(self
.namespace
[special
])
2143 # clear trap (trap) NIA
2144 self
.trap_nia
= None
2146 # check if this was an sv.bc* and create an indicator that
2147 # this is the last check to be made as a loop. combined with
2148 # the ALL/ANY mode we can early-exit
2149 if self
.is_svp64_mode
and ins_name
.startswith("sv.bc"):
2150 no_in_vec
= yield self
.dec2
.no_in_vec
# BI is scalar
2151 end_loop
= no_in_vec
or srcstep
== vl
-1 or dststep
== vl
-1
2152 self
.namespace
['end_loop'] = SelectableInt(end_loop
, 1)
2154 inp_ca_ov
= (self
.spr
['XER'][XER_bits
['CA']].value
,
2155 self
.spr
['XER'][XER_bits
['OV']].value
)
2157 # execute actual instruction here (finally)
2158 log("inputs", inputs
)
2159 results
= info
.func(self
, *inputs
)
2160 output_names
= create_args(info
.write_regs
)
2162 for out
, n
in zip(results
or [], output_names
):
2164 log("results", outs
)
2166 # "inject" decorator takes namespace from function locals: we need to
2167 # overwrite NIA being overwritten (sigh)
2168 if self
.trap_nia
is not None:
2169 self
.namespace
['NIA'] = self
.trap_nia
2171 log("after func", self
.namespace
['CIA'], self
.namespace
['NIA'])
2173 # check if op was a LD/ST so that debugging can check the
2175 if int_op
in [MicrOp
.OP_STORE
.value
,
2177 self
.last_st_addr
= self
.mem
.last_st_addr
2178 if int_op
in [MicrOp
.OP_LOAD
.value
,
2180 self
.last_ld_addr
= self
.mem
.last_ld_addr
2181 log("op", int_op
, MicrOp
.OP_STORE
.value
, MicrOp
.OP_LOAD
.value
,
2182 self
.last_st_addr
, self
.last_ld_addr
)
2184 # detect if CA/CA32 already in outputs (sra*, basically)
2186 ca32
= outs
.get("CA32")
2188 log("carry already done?", ca
, ca32
, output_names
)
2189 # soc test_pipe_caller tests don't have output_carry
2190 has_output_carry
= hasattr(self
.dec2
.e
.do
, "output_carry")
2191 carry_en
= has_output_carry
and (yield self
.dec2
.e
.do
.output_carry
)
2193 yield from self
.handle_carry_(
2194 inputs
, results
[0], ca
, ca32
, inp_ca_ov
=inp_ca_ov
)
2196 # get output named "overflow" and "CR0"
2197 overflow
= outs
.get('overflow')
2198 cr0
= outs
.get('CR0')
2199 cr1
= outs
.get('CR1')
2201 # soc test_pipe_caller tests don't have oe
2202 has_oe
= hasattr(self
.dec2
.e
.do
, "oe")
2203 # yeah just no. not in parallel processing
2204 if has_oe
and not self
.is_svp64_mode
:
2205 # detect if overflow was in return result
2206 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2207 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2208 log("internal overflow", ins_name
, overflow
, "en?", ov_en
, ov_ok
)
2210 yield from self
.handle_overflow(
2211 inputs
, results
[0], overflow
, inp_ca_ov
=inp_ca_ov
)
2213 # only do SVP64 dest predicated Rc=1 if dest-pred is not enabled
2215 if not self
.is_svp64_mode
or not pred_dst_zero
:
2216 if hasattr(self
.dec2
.e
.do
, "rc"):
2217 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2218 # don't do Rc=1 for svstep it is handled explicitly.
2219 # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
2220 # to write directly to CR0 instead of in ISACaller. hooyahh.
2221 if rc_en
and ins_name
not in ['svstep']:
2222 yield from self
.do_rc_ov(
2223 ins_name
, results
[0], overflow
, cr0
, cr1
, output_names
)
2226 ffirst_hit
= False, False
2227 if self
.is_svp64_mode
:
2228 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2229 is_cr
= sv_mode
== SVMode
.CROP
.value
2230 chk
= rc_en
or is_cr
2231 ffirst_hit
= (yield from self
.check_ffirst(info
, chk
, srcstep
))
2233 # check if a FP Exception occurred. TODO for DD-FFirst, check VLi
2234 # and raise the exception *after* if VLi=1 but if VLi=0 then
2235 # truncate and make the exception "disappear".
2236 if self
.FPSCR
.FEX
and (self
.msr
[MSRb
.FE0
] or self
.msr
[MSRb
.FE1
]):
2237 self
.call_trap(0x700, PIb
.FP
)
2240 # any modified return results?
2241 yield from self
.do_outregs_nia(asmop
, ins_name
, info
, outs
,
2242 carry_en
, rc_en
, ffirst_hit
, ew_dst
)
def check_ffirst(self, info, rc_en, srcstep):
    """fail-first mode: checks a bit of Rc Vector, truncates VL

    Reads the SVP64 RM fail-first fields from the decoder, tests the
    selected bit of the just-written CR field against the inversion
    flag, and on a "hit" truncates SVSTATE.VL at srcstep (VLi makes
    the truncation inclusive of the failing element).

    NOTE(review): this chunk of the file is truncated - the early
    "return" exits and the assignment of `crf` are missing from the
    visible text; branch bodies below are therefore incomplete.
    """
    # decode the fail-first RM sub-fields from PowerDecoder2
    rm_mode = yield self.dec2.rm_dec.mode
    ff_inv = yield self.dec2.rm_dec.inv        # CR-bit inversion flag
    cr_bit = yield self.dec2.rm_dec.cr_sel     # which CR bit to test
    RC1 = yield self.dec2.rm_dec.RC1
    vli_ = yield self.dec2.rm_dec.vli  # VL inclusive if truncated
    log(" ff rm_mode", rc_en, rm_mode, SVP64RMMode.FFIRST.value)
    log(" cr_bit", cr_bit)
    log(" rc_en", rc_en)
    # not Rc=1 or not fail-first mode: nothing to do
    # NOTE(review): the early-exit body of this branch is missing here
    if not rc_en or rm_mode != SVP64RMMode.FFIRST.value:
    # get the CR vector, do BO-test
    # NOTE(review): `crf` is assigned in lines missing from this chunk
    log("asmregs", info.asmregs[0], info.write_regs)
    if 'CR' in info.write_regs and 'BF' in info.asmregs[0]:
    regnum, is_vec = yield from get_cr_out(self.dec2, crf)
    crtest = self.crl[regnum]            # CR field just written by the op
    ffirst_hit = crtest[cr_bit] != ff_inv
    log("cr test", crf, regnum, int(crtest), crtest, cr_bit, ff_inv)
    log("cr test?", ffirst_hit)
    # Fail-first activated, truncate VL
    vli = SelectableInt(int(vli_), 7)
    self.svstate.vl = srcstep + vli      # VLi=1 keeps the failing element
    # push the new SVSTATE into the decoder and let it settle
    yield self.dec2.state.svstate.eq(self.svstate.value)
    yield Settle()  # let decoder update
def do_rc_ov(self, ins_name, result, overflow, cr0, cr1, output_names):
    """Rc=1 post-processing: write CR0/CR1 based on the result.

    NOTE(review): this chunk is truncated - the surrounding branch
    structure (several if/elif lines and the `rc_reg` assignment)
    is missing from the visible text, so the flow below is partial.
    """
    cr_out = yield self.dec2.op.cr_out
    # CR1 (FP) variant of Rc=1
    if cr_out == CROutSel.CR1.value:
    # NOTE(review): `rc_reg` is defined in lines missing from this chunk
    regnum, is_vec = yield from get_cr_out(self.dec2, rc_reg)
    # hang on... for `setvl` actually you want to test SVSTATE.VL
    is_setvl = ins_name in ('svstep', 'setvl')
    result = SelectableInt(result.vl, 64)   # test VL, not the raw result
    # overflow = None # do not override overflow except in setvl
    # default CR1: copy the FPSCR summary flags FX/FEX/VX/OX into bits 3..0
    cr1 = int(self.FPSCR.FX) << 3
    cr1 |= int(self.FPSCR.FEX) << 2
    cr1 |= int(self.FPSCR.VX) << 1
    cr1 |= int(self.FPSCR.OX)
    log("default fp cr1", cr1)
    log("explicit cr1", cr1)
    self.crl[regnum].eq(cr1)
    # if there was not an explicit CR0 in the pseudocode,
    # do a standard comparison against zero
    self.handle_comparison(result, regnum, overflow, no_so=is_setvl)
    # otherwise we just blat CR0 into the required regnum
    log("explicit rc0", cr0)
    self.crl[regnum].eq(cr0)
def do_outregs_nia(self, asmop, ins_name, info, outs,
                   ca_en, rc_en, ffirst_hit, ew_dst):
    """write back result registers, then advance (or hold) the PC.

    ffirst_hit is the (hit, vli) pair produced by check_ffirst.

    NOTE(review): chunk is truncated - the continuation of the
    check_step_increment call and the `if nia_update:` guard around
    update_pc_next are missing from the visible text.
    """
    ffirst_hit, vli = ffirst_hit
    # write out any regs for this instruction, but only if fail-first is ok
    # XXX TODO: allow CR-vector to be written out even if ffirst fails
    if not ffirst_hit or vli:
        for name, output in outs.items():
            yield from self.check_write(info, name, output, ca_en, ew_dst)
    # restore the CR value on non-VLI failfirst (from sv.cmp and others
    # which write directly to CR in the pseudocode (gah, what a mess)
    # if ffirst_hit and not vli:
    #    self.cr.value = self.cr_backup
    # NOTE(review): a guard branch is missing here in the visible text
    self.svp64_reset_loop()
    # check advancement of src/dst/sub-steps and if PC needs updating
    nia_update = (yield from self.check_step_increment(rc_en,
    self.update_pc_next()
def check_replace_d(self, info, remap_active):
    """in SVP64 LD/ST mode, recompute the D/DS immediate per element.

    Unit-strided mode adds offset*width to the immediate; element-
    strided mode multiplies the immediate by the element step.  The
    adjusted immediate is placed back into the pseudocode namespace
    as 'D' (and 'DS' for DS-Form).

    NOTE(review): chunk is truncated - several `else:`/guard lines
    (including the remap_active test) are missing from the visible
    text, so branch nesting below is incomplete.
    """
    replace_d = False  # update / replace constant in pseudocode
    ldstmode = yield self.dec2.rm_dec.ldstmode
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    srcstep, dststep = self.new_srcstep, self.new_dststep
    ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep
    if info.form == 'DS':
        # DS-Form, multiply by 4 then knock 2 bits off after
        imm = yield self.dec2.dec.fields.FormDS.DS[0:14] * 4
    # NOTE(review): the `else:` for the D-Form path is missing here
    imm = yield self.dec2.dec.fields.FormD.D[0:16]
    imm = exts(imm, 16)  # sign-extend to integer
    # get the right step. LD is from srcstep, ST is dststep
    op = yield self.dec2.e.do.insn_type
    if op == MicrOp.OP_LOAD.value:
    # NOTE(review): remap_active guard missing in visible text
    offsmul = yield self.dec2.in1_step
    log("D-field REMAP src", imm, offsmul, ldstmode)
    offsmul = (srcstep * (subvl+1)) + ssubstep
    log("D-field src", imm, offsmul, ldstmode)
    elif op == MicrOp.OP_STORE.value:
    # XXX NOTE! no bit-reversed STORE! this should not ever be used
    offsmul = (dststep * (subvl+1)) + dsubstep
    log("D-field dst", imm, offsmul, ldstmode)
    # Unit-Strided LD/ST adds offset*width to immediate
    if ldstmode == SVP64LDSTmode.UNITSTRIDE.value:
        ldst_len = yield self.dec2.e.do.data_len
        imm = SelectableInt(imm + offsmul * ldst_len, 32)
    # Element-strided multiplies the immediate by element step
    elif ldstmode == SVP64LDSTmode.ELSTRIDE.value:
        imm = SelectableInt(imm * offsmul, 32)
    ldst_ra_vec = yield self.dec2.rm_dec.ldst_ra_vec
    ldst_imz_in = yield self.dec2.rm_dec.ldst_imz_in
    log("LDSTmode", SVP64LDSTmode(ldstmode),
        offsmul, imm, ldst_ra_vec, ldst_imz_in)
    # new replacement D... errr.. DS
    if info.form == 'DS':
        # TODO: assert 2 LSBs are zero?
        log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm.value))
        imm.value = imm.value >> 2
        self.namespace['DS'] = imm
    # NOTE(review): the D-Form `else:` is missing from the visible text
    self.namespace['D'] = imm
def get_input(self, name, ew_src):
    """read one input operand by pseudocode register name (RA/RB/...).

    Resolves the register index via PowerDecoder2, reads the value at
    element-width ew_src, and (in non-SVP64 mode or ew=64) also makes
    the raw register *number* available as "_<name>" in the namespace.

    NOTE(review): chunk is truncated - several guard branches
    (`if regnum is None` fallthroughs, the FPR-vs-GPR selection and
    the final `return reg_val`) are missing from the visible text.
    """
    # using PowerDecoder2, first, find the decoder index.
    # (mapping name RA RB RC RS to in1, in2, in3)
    regnum, is_vec = yield from get_idx_in(self.dec2, name, True)
    # doing this is not part of svp64, it's because output
    # registers, to be modified, need to be in the namespace.
    regnum, is_vec = yield from get_idx_out(self.dec2, name, True)
    regnum, is_vec = yield from get_idx_out2(self.dec2, name, True)
    # a tuple regnum carries (regnum, base, offset) for elwidth access
    if isinstance(regnum, tuple):
        (regnum, base, offs) = regnum
    # NOTE(review): the `else:` for this fallback is missing here
    base, offs = regnum, 0  # temporary HACK
    # in case getting the register number is needed, _RA, _RB
    # (HACK: only in straight non-svp64-mode for now, or elwidth == 64)
    regname = "_" + name
    if not self.is_svp64_mode or ew_src == 64:
        self.namespace[regname] = regnum
    elif regname in self.namespace:
        del self.namespace[regname]
    # predicate-zeroing skips the actual read (zero is substituted below)
    if not self.is_svp64_mode or not self.pred_src_zero:
        log('reading reg %s %s' % (name, str(regnum)), is_vec)
    # NOTE(review): the FPR-name test guarding this read is missing here
    reg_val = SelectableInt(self.fpr(base, is_vec, offs, ew_src))
    log("read reg %d/%d: 0x%x" % (base, offs, reg_val.value),
        kind=LogKind.InstrInOuts)
    self.trace("r:FPR:%d:%d:%d " % (base, offs, ew_src))
    elif name is not None:
    reg_val = SelectableInt(self.gpr(base, is_vec, offs, ew_src))
    self.trace("r:GPR:%d:%d:%d " % (base, offs, ew_src))
    log("read reg %d/%d: 0x%x" % (base, offs, reg_val.value),
        kind=LogKind.InstrInOuts)
    # predicate-zeroed element: substitute zero at the source width
    log('zero input reg %s %s' % (name, str(regnum)), is_vec)
    reg_val = SelectableInt(0, ew_src)
def remap_set_steps(self, remaps):
    """remap_set_steps sets up the in1/2/3 and out1/2 steps.
    they work in concert with PowerDecoder2 at the moment,
    there is no HDL implementation of REMAP. therefore this
    function, because ISACaller still uses PowerDecoder2,
    will *explicitly* write the dec2.XX_step values. this has

    NOTE(review): the docstring end and several structural lines
    (the SVSHAPE debug loop header, the `steps` list terminator,
    the FP-fallback/idx-dispatch branches, `rremaps = []` and the
    final `for x in rremaps:` loop) are missing from this truncated
    chunk; the code below is therefore incomplete as shown.
    """
    # just some convenient debug info
    # NOTE(review): enclosing `for i in range(4):` missing here
    sname = 'SVSHAPE%d' % i
    shape = self.spr[sname]
    log(sname, bin(shape.value))
    log(" lims", shape.lims)
    log(" mode", shape.mode)
    log(" skip", shape.skip)
    # set up the list of steps to remap
    mi0 = self.svstate.mi0
    mi1 = self.svstate.mi1
    mi2 = self.svstate.mi2
    mo0 = self.svstate.mo0
    mo1 = self.svstate.mo1
    steps = [[self.dec2.in1_step, mi0],  # RA
             [self.dec2.in2_step, mi1],  # RB
             [self.dec2.in3_step, mi2],  # RC
             [self.dec2.o_step, mo0],    # RT
             [self.dec2.o2_step, mo1],   # EA
    rnames = ['RA', 'RB', 'RC', 'RT', 'RS']
    for i, reg in enumerate(rnames):
        idx = yield from get_idx_map(self.dec2, reg)
        # fall back to FP register names (FRA etc.)
        idx = yield from get_idx_map(self.dec2, "F"+reg)
        # NOTE(review): the idx-comparison dispatch lines are missing;
        # only the assignments of the selected step survive here
        steps[i][0] = self.dec2.in1_step
        steps[i][0] = self.dec2.in2_step
        steps[i][0] = self.dec2.in3_step
        log("remap step", i, reg, idx, steps[i][1])
    remap_idxs = self.remap_idxs
    # now cross-index the required SHAPE for each of 3-in 2-out regs
    rnames = ['RA', 'RB', 'RC', 'RT', 'EA']
    for i, (dstep, shape_idx) in enumerate(steps):
        (shape, remap) = remaps[shape_idx]
        remap_idx = remap_idxs[shape_idx]
        # zero is "disabled"
        if shape.value == 0x0:
        # now set the actual requested step to the current index
        if dstep is not None:
            yield dstep.eq(remap_idx)
        # debug printout info
        rremaps.append((shape.mode, hex(shape.value), dstep,
                        i, rnames[i], shape_idx, remap_idx))
    # NOTE(review): `for x in rremaps:` loop header missing here
    log("shape remap", x)
def check_write(self, info, name, output, carry_en, ew_dst):
    """write one named result to its destination (XER/SPR/GPR/FPR).

    NOTE(review): chunk is truncated - the early `return`s, the
    carry_en guard, the `reg_prefix` assignment and the FPR-vs-GPR
    selection branches are missing from the visible text.
    """
    if name == 'overflow':  # ignore, done already (above)
    if name == 'CR0':  # ignore, done already (above)
    # plain python ints from pseudocode get wrapped unconstrained
    if isinstance(output, int):
        output = SelectableInt(output, EFFECTIVELY_UNLIMITED)
    # FPSCR is written through its dedicated state object
    if name in ['FPSCR', ]:
        log("write FPSCR 0x%x" % (output.value))
        self.FPSCR.eq(output)
    # carry flags go to XER (guarded by carry_en in the missing lines)
    if name in ['CA', 'CA32']:
        log("writing %s to XER" % name, output)
        log("write XER %s 0x%x" % (name, output.value))
        self.spr['XER'][XER_bits[name]] = output.value
    log("NOT writing %s to XER" % name, output)
    # write special SPRs
    if name in info.special_regs:
        log('writing special %s' % name, output, special_sprs)
        log("write reg %s 0x%x" % (name, output.value),
            kind=LogKind.InstrInOuts)
        if name in special_sprs:
            self.spr[name] = output
        self.namespace[name].eq(output)
        log('msr written', hex(self.msr.value))
    # find out1/out2 PR/FPR
    regnum, is_vec = yield from get_idx_out(self.dec2, name, True)
    regnum, is_vec = yield from get_idx_out2(self.dec2, name, True)
    # temporary hack for not having 2nd output
    regnum = yield getattr(self.decoder, name)
    # convenient debug prefix
    # NOTE(review): `reg_prefix` is assigned in lines missing here
    # check zeroing due to predicate bit being zero
    if self.is_svp64_mode and self.pred_dst_zero:
        log('zeroing reg %s %s' % (str(regnum), str(output)), is_vec)
        output = SelectableInt(0, EFFECTIVELY_UNLIMITED)
    log("write reg %s%s 0x%x ew %d" % (reg_prefix, str(regnum),
                                       output.value, ew_dst),
        kind=LogKind.InstrInOuts)
    # zero-extend to 64 bit before storing (should use EXT oh well)
    if output.bits > 64:
        output = SelectableInt(output.value, 64)
    rnum, base, offset = regnum
    # NOTE(review): FPR-vs-GPR selection branch missing here
    self.fpr.write(regnum, output, is_vec, ew_dst)
    self.trace("w:FPR:%d:%d:%d " % (rnum, offset, ew_dst))
    self.gpr.write(regnum, output, is_vec, ew_dst)
    self.trace("w:GPR:%d:%d:%d " % (rnum, offset, ew_dst))
def check_step_increment(self, rc_en, asmop, ins_name):
    """decide whether SVSTATE steps advance and if the PC may update.

    Returns (via svstate_post_inc) whether NIA may be committed, i.e.
    whether the Sub-Program-Counter element loop has completed.

    NOTE(review): chunk is truncated - several branch/else lines
    (including the tail of the last_op_svshape list and the guards
    around the pre/post-increment paths) are missing from the
    visible text.
    """
    # check if it is the SVSTATE.src/dest step that needs incrementing
    # this is our Sub-Program-Counter loop from 0 to VL-1
    if not self.allow_next_step_inc:
        if self.is_svp64_mode:
            return (yield from self.svstate_post_inc(ins_name))
        # XXX only in non-SVP64 mode!
        # record state of whether the current operation was an svshape,
        # to be able to know if it should apply in the next instruction.
        # also (if going to use this instruction) should disable ability
        # to interrupt in between. sigh.
        self.last_op_svshape = asmop in ['svremap', 'svindex',
    # explicit svstep increment requested (Vertical-First mode)
    log("SVSTATE_NEXT: inc requested, mode",
        self.svstate_next_mode, self.allow_next_step_inc)
    yield from self.svstate_pre_inc()
    pre = yield from self.update_new_svstate_steps()
    # reset at end of loop including exit Vertical Mode
    log("SVSTATE_NEXT: end of loop, reset")
    self.svp64_reset_loop()
    self.svstate.vfirst = 0
    self.handle_comparison(SelectableInt(0, 64))  # CR0
    if self.allow_next_step_inc == 2:
        log("SVSTATE_NEXT: read")
        nia_update = (yield from self.svstate_post_inc(ins_name))
    log("SVSTATE_NEXT: post-inc")
    # use actual (cached) src/dst-step here to check end
    remaps = self.get_remap_indices()
    remap_idxs = self.remap_idxs
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    if self.allow_next_step_inc != 2:
        yield from self.advance_svstate_steps()
    #self.namespace['SVSTATE'] = self.svstate.spr
    # set CR0 (if Rc=1) based on end
    endtest = 1 if self.at_loopend() else 0
    #results = [SelectableInt(endtest, 64)]
    # self.handle_comparison(results) # CR0
    # see if svstep was requested, if so, which SVSTATE
    if self.svstate_next_mode > 0:
        shape_idx = self.svstate_next_mode.value-1
        endings = self.remap_loopends[shape_idx]
        cr_field = SelectableInt((~endings) << 1 | endtest, 4)
        log("svstep Rc=1, CR0", cr_field, endtest)
        self.crl[0].eq(cr_field)  # CR0
    # reset at end of loop including exit Vertical Mode
    log("SVSTATE_NEXT: after increments, reset")
    self.svp64_reset_loop()
    self.svstate.vfirst = 0
def SVSTATE_NEXT(self, mode, submode):
    """explicitly moves srcstep/dststep on to next element, for
    "Vertical-First" mode. this function is called from
    setvl pseudo-code, as a pseudo-op "svstep"

    WARNING: this function uses information that was created EARLIER
    due to it being in the middle of a yield, but this function is
    *NOT* called from yield (it's called from compiled pseudocode).

    mode selects the 7-bit value reported back: 1-4 return the
    current REMAP index for the corresponding SVSHAPE, 5-8 return
    srcstep/dststep/ssubstep/dsubstep respectively, anything else
    returns zero.
    """
    self.allow_next_step_inc = submode.value + 1
    log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
    self.svstate_next_mode = mode
    # modes 1..4: report the remap index of the selected SVSHAPE
    if self.svstate_next_mode > 0 and self.svstate_next_mode < 5:
        remap_index = self.svstate_next_mode.value - 1
        return SelectableInt(self.remap_idxs[remap_index], 7)
    # modes 5..8: report the matching step counter; the pending
    # "next mode" request is cancelled once it has been read
    for mode_val, attrname in ((5, 'srcstep'), (6, 'dststep'),
                               (7, 'ssubstep'), (8, 'dsubstep')):
        if self.svstate_next_mode == mode_val:
            self.svstate_next_mode = 0
            return SelectableInt(getattr(self.svstate, attrname), 7)
    # unrecognised mode: report zero
    return SelectableInt(0, 7)
def get_src_dststeps(self):
    """gets srcstep, dststep, and ssubstep, dsubstep

    returns the four cached "new_*" loop-step values as one tuple,
    in the order (srcstep, dststep, ssubstep, dsubstep).
    """
    main_steps = (self.new_srcstep, self.new_dststep)
    sub_steps = (self.new_ssubstep, self.new_dsubstep)
    return main_steps + sub_steps
def update_svstate_namespace(self, overwrite_svstate=True):
    """publish self.svstate to the pseudocode namespace and decoder.

    when overwrite_svstate is True the cached "new_*" step values
    are first copied into SVSTATE.  the decoder (dec2) is then handed
    the updated SVSTATE and allowed to settle.
    """
    if overwrite_svstate:
        # note, do not get the bit-reversed srcstep here!
        # copy each cached step value straight into SVSTATE
        self.svstate.srcstep = self.new_srcstep
        self.svstate.dststep = self.new_dststep
        self.svstate.ssubstep = self.new_ssubstep
        self.svstate.dsubstep = self.new_dsubstep
    self.namespace['SVSTATE'] = self.svstate
    yield self.dec2.state.svstate.eq(self.svstate.value)
    yield Settle()  # let decoder update
def update_new_svstate_steps(self, overwrite_svstate=True):
    """push updated steps into SVSTATE/decoder, then report loop-end.

    Returns True when either the source or destination step (with
    its sub-step) has reached VL/subvl, i.e. the element loop is done.

    NOTE(review): chunk is truncated - a few of the original debug
    log lines between the reads and the return are missing from the
    visible text.
    """
    yield from self.update_svstate_namespace(overwrite_svstate)
    # re-read the (possibly updated) steps back out of SVSTATE
    srcstep = self.svstate.srcstep
    dststep = self.svstate.dststep
    ssubstep = self.svstate.ssubstep
    dsubstep = self.svstate.dsubstep
    pack = self.svstate.pack
    unpack = self.svstate.unpack
    vl = self.svstate.vl
    sv_mode = yield self.dec2.rm_dec.sv_mode
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    rm_mode = yield self.dec2.rm_dec.mode
    ff_inv = yield self.dec2.rm_dec.inv
    cr_bit = yield self.dec2.rm_dec.cr_sel
    log(" srcstep", srcstep)
    log(" dststep", dststep)
    log(" unpack", unpack)
    log(" ssubstep", ssubstep)
    log(" dsubstep", dsubstep)
    log(" subvl", subvl)
    log(" rm_mode", rm_mode)
    log(" sv_mode", sv_mode)
    log(" cr_bit", cr_bit)
    # check if end reached (we let srcstep overrun, above)
    # nothing needs doing (TODO zeroing): just do next instruction
    return ((ssubstep == subvl and srcstep == vl) or
            (dsubstep == subvl and dststep == vl))
def svstate_post_inc(self, insn_name, vf=0):
    """post-instruction SVSTATE advance for the element loop.

    Returns whether the PC may be committed (True) or whether the
    same instruction must repeat with the next element (False).

    NOTE(review): chunk is truncated - several early-return and
    `else:` lines are missing from the visible text, so branch
    nesting below is incomplete.
    """
    # check if SV "Vertical First" mode is enabled
    vfirst = self.svstate.vfirst
    log(" SV Vertical First", vf, vfirst)
    # NOTE(review): the Vertical-First early-exit body is missing here
    if not vf and vfirst == 1:
    # check if it is the SVSTATE.src/dest step that needs incrementing
    # this is our Sub-Program-Counter loop from 0 to VL-1
    # XXX twin predication TODO
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    mvl = self.svstate.maxvl
    srcstep = self.svstate.srcstep
    dststep = self.svstate.dststep
    ssubstep = self.svstate.ssubstep
    dsubstep = self.svstate.dsubstep
    pack = self.svstate.pack
    unpack = self.svstate.unpack
    rm_mode = yield self.dec2.rm_dec.mode
    reverse_gear = yield self.dec2.rm_dec.reverse_gear
    sv_ptype = yield self.dec2.dec.op.SV_Ptype
    out_vec = not (yield self.dec2.no_out_vec)
    in_vec = not (yield self.dec2.no_in_vec)
    log(" svstate.vl", vl)
    log(" svstate.mvl", mvl)
    log(" rm.subvl", subvl)
    log(" svstate.srcstep", srcstep)
    log(" svstate.dststep", dststep)
    log(" svstate.ssubstep", ssubstep)
    log(" svstate.dsubstep", dsubstep)
    log(" svstate.pack", pack)
    log(" svstate.unpack", unpack)
    log(" mode", rm_mode)
    log(" reverse", reverse_gear)
    log(" out_vec", out_vec)
    log(" in_vec", in_vec)
    log(" sv_ptype", sv_ptype, sv_ptype == SVPType.P2.value)
    # check if this was an sv.bc* and if so did it succeed
    if self.is_svp64_mode and insn_name.startswith("sv.bc"):
        end_loop = self.namespace['end_loop']
        log("branch %s end_loop" % insn_name, end_loop)
        # NOTE(review): the end_loop guard line is missing here
        self.svp64_reset_loop()
        self.update_pc_next()
    # check if srcstep needs incrementing by one, stop PC advancing
    # but for 2-pred both src/dest have to be checked.
    # XXX this might not be true! it may just be LD/ST
    if sv_ptype == SVPType.P2.value:
        svp64_is_vector = (out_vec or in_vec)
    # NOTE(review): `else:` for the single-predicate case missing here
    svp64_is_vector = out_vec
    # loops end at the first "hit" (source or dest)
    yield from self.advance_svstate_steps()
    loopend = self.loopend
    log("loopend", svp64_is_vector, loopend)
    if not svp64_is_vector or loopend:
        # reset loop to zero and update NIA
        self.svp64_reset_loop()
    # still looping, advance and update NIA
    self.namespace['SVSTATE'] = self.svstate
    # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
    # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
    # this way we keep repeating the same instruction (with new steps)
    self.pc.NIA.value = self.pc.CIA.value
    self.namespace['NIA'] = self.pc.NIA
    log("end of sub-pc call", self.namespace['CIA'], self.namespace['NIA'])
    return False  # DO NOT allow PC update whilst Sub-PC loop running
def update_pc_next(self):
    """commit the program counter: CIA/NIA advance from the namespace."""
    # UPDATE program counter
    ns = self.namespace
    self.pc.update(ns, self.is_svp64_mode)
    #self.svstate.spr = self.namespace['SVSTATE']
    log("end of call", ns['CIA'], ns['NIA'], ns['SVSTATE'])
def svp64_reset_loop(self):
    """reset every SVP64 element-loop counter and publish SVSTATE."""
    # zero all four step counters in one sweep
    for counter in ('srcstep', 'dststep', 'ssubstep', 'dsubstep'):
        setattr(self.svstate, counter, 0)
    self.loopend = False
    log(" svstate.srcstep loop end (PC to update)")
    self.namespace['SVSTATE'] = self.svstate
def update_nia(self):
    """recompute NIA in the PC state and mirror it into the namespace."""
    pc = self.pc
    pc.update_nia(self.is_svp64_mode)
    self.namespace['NIA'] = pc.NIA
2804 """Decorator factory.
2806 this decorator will "inject" variables into the function's namespace,
2807 from the *dictionary* in self.namespace. it therefore becomes possible
2808 to make it look like a whole stack of variables which would otherwise
2809 need "self." inserted in front of them (*and* for those variables to be
2810 added to the instance) "appear" in the function.
2812 "self.namespace['SI']" for example becomes accessible as just "SI" but
2813 *only* inside the function, when decorated.
2815 def variable_injector(func
):
2817 def decorator(*args
, **kwargs
):
2819 func_globals
= func
.__globals
__ # Python 2.6+
2820 except AttributeError:
2821 func_globals
= func
.func_globals
# Earlier versions.
2823 context
= args
[0].namespace
# variables to be injected
2824 saved_values
= func_globals
.copy() # Shallow copy of dict.
2825 log("globals before", context
.keys())
2826 func_globals
.update(context
)
2827 result
= func(*args
, **kwargs
)
2828 log("globals after", func_globals
['CIA'], func_globals
['NIA'])
2829 log("args[0]", args
[0].namespace
['CIA'],
2830 args
[0].namespace
['NIA'],
2831 args
[0].namespace
['SVSTATE'])
2832 if 'end_loop' in func_globals
:
2833 log("args[0] end_loop", func_globals
['end_loop'])
2834 args
[0].namespace
= func_globals
2835 #exec (func.__code__, func_globals)
2838 # func_globals = saved_values # Undo changes.
2844 return variable_injector