1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Copyright (C) 2020 Michael Nolan
4 # Funded by NLnet http://nlnet.nl
5 """core of the python-based POWER9 simulator
7 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
8 not speed, it is for both learning and educational purposes, as well as
9 a method of verifying the HDL.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=424
16 from collections
import namedtuple
17 from copy
import deepcopy
18 from functools
import wraps
20 from nmigen
.sim
import Settle
21 from openpower
.consts
import (MSRb
, PIb
, # big-endian (PowerISA versions)
22 SVP64CROffs
, SVP64MODEb
)
23 from openpower
.decoder
.helpers
import (ISACallerHelper
, ISAFPHelpers
, exts
,
25 from openpower
.decoder
.isa
.mem
import Mem
, MemException
26 from openpower
.decoder
.isa
.radixmmu
import RADIX
27 from openpower
.decoder
.isa
.svshape
import SVSHAPE
28 from openpower
.decoder
.isa
.svstate
import SVP64State
29 from openpower
.decoder
.orderedset
import OrderedSet
30 from openpower
.decoder
.power_enums
import (FPTRANS_INSNS
, CRInSel
, CROutSel
,
31 In1Sel
, In2Sel
, In3Sel
, LDSTMode
,
32 MicrOp
, OutSel
, SVMode
,
33 SVP64LDSTmode
, SVP64PredCR
,
34 SVP64PredInt
, SVP64PredMode
,
35 SVP64RMMode
, SVPType
, XER_bits
,
36 insns
, spr_byname
, spr_dict
,
38 from openpower
.decoder
.power_insn
import SVP64Instruction
39 from openpower
.decoder
.power_svp64
import SVP64RM
, decode_extra
40 from openpower
.decoder
.selectable_int
import (FieldSelectableInt
,
41 SelectableInt
, selectconcat
,
42 EFFECTIVELY_UNLIMITED
)
43 from openpower
.fpscr
import FPSCRState
44 from openpower
.xer
import XERState
45 from openpower
.util
import LogKind
, log
47 LDST_UPDATE_INSNS
= ['ldu', 'lwzu', 'lbzu', 'lhzu', 'lhau', 'lfsu', 'lfdu',
50 instruction_info
= namedtuple('instruction_info',
51 'func read_regs uninit_regs write_regs ' +
52 'special_regs op_fields form asmregs')
62 # rrright. this is here basically because the compiler pywriter returns
63 # results in a specific priority order. to make sure regs match up they
64 # need partial sorting. sigh.
66 # TODO (lkcl): adjust other registers that should be in a particular order
67 # probably CA, CA32, and CR
94 "overflow": 7, # should definitely be last
98 fregs
= ['FRA', 'FRB', 'FRC', 'FRS', 'FRT']
def get_masked_reg(regs, base, offs, ew_bits):
    """Read one element of width *ew_bits* from a bank of 64-bit registers.

    regs:    indexable sequence of plain-int 64-bit register values
    base:    index of the first 64-bit register of the group
    offs:    element number, counted in ew_bits-sized units from base
    ew_bits: element width in bits (expected to divide 64: 8/16/32/64)
    returns: the selected element as a plain int, masked to ew_bits
    """
    # rrrright. start by breaking down into row/col, based on elwidth:
    # row = which 64-bit register, col = which ew_bits slot inside it
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # shift down so element we want is at LSB
    val >>= gpr_col * ew_bits
    # mask so we only return the LSB element
    return val & mask
def set_masked_reg(regs, base, offs, ew_bits, value):
    """Write one element of width *ew_bits* into a bank of 64-bit registers.

    Read-modify-write: only the targeted ew_bits-wide slot of the
    64-bit register is altered; neighbouring elements are preserved.

    regs:    mutable indexable sequence of plain-int 64-bit registers
    base:    index of the first 64-bit register of the group
    offs:    element number, counted in ew_bits-sized units from base
    ew_bits: element width in bits (expected to divide 64: 8/16/32/64)
    value:   new element value (truncated to ew_bits before insertion)
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # now mask out (clear) the destination slot in the register
    val = val & ~(mask << (gpr_col * ew_bits))
    # then wipe the bits we don't want from the incoming value,
    # so an oversized value cannot corrupt neighbouring elements
    value = value & mask
    # OR the new value in, shifted up
    val |= value << (gpr_col * ew_bits)
    regs[base + gpr_offs] = val
def create_args(reglist, extra=None):
    """Return *reglist* deduplicated and partially sorted by priority.

    Duplicates are removed first-occurrence-wins (via OrderedSet), then
    the survivors are stably sorted by REG_SORT_ORDER so that registers
    the compiler (pywriter) expects in a specific priority order come
    out in that order; names absent from REG_SORT_ORDER keep relative
    order (sort key 0).  If *extra* is not None it is prepended.
    """
    retval = list(OrderedSet(reglist))
    retval.sort(key=lambda reg: REG_SORT_ORDER.get(reg, 0))
    if extra is not None:
        return [extra] + retval
    return retval
141 def __init__(self
, decoder
, isacaller
, svstate
, regfile
):
144 self
.isacaller
= isacaller
145 self
.svstate
= svstate
146 for i
in range(len(regfile
)):
147 self
[i
] = SelectableInt(regfile
[i
], 64)
149 def __call__(self
, ridx
, is_vec
=False, offs
=0, elwidth
=64):
150 if isinstance(ridx
, SelectableInt
):
153 return self
[ridx
+offs
]
154 # rrrright. start by breaking down into row/col, based on elwidth
155 gpr_offs
= offs
// (64//elwidth
)
156 gpr_col
= offs
% (64//elwidth
)
157 # now select the 64-bit register, but get its value (easier)
158 val
= self
[ridx
+gpr_offs
].value
159 # now shift down and mask out
160 val
= val
>> (gpr_col
*elwidth
) & ((1 << elwidth
)-1)
161 # finally, return a SelectableInt at the required elwidth
162 log("GPR call", ridx
, "isvec", is_vec
, "offs", offs
,
163 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
))
164 return SelectableInt(val
, elwidth
)
166 def set_form(self
, form
):
169 def write(self
, rnum
, value
, is_vec
=False, elwidth
=64):
171 if isinstance(rnum
, SelectableInt
):
173 if isinstance(value
, SelectableInt
):
176 if isinstance(rnum
, tuple):
177 rnum
, base
, offs
= rnum
180 # rrrright. start by breaking down into row/col, based on elwidth
181 gpr_offs
= offs
// (64//elwidth
)
182 gpr_col
= offs
% (64//elwidth
)
183 # compute the mask based on elwidth
184 mask
= (1 << elwidth
)-1
185 # now select the 64-bit register, but get its value (easier)
186 val
= self
[base
+gpr_offs
].value
187 # now mask out the bit we don't want
188 val
= val
& ~
(mask
<< (gpr_col
*elwidth
))
189 # then wipe the bit we don't want from the value
191 # OR the new value in, shifted up
192 val |
= value
<< (gpr_col
*elwidth
)
193 # finally put the damn value into the regfile
194 log("GPR write", base
, "isvec", is_vec
, "offs", offs
,
195 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
),
197 dict.__setitem
__(self
, base
+gpr_offs
, SelectableInt(val
, 64))
199 def __setitem__(self
, rnum
, value
):
200 # rnum = rnum.value # only SelectableInt allowed
201 log("GPR setitem", rnum
, value
)
202 if isinstance(rnum
, SelectableInt
):
204 dict.__setitem
__(self
, rnum
, value
)
206 def getz(self
, rnum
):
207 # rnum = rnum.value # only SelectableInt allowed
208 log("GPR getzero?", rnum
)
210 return SelectableInt(0, 64)
213 def _get_regnum(self
, attr
):
214 getform
= self
.sd
.sigforms
[self
.form
]
215 rnum
= getattr(getform
, attr
)
218 def ___getitem__(self
, attr
):
219 """ XXX currently not used
221 rnum
= self
._get
_regnum
(attr
)
222 log("GPR getitem", attr
, rnum
)
223 return self
.regfile
[rnum
]
225 def dump(self
, printout
=True):
227 for i
in range(len(self
)):
228 res
.append(self
[i
].value
)
230 for i
in range(0, len(res
), 8):
233 s
.append("%08x" % res
[i
+j
])
235 print("reg", "%2d" % i
, s
)
240 def __init__(self
, dec2
, initial_sprs
={}):
243 for key
, v
in initial_sprs
.items():
244 if isinstance(key
, SelectableInt
):
246 key
= special_sprs
.get(key
, key
)
247 if isinstance(key
, int):
250 info
= spr_byname
[key
]
251 if not isinstance(v
, SelectableInt
):
252 v
= SelectableInt(v
, info
.length
)
255 def __getitem__(self
, key
):
257 log("dict", self
.items())
258 # if key in special_sprs get the special spr, otherwise return key
259 if isinstance(key
, SelectableInt
):
261 if isinstance(key
, int):
262 key
= spr_dict
[key
].SPR
263 key
= special_sprs
.get(key
, key
)
264 if key
== 'HSRR0': # HACK!
266 if key
== 'HSRR1': # HACK!
269 res
= dict.__getitem
__(self
, key
)
271 if isinstance(key
, int):
274 info
= spr_byname
[key
]
275 self
[key
] = SelectableInt(0, info
.length
)
276 res
= dict.__getitem
__(self
, key
)
277 log("spr returning", key
, res
)
280 def __setitem__(self
, key
, value
):
281 if isinstance(key
, SelectableInt
):
283 if isinstance(key
, int):
284 key
= spr_dict
[key
].SPR
286 key
= special_sprs
.get(key
, key
)
287 if key
== 'HSRR0': # HACK!
288 self
.__setitem
__('SRR0', value
)
289 if key
== 'HSRR1': # HACK!
290 self
.__setitem
__('SRR1', value
)
292 value
= XERState(value
)
293 log("setting spr", key
, value
)
294 dict.__setitem
__(self
, key
, value
)
296 def __call__(self
, ridx
):
299 def dump(self
, printout
=True):
301 keys
= list(self
.keys())
304 sprname
= spr_dict
.get(k
, None)
308 sprname
= sprname
.SPR
309 res
.append((sprname
, self
[k
].value
))
311 for sprname
, value
in res
:
312 print(" ", sprname
, hex(value
))
    def __init__(self, pc_init=0):
        # CIA = Current Instruction Address, held as a 64-bit
        # SelectableInt, starting at the requested initial PC.
        self.CIA = SelectableInt(pc_init, 64)
        # NIA = Next Instruction Address: fixed +4 here, which assumes
        # a plain 4-byte instruction (update_nia handles 8-byte SVP64).
        self.NIA = self.CIA + SelectableInt(4, 64)  # only true for v3.0B!
321 def update_nia(self
, is_svp64
):
322 increment
= 8 if is_svp64
else 4
323 self
.NIA
= self
.CIA
+ SelectableInt(increment
, 64)
    def update(self, namespace, is_svp64):
        """updates the program counter (PC) by 4 if v3.0B mode or 8 if SVP64

        CIA is taken from the just-executed instruction's NIA (narrowed
        to 64 bits), NIA is recomputed via update_nia, and both are
        published back into the simulator *namespace* for the next
        instruction to see.
        """
        self.CIA = namespace['NIA'].narrow(64)
        self.update_nia(is_svp64)
        namespace['CIA'] = self.CIA
        namespace['NIA'] = self.NIA
335 # See PowerISA Version 3.0 B Book 1
336 # Section 2.3.1 Condition Register pages 30 - 31
338 LT
= FL
= 0 # negative, less than, floating-point less than
339 GT
= FG
= 1 # positive, greater than, floating-point greater than
340 EQ
= FE
= 2 # equal, floating-point equal
341 SO
= FU
= 3 # summary overflow, floating-point unordered
343 def __init__(self
, init
=0):
344 # rev_cr = int('{:016b}'.format(initial_cr)[::-1], 2)
345 # self.cr = FieldSelectableInt(self._cr, list(range(32, 64)))
346 self
.cr
= SelectableInt(init
, 64) # underlying reg
347 # field-selectable versions of Condition Register TODO check bitranges?
350 bits
= tuple(range(i
*4+32, (i
+1)*4+32))
351 _cr
= FieldSelectableInt(self
.cr
, bits
)
355 # decode SVP64 predicate integer to reg number and invert
356 def get_predint(gpr
, mask
):
360 log("get_predint", mask
, SVP64PredInt
.ALWAYS
.value
)
361 if mask
== SVP64PredInt
.ALWAYS
.value
:
362 return 0xffff_ffff_ffff_ffff # 64 bits of 1
363 if mask
== SVP64PredInt
.R3_UNARY
.value
:
364 return 1 << (r3
.value
& 0b111111)
365 if mask
== SVP64PredInt
.R3
.value
:
367 if mask
== SVP64PredInt
.R3_N
.value
:
369 if mask
== SVP64PredInt
.R10
.value
:
371 if mask
== SVP64PredInt
.R10_N
.value
:
373 if mask
== SVP64PredInt
.R30
.value
:
375 if mask
== SVP64PredInt
.R30_N
.value
:
379 # decode SVP64 predicate CR to reg number and invert status
380 def _get_predcr(mask
):
381 if mask
== SVP64PredCR
.LT
.value
:
383 if mask
== SVP64PredCR
.GE
.value
:
385 if mask
== SVP64PredCR
.GT
.value
:
387 if mask
== SVP64PredCR
.LE
.value
:
389 if mask
== SVP64PredCR
.EQ
.value
:
391 if mask
== SVP64PredCR
.NE
.value
:
393 if mask
== SVP64PredCR
.SO
.value
:
395 if mask
== SVP64PredCR
.NS
.value
:
399 # read individual CR fields (0..VL-1), extract the required bit
400 # and construct the mask
401 def get_predcr(crl
, mask
, vl
):
402 idx
, noninv
= _get_predcr(mask
)
405 cr
= crl
[i
+SVP64CROffs
.CRPred
]
406 if cr
[idx
].value
== noninv
:
411 # TODO, really should just be using PowerDecoder2
412 def get_idx_map(dec2
, name
):
414 in1_sel
= yield op
.in1_sel
415 in2_sel
= yield op
.in2_sel
416 in3_sel
= yield op
.in3_sel
417 in1
= yield dec2
.e
.read_reg1
.data
418 # identify which regnames map to in1/2/3
419 if name
== 'RA' or name
== 'RA_OR_ZERO':
420 if (in1_sel
== In1Sel
.RA
.value
or
421 (in1_sel
== In1Sel
.RA_OR_ZERO
.value
and in1
!= 0)):
423 if in1_sel
== In1Sel
.RA_OR_ZERO
.value
:
426 if in2_sel
== In2Sel
.RB
.value
:
428 if in3_sel
== In3Sel
.RB
.value
:
430 # XXX TODO, RC doesn't exist yet!
432 if in3_sel
== In3Sel
.RC
.value
:
434 elif name
in ['EA', 'RS']:
435 if in1_sel
== In1Sel
.RS
.value
:
437 if in2_sel
== In2Sel
.RS
.value
:
439 if in3_sel
== In3Sel
.RS
.value
:
442 if in1_sel
== In1Sel
.FRA
.value
:
444 if in3_sel
== In3Sel
.FRA
.value
:
447 if in2_sel
== In2Sel
.FRB
.value
:
450 if in3_sel
== In3Sel
.FRC
.value
:
453 if in1_sel
== In1Sel
.FRS
.value
:
455 if in3_sel
== In3Sel
.FRS
.value
:
458 if in1_sel
== In1Sel
.FRT
.value
:
461 if in1_sel
== In1Sel
.RT
.value
:
466 # TODO, really should just be using PowerDecoder2
467 def get_idx_in(dec2
, name
, ewmode
=False):
468 idx
= yield from get_idx_map(dec2
, name
)
472 in1_sel
= yield op
.in1_sel
473 in2_sel
= yield op
.in2_sel
474 in3_sel
= yield op
.in3_sel
475 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
476 in1
= yield dec2
.e
.read_reg1
.data
477 in2
= yield dec2
.e
.read_reg2
.data
478 in3
= yield dec2
.e
.read_reg3
.data
480 in1_base
= yield dec2
.e
.read_reg1
.base
481 in2_base
= yield dec2
.e
.read_reg2
.base
482 in3_base
= yield dec2
.e
.read_reg3
.base
483 in1_offs
= yield dec2
.e
.read_reg1
.offs
484 in2_offs
= yield dec2
.e
.read_reg2
.offs
485 in3_offs
= yield dec2
.e
.read_reg3
.offs
486 in1
= (in1
, in1_base
, in1_offs
)
487 in2
= (in2
, in2_base
, in2_offs
)
488 in3
= (in3
, in3_base
, in3_offs
)
490 in1_isvec
= yield dec2
.in1_isvec
491 in2_isvec
= yield dec2
.in2_isvec
492 in3_isvec
= yield dec2
.in3_isvec
493 log("get_idx_in in1", name
, in1_sel
, In1Sel
.RA
.value
,
495 log("get_idx_in in2", name
, in2_sel
, In2Sel
.RB
.value
,
497 log("get_idx_in in3", name
, in3_sel
, In3Sel
.RS
.value
,
499 log("get_idx_in FRS in3", name
, in3_sel
, In3Sel
.FRS
.value
,
501 log("get_idx_in FRB in2", name
, in2_sel
, In2Sel
.FRB
.value
,
503 log("get_idx_in FRC in3", name
, in3_sel
, In3Sel
.FRC
.value
,
506 return in1
, in1_isvec
508 return in2
, in2_isvec
510 return in3
, in3_isvec
514 # TODO, really should just be using PowerDecoder2
515 def get_cr_in(dec2
, name
):
517 in_sel
= yield op
.cr_in
518 in_bitfield
= yield dec2
.dec_cr_in
.cr_bitfield
.data
519 sv_cr_in
= yield op
.sv_cr_in
520 spec
= yield dec2
.crin_svdec
.spec
521 sv_override
= yield dec2
.dec_cr_in
.sv_override
522 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
523 in1
= yield dec2
.e
.read_cr1
.data
524 cr_isvec
= yield dec2
.cr_in_isvec
525 log("get_cr_in", in_sel
, CROutSel
.CR0
.value
, in1
, cr_isvec
)
526 log(" sv_cr_in", sv_cr_in
)
527 log(" cr_bf", in_bitfield
)
529 log(" override", sv_override
)
530 # identify which regnames map to in / o2
532 if in_sel
== CRInSel
.BI
.value
:
534 log("get_cr_in not found", name
)
538 # TODO, really should just be using PowerDecoder2
539 def get_cr_out(dec2
, name
):
541 out_sel
= yield op
.cr_out
542 out_bitfield
= yield dec2
.dec_cr_out
.cr_bitfield
.data
543 sv_cr_out
= yield op
.sv_cr_out
544 spec
= yield dec2
.crout_svdec
.spec
545 sv_override
= yield dec2
.dec_cr_out
.sv_override
546 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
547 out
= yield dec2
.e
.write_cr
.data
548 o_isvec
= yield dec2
.cr_out_isvec
549 log("get_cr_out", out_sel
, CROutSel
.CR0
.value
, out
, o_isvec
)
550 log(" sv_cr_out", sv_cr_out
)
551 log(" cr_bf", out_bitfield
)
553 log(" override", sv_override
)
554 # identify which regnames map to out / o2
556 if out_sel
== CROutSel
.BF
.value
:
559 if out_sel
== CROutSel
.CR0
.value
:
561 if name
== 'CR1': # these are not actually calculated correctly
562 if out_sel
== CROutSel
.CR1
.value
:
564 # check RC1 set? if so return implicit vector, this is a REAL bad hack
565 RC1
= yield dec2
.rm_dec
.RC1
567 log("get_cr_out RC1 mode")
569 return 0, True # XXX TODO: offset CR0 from SVSTATE SPR
571 return 1, True # XXX TODO: offset CR1 from SVSTATE SPR
573 log("get_cr_out not found", name
)
577 # TODO, really should just be using PowerDecoder2
578 def get_out_map(dec2
, name
):
580 out_sel
= yield op
.out_sel
581 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
582 out
= yield dec2
.e
.write_reg
.data
583 # identify which regnames map to out / o2
585 if out_sel
== OutSel
.RA
.value
:
588 if out_sel
== OutSel
.RT
.value
:
590 if out_sel
== OutSel
.RT_OR_ZERO
.value
and out
!= 0:
592 elif name
== 'RT_OR_ZERO':
593 if out_sel
== OutSel
.RT_OR_ZERO
.value
:
596 if out_sel
== OutSel
.FRA
.value
:
599 if out_sel
== OutSel
.FRS
.value
:
602 if out_sel
== OutSel
.FRT
.value
:
607 # TODO, really should just be using PowerDecoder2
608 def get_idx_out(dec2
, name
, ewmode
=False):
610 out_sel
= yield op
.out_sel
611 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
612 out
= yield dec2
.e
.write_reg
.data
613 o_isvec
= yield dec2
.o_isvec
615 offs
= yield dec2
.e
.write_reg
.offs
616 base
= yield dec2
.e
.write_reg
.base
617 out
= (out
, base
, offs
)
618 # identify which regnames map to out / o2
619 ismap
= yield from get_out_map(dec2
, name
)
621 log("get_idx_out", name
, out_sel
, out
, o_isvec
)
623 log("get_idx_out not found", name
, out_sel
, out
, o_isvec
)
627 # TODO, really should just be using PowerDecoder2
628 def get_out2_map(dec2
, name
):
629 # check first if register is activated for write
631 out_sel
= yield op
.out_sel
632 out
= yield dec2
.e
.write_ea
.data
633 out_ok
= yield dec2
.e
.write_ea
.ok
637 if name
in ['EA', 'RA']:
638 if hasattr(op
, "upd"):
639 # update mode LD/ST uses read-reg A also as an output
641 log("get_idx_out2", upd
, LDSTMode
.update
.value
,
642 out_sel
, OutSel
.RA
.value
,
644 if upd
== LDSTMode
.update
.value
:
647 fft_en
= yield dec2
.implicit_rs
649 log("get_idx_out2", out_sel
, OutSel
.RS
.value
,
653 fft_en
= yield dec2
.implicit_rs
655 log("get_idx_out2", out_sel
, OutSel
.FRS
.value
,
661 # TODO, really should just be using PowerDecoder2
662 def get_idx_out2(dec2
, name
, ewmode
=False):
663 # check first if register is activated for write
665 out_sel
= yield op
.out_sel
666 out
= yield dec2
.e
.write_ea
.data
668 offs
= yield dec2
.e
.write_ea
.offs
669 base
= yield dec2
.e
.write_ea
.base
670 out
= (out
, base
, offs
)
671 o_isvec
= yield dec2
.o2_isvec
672 ismap
= yield from get_out2_map(dec2
, name
)
674 log("get_idx_out2", name
, out_sel
, out
, o_isvec
)
680 """deals with svstate looping.
683 def __init__(self
, svstate
):
684 self
.svstate
= svstate
687 def new_iterators(self
):
688 self
.src_it
= self
.src_iterator()
689 self
.dst_it
= self
.dst_iterator()
693 self
.new_ssubstep
= 0
694 self
.new_dsubstep
= 0
695 self
.pred_dst_zero
= 0
696 self
.pred_src_zero
= 0
698 def src_iterator(self
):
699 """source-stepping iterator
701 pack
= self
.svstate
.pack
705 # pack advances subvl in *outer* loop
706 while True: # outer subvl loop
707 while True: # inner vl loop
710 srcmask
= self
.srcmask
711 srcstep
= self
.svstate
.srcstep
712 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
713 if self
.pred_sz
or pred_src_zero
:
714 self
.pred_src_zero
= not pred_src_zero
715 log(" advance src", srcstep
, vl
,
716 self
.svstate
.ssubstep
, subvl
)
717 # yield actual substep/srcstep
718 yield (self
.svstate
.ssubstep
, srcstep
)
719 # the way yield works these could have been modified.
722 srcstep
= self
.svstate
.srcstep
723 log(" advance src check", srcstep
, vl
,
724 self
.svstate
.ssubstep
, subvl
, srcstep
== vl
-1,
725 self
.svstate
.ssubstep
== subvl
)
726 if srcstep
== vl
-1: # end-point
727 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
728 if self
.svstate
.ssubstep
== subvl
: # end-point
729 log(" advance pack stop")
731 break # exit inner loop
732 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance ss
734 if self
.svstate
.ssubstep
== subvl
: # end-point
735 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
736 log(" advance pack stop")
738 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
741 # these cannot be done as for-loops because SVSTATE may change
742 # (srcstep/substep may be modified, interrupted, subvl/vl change)
743 # but they *can* be done as while-loops as long as every SVSTATE
744 # "thing" is re-read every single time a yield gives indices
745 while True: # outer vl loop
746 while True: # inner subvl loop
749 srcmask
= self
.srcmask
750 srcstep
= self
.svstate
.srcstep
751 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
752 if self
.pred_sz
or pred_src_zero
:
753 self
.pred_src_zero
= not pred_src_zero
754 log(" advance src", srcstep
, vl
,
755 self
.svstate
.ssubstep
, subvl
)
756 # yield actual substep/srcstep
757 yield (self
.svstate
.ssubstep
, srcstep
)
758 if self
.svstate
.ssubstep
== subvl
: # end-point
759 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
760 break # exit inner loop
761 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
763 if srcstep
== vl
-1: # end-point
764 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
767 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance srcstep
769 def dst_iterator(self
):
770 """dest-stepping iterator
772 unpack
= self
.svstate
.unpack
776 # pack advances subvl in *outer* loop
777 while True: # outer subvl loop
778 while True: # inner vl loop
781 dstmask
= self
.dstmask
782 dststep
= self
.svstate
.dststep
783 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
784 if self
.pred_dz
or pred_dst_zero
:
785 self
.pred_dst_zero
= not pred_dst_zero
786 log(" advance dst", dststep
, vl
,
787 self
.svstate
.dsubstep
, subvl
)
788 # yield actual substep/dststep
789 yield (self
.svstate
.dsubstep
, dststep
)
790 # the way yield works these could have been modified.
792 dststep
= self
.svstate
.dststep
793 log(" advance dst check", dststep
, vl
,
794 self
.svstate
.ssubstep
, subvl
)
795 if dststep
== vl
-1: # end-point
796 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
797 if self
.svstate
.dsubstep
== subvl
: # end-point
798 log(" advance unpack stop")
801 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance ds
803 if self
.svstate
.dsubstep
== subvl
: # end-point
804 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
805 log(" advance unpack stop")
807 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
809 # these cannot be done as for-loops because SVSTATE may change
810 # (dststep/substep may be modified, interrupted, subvl/vl change)
811 # but they *can* be done as while-loops as long as every SVSTATE
812 # "thing" is re-read every single time a yield gives indices
813 while True: # outer vl loop
814 while True: # inner subvl loop
816 dstmask
= self
.dstmask
817 dststep
= self
.svstate
.dststep
818 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
819 if self
.pred_dz
or pred_dst_zero
:
820 self
.pred_dst_zero
= not pred_dst_zero
821 log(" advance dst", dststep
, self
.svstate
.vl
,
822 self
.svstate
.dsubstep
, subvl
)
823 # yield actual substep/dststep
824 yield (self
.svstate
.dsubstep
, dststep
)
825 if self
.svstate
.dsubstep
== subvl
: # end-point
826 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
828 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
831 if dststep
== vl
-1: # end-point
832 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
834 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance dststep
836 def src_iterate(self
):
837 """source-stepping iterator
841 pack
= self
.svstate
.pack
842 unpack
= self
.svstate
.unpack
843 ssubstep
= self
.svstate
.ssubstep
844 end_ssub
= ssubstep
== subvl
845 end_src
= self
.svstate
.srcstep
== vl
-1
846 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
850 srcstep
= self
.svstate
.srcstep
851 srcmask
= self
.srcmask
853 # pack advances subvl in *outer* loop
855 assert srcstep
<= vl
-1
856 end_src
= srcstep
== vl
-1
861 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
865 srcstep
+= 1 # advance srcstep
866 if not self
.srcstep_skip
:
868 if ((1 << srcstep
) & srcmask
) != 0:
871 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
873 # advance subvl in *inner* loop
876 assert srcstep
<= vl
-1
877 end_src
= srcstep
== vl
-1
878 if end_src
: # end-point
884 if not self
.srcstep_skip
:
886 if ((1 << srcstep
) & srcmask
) != 0:
889 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
890 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
893 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
895 self
.svstate
.srcstep
= SelectableInt(srcstep
, 7)
896 log(" advance src", self
.svstate
.srcstep
, self
.svstate
.ssubstep
,
899 def dst_iterate(self
):
900 """dest step iterator
904 pack
= self
.svstate
.pack
905 unpack
= self
.svstate
.unpack
906 dsubstep
= self
.svstate
.dsubstep
907 end_dsub
= dsubstep
== subvl
908 dststep
= self
.svstate
.dststep
909 end_dst
= dststep
== vl
-1
910 dstmask
= self
.dstmask
911 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
916 # unpack advances subvl in *outer* loop
918 assert dststep
<= vl
-1
919 end_dst
= dststep
== vl
-1
924 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
928 dststep
+= 1 # advance dststep
929 if not self
.dststep_skip
:
931 if ((1 << dststep
) & dstmask
) != 0:
934 log(" dskip", bin(dstmask
), bin(1 << dststep
))
936 # advance subvl in *inner* loop
939 assert dststep
<= vl
-1
940 end_dst
= dststep
== vl
-1
941 if end_dst
: # end-point
947 if not self
.dststep_skip
:
949 if ((1 << dststep
) & dstmask
) != 0:
952 log(" dskip", bin(dstmask
), bin(1 << dststep
))
953 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
956 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
958 self
.svstate
.dststep
= SelectableInt(dststep
, 7)
959 log(" advance dst", self
.svstate
.dststep
, self
.svstate
.dsubstep
,
962 def at_loopend(self
):
963 """tells if this is the last possible element. uses the cached values
964 for src/dst-step and sub-steps
968 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
969 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
970 end_ssub
= ssubstep
== subvl
971 end_dsub
= dsubstep
== subvl
972 if srcstep
== vl
-1 and end_ssub
:
974 if dststep
== vl
-1 and end_dsub
:
978 def advance_svstate_steps(self
):
979 """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
980 TODO when Pack/Unpack is set, substep becomes the *outer* loop
982 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
983 if self
.loopend
: # huhn??
988 def read_src_mask(self
):
989 """read/update pred_sz and src mask
991 # get SVSTATE VL (oh and print out some debug stuff)
993 srcstep
= self
.svstate
.srcstep
994 ssubstep
= self
.svstate
.ssubstep
996 # get predicate mask (all 64 bits)
997 srcmask
= 0xffff_ffff_ffff_ffff
999 pmode
= yield self
.dec2
.rm_dec
.predmode
1000 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1001 srcpred
= yield self
.dec2
.rm_dec
.srcpred
1002 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1003 pred_sz
= yield self
.dec2
.rm_dec
.pred_sz
1004 if pmode
== SVP64PredMode
.INT
.value
:
1005 srcmask
= dstmask
= get_predint(self
.gpr
, dstpred
)
1006 if sv_ptype
== SVPType
.P2
.value
:
1007 srcmask
= get_predint(self
.gpr
, srcpred
)
1008 elif pmode
== SVP64PredMode
.CR
.value
:
1009 srcmask
= dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1010 if sv_ptype
== SVPType
.P2
.value
:
1011 srcmask
= get_predcr(self
.crl
, srcpred
, vl
)
1012 # work out if the ssubsteps are completed
1013 ssubstart
= ssubstep
== 0
1014 log(" pmode", pmode
)
1015 log(" ptype", sv_ptype
)
1016 log(" srcpred", bin(srcpred
))
1017 log(" srcmask", bin(srcmask
))
1018 log(" pred_sz", bin(pred_sz
))
1019 log(" ssubstart", ssubstart
)
1021 # store all that above
1022 self
.srcstep_skip
= False
1023 self
.srcmask
= srcmask
1024 self
.pred_sz
= pred_sz
1025 self
.new_ssubstep
= ssubstep
1026 log(" new ssubstep", ssubstep
)
1027 # until the predicate mask has a "1" bit... or we run out of VL
1028 # let srcstep==VL be the indicator to move to next instruction
1030 self
.srcstep_skip
= True
1032 def read_dst_mask(self
):
1033 """same as read_src_mask - check and record everything needed
1035 # get SVSTATE VL (oh and print out some debug stuff)
1036 # yield Delay(1e-10) # make changes visible
1037 vl
= self
.svstate
.vl
1038 dststep
= self
.svstate
.dststep
1039 dsubstep
= self
.svstate
.dsubstep
1041 # get predicate mask (all 64 bits)
1042 dstmask
= 0xffff_ffff_ffff_ffff
1044 pmode
= yield self
.dec2
.rm_dec
.predmode
1045 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
1046 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1047 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1048 pred_dz
= yield self
.dec2
.rm_dec
.pred_dz
1049 if pmode
== SVP64PredMode
.INT
.value
:
1050 dstmask
= get_predint(self
.gpr
, dstpred
)
1051 elif pmode
== SVP64PredMode
.CR
.value
:
1052 dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1053 # work out if the ssubsteps are completed
1054 dsubstart
= dsubstep
== 0
1055 log(" pmode", pmode
)
1056 log(" ptype", sv_ptype
)
1057 log(" dstpred", bin(dstpred
))
1058 log(" dstmask", bin(dstmask
))
1059 log(" pred_dz", bin(pred_dz
))
1060 log(" dsubstart", dsubstart
)
1062 self
.dststep_skip
= False
1063 self
.dstmask
= dstmask
1064 self
.pred_dz
= pred_dz
1065 self
.new_dsubstep
= dsubstep
1066 log(" new dsubstep", dsubstep
)
1068 self
.dststep_skip
= True
1070 def svstate_pre_inc(self
):
1071 """check if srcstep/dststep need to skip over masked-out predicate bits
1072 note that this is not supposed to do anything to substep,
1073 it is purely for skipping masked-out bits
1076 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1077 yield from self
.read_src_mask()
1078 yield from self
.read_dst_mask()
1085 srcstep
= self
.svstate
.srcstep
1086 srcmask
= self
.srcmask
1087 pred_src_zero
= self
.pred_sz
1088 vl
= self
.svstate
.vl
1089 # srcstep-skipping opportunity identified
1090 if self
.srcstep_skip
:
1091 # cannot do this with sv.bc - XXX TODO
1094 while (((1 << srcstep
) & srcmask
) == 0) and (srcstep
!= vl
):
1095 log(" sskip", bin(1 << srcstep
))
1098 # now work out if the relevant mask bits require zeroing
1100 pred_src_zero
= ((1 << srcstep
) & srcmask
) == 0
1102 # store new srcstep / dststep
1103 self
.new_srcstep
= srcstep
1104 self
.pred_src_zero
= pred_src_zero
1105 log(" new srcstep", srcstep
)
1108 # dststep-skipping opportunity identified
1109 dststep
= self
.svstate
.dststep
1110 dstmask
= self
.dstmask
1111 pred_dst_zero
= self
.pred_dz
1112 vl
= self
.svstate
.vl
1113 if self
.dststep_skip
:
1114 # cannot do this with sv.bc - XXX TODO
1117 while (((1 << dststep
) & dstmask
) == 0) and (dststep
!= vl
):
1118 log(" dskip", bin(1 << dststep
))
1121 # now work out if the relevant mask bits require zeroing
1123 pred_dst_zero
= ((1 << dststep
) & dstmask
) == 0
1125 # store new srcstep / dststep
1126 self
.new_dststep
= dststep
1127 self
.pred_dst_zero
= pred_dst_zero
1128 log(" new dststep", dststep
)
1131 class ISACaller(ISACallerHelper
, ISAFPHelpers
, StepLoop
):
1132 # decoder2 - an instance of power_decoder2
1133 # regfile - a list of initial values for the registers
1134 # initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
1135 # respect_pc - tracks the program counter. requires initial_insns
1136 def __init__(self
, decoder2
, regfile
, initial_sprs
=None, initial_cr
=0,
1137 initial_mem
=None, initial_msr
=0,
1149 self
.bigendian
= bigendian
1151 self
.is_svp64_mode
= False
1152 self
.respect_pc
= respect_pc
1153 if initial_sprs
is None:
1155 if initial_mem
is None:
1157 if fpregfile
is None:
1158 fpregfile
= [0] * 32
1159 if initial_insns
is None:
1161 assert self
.respect_pc
== False, "instructions required to honor pc"
1163 log("ISACaller insns", respect_pc
, initial_insns
, disassembly
)
1164 log("ISACaller initial_msr", initial_msr
)
1166 # "fake program counter" mode (for unit testing)
1170 if isinstance(initial_mem
, tuple):
1171 self
.fake_pc
= initial_mem
[0]
1172 disasm_start
= self
.fake_pc
1174 disasm_start
= initial_pc
1176 # disassembly: we need this for now (not given from the decoder)
1177 self
.disassembly
= {}
1179 for i
, code
in enumerate(disassembly
):
1180 self
.disassembly
[i
*4 + disasm_start
] = code
1182 # set up registers, instruction memory, data memory, PC, SPRs, MSR, CR
1183 self
.svp64rm
= SVP64RM()
1184 if initial_svstate
is None:
1186 if isinstance(initial_svstate
, int):
1187 initial_svstate
= SVP64State(initial_svstate
)
1188 # SVSTATE, MSR and PC
1189 StepLoop
.__init
__(self
, initial_svstate
)
1190 self
.msr
= SelectableInt(initial_msr
, 64) # underlying reg
1192 # GPR FPR SPR registers
1193 initial_sprs
= deepcopy(initial_sprs
) # so as not to get modified
1194 self
.gpr
= GPR(decoder2
, self
, self
.svstate
, regfile
)
1195 self
.fpr
= GPR(decoder2
, self
, self
.svstate
, fpregfile
)
1196 self
.spr
= SPR(decoder2
, initial_sprs
) # initialise SPRs before MMU
1198 # set up 4 dummy SVSHAPEs if they aren't already set up
1200 sname
= 'SVSHAPE%d' % i
1201 val
= self
.spr
.get(sname
, 0)
1202 # make sure it's an SVSHAPE
1203 self
.spr
[sname
] = SVSHAPE(val
, self
.gpr
)
1204 self
.last_op_svshape
= False
1207 self
.mem
= Mem(row_bytes
=8, initial_mem
=initial_mem
, misaligned_ok
=True)
1208 self
.mem
.log_fancy(kind
=LogKind
.InstrInOuts
)
1209 self
.imem
= Mem(row_bytes
=4, initial_mem
=initial_insns
)
1210 # MMU mode, redirect underlying Mem through RADIX
1212 self
.mem
= RADIX(self
.mem
, self
)
1214 self
.imem
= RADIX(self
.imem
, self
)
1216 # TODO, needed here:
1217 # FPR (same as GPR except for FP nums)
1218 # 4.2.2 p124 FPSCR (definitely "separate" - not in SPR)
1219 # note that mffs, mcrfs, mtfsf "manage" this FPSCR
1220 self
.fpscr
= FPSCRState(initial_fpscr
)
1222 # 2.3.1 CR (and sub-fields CR0..CR6 - CR0 SO comes from XER.SO)
1223 # note that mfocrf, mfcr, mtcr, mtocrf, mcrxrx "manage" CRs
1225 # 2.3.2 LR (actually SPR #8) -- Done
1226 # 2.3.3 CTR (actually SPR #9) -- Done
1227 # 2.3.4 TAR (actually SPR #815)
1228 # 3.2.2 p45 XER (actually SPR #1) -- Done
1229 # 3.2.3 p46 p232 VRSAVE (actually SPR #256)
1231 # create CR then allow portions of it to be "selectable" (below)
1232 self
.cr_fields
= CRFields(initial_cr
)
1233 self
.cr
= self
.cr_fields
.cr
1234 self
.cr_backup
= 0 # sigh, dreadful hack: for fail-first (VLi)
1236 # "undefined", just set to variable-bit-width int (use exts "max")
1237 # self.undefined = SelectableInt(0, EFFECTIVELY_UNLIMITED)
1240 self
.namespace
.update(self
.spr
)
1241 self
.namespace
.update({'GPR': self
.gpr
,
1245 'memassign': self
.memassign
,
1248 'SVSTATE': self
.svstate
,
1249 'SVSHAPE0': self
.spr
['SVSHAPE0'],
1250 'SVSHAPE1': self
.spr
['SVSHAPE1'],
1251 'SVSHAPE2': self
.spr
['SVSHAPE2'],
1252 'SVSHAPE3': self
.spr
['SVSHAPE3'],
1255 'FPSCR': self
.fpscr
,
1256 'undefined': undefined
,
1257 'mode_is_64bit': True,
1258 'SO': XER_bits
['SO'],
1259 'XLEN': 64 # elwidth overrides
1262 for name
in BFP_FLAG_NAMES
:
1263 setattr(self
, name
, 0)
1265 # update pc to requested start point
1266 self
.set_pc(initial_pc
)
1268 # field-selectable versions of Condition Register
1269 self
.crl
= self
.cr_fields
.crl
1271 self
.namespace
["CR%d" % i
] = self
.crl
[i
]
1273 self
.decoder
= decoder2
.dec
1274 self
.dec2
= decoder2
1276 super().__init
__(XLEN
=self
.namespace
["XLEN"], FPSCR
=self
.fpscr
)
1280 return self
.namespace
["XLEN"]
1286 def call_trap(self
, trap_addr
, trap_bit
):
1287 """calls TRAP and sets up NIA to the new execution location.
1288 next instruction will begin at trap_addr.
1290 self
.TRAP(trap_addr
, trap_bit
)
1291 self
.namespace
['NIA'] = self
.trap_nia
1292 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
1294 def TRAP(self
, trap_addr
=0x700, trap_bit
=PIb
.TRAP
):
1295 """TRAP> saves PC, MSR (and TODO SVSTATE), and updates MSR
1297 TRAP function is callable from inside the pseudocode itself,
1298 hence the default arguments. when calling from inside ISACaller
1299 it is best to use call_trap()
1301 # https://bugs.libre-soc.org/show_bug.cgi?id=859
1302 kaivb
= self
.spr
['KAIVB'].value
1303 msr
= self
.namespace
['MSR'].value
1304 log("TRAP:", hex(trap_addr
), hex(msr
), "kaivb", hex(kaivb
))
1305 # store CIA(+4?) in SRR0, set NIA to 0x700
1306 # store MSR in SRR1, set MSR to um errr something, have to check spec
1307 # store SVSTATE (if enabled) in SVSRR0
1308 self
.spr
['SRR0'].value
= self
.pc
.CIA
.value
1309 self
.spr
['SRR1'].value
= msr
1310 if self
.is_svp64_mode
:
1311 self
.spr
['SVSRR0'] = self
.namespace
['SVSTATE'].value
1312 self
.trap_nia
= SelectableInt(trap_addr |
(kaivb
& ~
0x1fff), 64)
1313 self
.spr
['SRR1'][trap_bit
] = 1 # change *copy* of MSR in SRR1
1315 # set exception bits. TODO: this should, based on the address
1316 # in figure 66 p1065 V3.0B and the table figure 65 p1063 set these
1317 # bits appropriately. however it turns out that *for now* in all
1318 # cases (all trap_addrs) the exact same thing is needed.
1319 self
.msr
[MSRb
.IR
] = 0
1320 self
.msr
[MSRb
.DR
] = 0
1321 self
.msr
[MSRb
.FE0
] = 0
1322 self
.msr
[MSRb
.FE1
] = 0
1323 self
.msr
[MSRb
.EE
] = 0
1324 self
.msr
[MSRb
.RI
] = 0
1325 self
.msr
[MSRb
.SF
] = 1
1326 self
.msr
[MSRb
.TM
] = 0
1327 self
.msr
[MSRb
.VEC
] = 0
1328 self
.msr
[MSRb
.VSX
] = 0
1329 self
.msr
[MSRb
.PR
] = 0
1330 self
.msr
[MSRb
.FP
] = 0
1331 self
.msr
[MSRb
.PMM
] = 0
1332 self
.msr
[MSRb
.TEs
] = 0
1333 self
.msr
[MSRb
.TEe
] = 0
1334 self
.msr
[MSRb
.UND
] = 0
1335 self
.msr
[MSRb
.LE
] = 1
1337 def memassign(self
, ea
, sz
, val
):
1338 self
.mem
.memassign(ea
, sz
, val
)
1340 def prep_namespace(self
, insn_name
, formname
, op_fields
, xlen
):
1341 # TODO: get field names from form in decoder*1* (not decoder2)
1342 # decoder2 is hand-created, and decoder1.sigform is auto-generated
1344 # then "yield" fields only from op_fields rather than hard-coded
1346 fields
= self
.decoder
.sigforms
[formname
]
1347 log("prep_namespace", formname
, op_fields
, insn_name
)
1348 for name
in op_fields
:
1349 # CR immediates. deal with separately. needs modifying
1351 if self
.is_svp64_mode
and name
in ['BI']: # TODO, more CRs
1352 # BI is a 5-bit, must reconstruct the value
1353 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1354 sig
= getattr(fields
, name
)
1356 # low 2 LSBs (CR field selector) remain same, CR num extended
1357 assert regnum
<= 7, "sigh, TODO, 128 CR fields"
1358 val
= (val
& 0b11) |
(regnum
<< 2)
1359 elif self
.is_svp64_mode
and name
in ['BF']: # TODO, more CRs
1360 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, "BF")
1361 log('hack %s' % name
, regnum
, is_vec
)
1364 sig
= getattr(fields
, name
)
1366 # these are all opcode fields involved in index-selection of CR,
1367 # and need to do "standard" arithmetic. CR[BA+32] for example
1368 # would, if using SelectableInt, only be 5-bit.
1369 if name
in ['BF', 'BFA', 'BC', 'BA', 'BB', 'BT', 'BI']:
1370 self
.namespace
[name
] = val
1372 self
.namespace
[name
] = SelectableInt(val
, sig
.width
)
1374 self
.namespace
['XER'] = self
.spr
['XER']
1375 self
.namespace
['CA'] = self
.spr
['XER'][XER_bits
['CA']].value
1376 self
.namespace
['CA32'] = self
.spr
['XER'][XER_bits
['CA32']].value
1377 self
.namespace
['OV'] = self
.spr
['XER'][XER_bits
['OV']].value
1378 self
.namespace
['OV32'] = self
.spr
['XER'][XER_bits
['OV32']].value
1379 self
.namespace
['XLEN'] = xlen
1381 # add some SVSTATE convenience variables
1382 vl
= self
.svstate
.vl
1383 srcstep
= self
.svstate
.srcstep
1384 self
.namespace
['VL'] = vl
1385 self
.namespace
['srcstep'] = srcstep
1387 # take a copy of the CR field value: if non-VLi fail-first fails
1388 # this is because the pseudocode writes *directly* to CR. sigh
1389 self
.cr_backup
= self
.cr
.value
1391 # sv.bc* need some extra fields
1392 if self
.is_svp64_mode
and insn_name
.startswith("sv.bc"):
1393 # blegh grab bits manually
1394 mode
= yield self
.dec2
.rm_dec
.rm_in
.mode
1395 # convert to SelectableInt before test
1396 mode
= SelectableInt(mode
, 5)
1397 bc_vlset
= mode
[SVP64MODEb
.BC_VLSET
] != 0
1398 bc_vli
= mode
[SVP64MODEb
.BC_VLI
] != 0
1399 bc_snz
= mode
[SVP64MODEb
.BC_SNZ
] != 0
1400 bc_vsb
= yield self
.dec2
.rm_dec
.bc_vsb
1401 bc_lru
= yield self
.dec2
.rm_dec
.bc_lru
1402 bc_gate
= yield self
.dec2
.rm_dec
.bc_gate
1403 sz
= yield self
.dec2
.rm_dec
.pred_sz
1404 self
.namespace
['mode'] = SelectableInt(mode
, 5)
1405 self
.namespace
['ALL'] = SelectableInt(bc_gate
, 1)
1406 self
.namespace
['VSb'] = SelectableInt(bc_vsb
, 1)
1407 self
.namespace
['LRu'] = SelectableInt(bc_lru
, 1)
1408 self
.namespace
['VLSET'] = SelectableInt(bc_vlset
, 1)
1409 self
.namespace
['VLI'] = SelectableInt(bc_vli
, 1)
1410 self
.namespace
['sz'] = SelectableInt(sz
, 1)
1411 self
.namespace
['SNZ'] = SelectableInt(bc_snz
, 1)
1413 def get_kludged_op_add_ca_ov(self
, inputs
, inp_ca_ov
):
1414 """ this was not at all necessary to do. this function massively
1415 duplicates - in a laborious and complex fashion - the contents of
1416 the CSV files that were extracted two years ago from microwatt's
1417 source code. A-inversion is the "inv A" column, output inversion
1418 is the "inv out" column, carry-in equal to 0 or 1 or CA is the
1421 all of that information is available in
1422 self.instrs[ins_name].op_fields
1423 where info is usually assigned to self.instrs[ins_name]
1425 https://git.libre-soc.org/?p=openpower-isa.git;a=blob;f=openpower/isatables/minor_31.csv;hb=HEAD
1427 the immediate constants are *also* decoded correctly and placed
1428 usually by DecodeIn2Imm into operand2, as part of power_decoder2.py
1430 def ca(a
, b
, ca_in
, width
):
1431 mask
= (1 << width
) - 1
1432 y
= (a
& mask
) + (b
& mask
) + ca_in
1435 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1436 insn
= insns
.get(asmcode
)
1437 SI
= yield self
.dec2
.dec
.SI
1440 inputs
= [i
.value
for i
in inputs
]
1443 if insn
in ("add", "addo", "addc", "addco"):
1447 elif insn
== "addic" or insn
== "addic.":
1451 elif insn
in ("subf", "subfo", "subfc", "subfco"):
1455 elif insn
== "subfic":
1459 elif insn
== "adde" or insn
== "addeo":
1463 elif insn
== "subfe" or insn
== "subfeo":
1467 elif insn
== "addme" or insn
== "addmeo":
1471 elif insn
== "addze" or insn
== "addzeo":
1475 elif insn
== "subfme" or insn
== "subfmeo":
1479 elif insn
== "subfze" or insn
== "subfzeo":
1483 elif insn
== "addex":
1484 # CA[32] aren't actually written, just generate so we have
1485 # something to return
1486 ca64
= ov64
= ca(inputs
[0], inputs
[1], OV
, 64)
1487 ca32
= ov32
= ca(inputs
[0], inputs
[1], OV
, 32)
1488 return ca64
, ca32
, ov64
, ov32
1489 elif insn
== "neg" or insn
== "nego":
1494 raise NotImplementedError(
1495 "op_add kludge unimplemented instruction: ", asmcode
, insn
)
1497 ca64
= ca(a
, b
, ca_in
, 64)
1498 ca32
= ca(a
, b
, ca_in
, 32)
1499 ov64
= ca64
!= ca(a
, b
, ca_in
, 63)
1500 ov32
= ca32
!= ca(a
, b
, ca_in
, 31)
1501 return ca64
, ca32
, ov64
, ov32
1503 def handle_carry_(self
, inputs
, output
, ca
, ca32
, inp_ca_ov
):
1504 op
= yield self
.dec2
.e
.do
.insn_type
1505 if op
== MicrOp
.OP_ADD
.value
and ca
is None and ca32
is None:
1506 retval
= yield from self
.get_kludged_op_add_ca_ov(
1508 ca
, ca32
, ov
, ov32
= retval
1509 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1510 if insns
.get(asmcode
) == 'addex':
1511 # TODO: if 32-bit mode, set ov to ov32
1512 self
.spr
['XER'][XER_bits
['OV']] = ov
1513 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1515 # TODO: if 32-bit mode, set ca to ca32
1516 self
.spr
['XER'][XER_bits
['CA']] = ca
1517 self
.spr
['XER'][XER_bits
['CA32']] = ca32
1519 inv_a
= yield self
.dec2
.e
.do
.invert_in
1521 inputs
[0] = ~inputs
[0]
1523 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1525 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1526 inputs
.append(SelectableInt(imm
, 64))
1529 log("gt input", x
, output
)
1530 gt
= (gtu(x
, output
))
1533 cy
= 1 if any(gts
) else 0
1535 if ca
is None: # already written
1536 self
.spr
['XER'][XER_bits
['CA']] = cy
1539 # ARGH... different for OP_ADD... *sigh*...
1540 op
= yield self
.dec2
.e
.do
.insn_type
1541 if op
== MicrOp
.OP_ADD
.value
:
1542 res32
= (output
.value
& (1 << 32)) != 0
1543 a32
= (inputs
[0].value
& (1 << 32)) != 0
1544 if len(inputs
) >= 2:
1545 b32
= (inputs
[1].value
& (1 << 32)) != 0
1548 cy32
= res32 ^ a32 ^ b32
1549 log("CA32 ADD", cy32
)
1553 log("input", x
, output
)
1554 log(" x[32:64]", x
, x
[32:64])
1555 log(" o[32:64]", output
, output
[32:64])
1556 gt
= (gtu(x
[32:64], output
[32:64])) == SelectableInt(1, 1)
1558 cy32
= 1 if any(gts
) else 0
1559 log("CA32", cy32
, gts
)
1560 if ca32
is None: # already written
1561 self
.spr
['XER'][XER_bits
['CA32']] = cy32
1563 def handle_overflow(self
, inputs
, output
, div_overflow
, inp_ca_ov
):
1564 op
= yield self
.dec2
.e
.do
.insn_type
1565 if op
== MicrOp
.OP_ADD
.value
:
1566 retval
= yield from self
.get_kludged_op_add_ca_ov(
1568 ca
, ca32
, ov
, ov32
= retval
1569 # TODO: if 32-bit mode, set ov to ov32
1570 self
.spr
['XER'][XER_bits
['OV']] = ov
1571 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1572 self
.spr
['XER'][XER_bits
['SO']] |
= ov
1574 if hasattr(self
.dec2
.e
.do
, "invert_in"):
1575 inv_a
= yield self
.dec2
.e
.do
.invert_in
1577 inputs
[0] = ~inputs
[0]
1579 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1581 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1582 inputs
.append(SelectableInt(imm
, 64))
1583 log("handle_overflow", inputs
, output
, div_overflow
)
1584 if len(inputs
) < 2 and div_overflow
is None:
1587 # div overflow is different: it's returned by the pseudo-code
1588 # because it's more complex than can be done by analysing the output
1589 if div_overflow
is not None:
1590 ov
, ov32
= div_overflow
, div_overflow
1591 # arithmetic overflow can be done by analysing the input and output
1592 elif len(inputs
) >= 2:
1594 input_sgn
= [exts(x
.value
, x
.bits
) < 0 for x
in inputs
]
1595 output_sgn
= exts(output
.value
, output
.bits
) < 0
1596 ov
= 1 if input_sgn
[0] == input_sgn
[1] and \
1597 output_sgn
!= input_sgn
[0] else 0
1600 input32_sgn
= [exts(x
.value
, 32) < 0 for x
in inputs
]
1601 output32_sgn
= exts(output
.value
, 32) < 0
1602 ov32
= 1 if input32_sgn
[0] == input32_sgn
[1] and \
1603 output32_sgn
!= input32_sgn
[0] else 0
1605 # now update XER OV/OV32/SO
1606 so
= self
.spr
['XER'][XER_bits
['SO']]
1607 new_so
= so | ov
# sticky overflow ORs in old with new
1608 self
.spr
['XER'][XER_bits
['OV']] = ov
1609 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1610 self
.spr
['XER'][XER_bits
['SO']] = new_so
1611 log(" set overflow", ov
, ov32
, so
, new_so
)
1613 def handle_comparison(self
, out
, cr_idx
=0, overflow
=None, no_so
=False):
1614 assert isinstance(out
, SelectableInt
), \
1615 "out zero not a SelectableInt %s" % repr(outputs
)
1616 log("handle_comparison", out
.bits
, hex(out
.value
))
1617 # TODO - XXX *processor* in 32-bit mode
1618 # https://bugs.libre-soc.org/show_bug.cgi?id=424
1620 # o32 = exts(out.value, 32)
1621 # print ("handle_comparison exts 32 bit", hex(o32))
1622 out
= exts(out
.value
, out
.bits
)
1623 log("handle_comparison exts", hex(out
))
1624 # create the three main CR flags, EQ GT LT
1625 zero
= SelectableInt(out
== 0, 1)
1626 positive
= SelectableInt(out
> 0, 1)
1627 negative
= SelectableInt(out
< 0, 1)
1628 # get (or not) XER.SO. for setvl this is important *not* to read SO
1630 SO
= SelectableInt(1, 0)
1632 SO
= self
.spr
['XER'][XER_bits
['SO']]
1633 log("handle_comparison SO", SO
.value
,
1634 "overflow", overflow
,
1636 "+ve", positive
.value
,
1637 "-ve", negative
.value
)
1638 # alternative overflow checking (setvl mainly at the moment)
1639 if overflow
is not None and overflow
== 1:
1640 SO
= SelectableInt(1, 1)
1641 # create the four CR field values and set the required CR field
1642 cr_field
= selectconcat(negative
, positive
, zero
, SO
)
1643 log("handle_comparison cr_field", self
.cr
, cr_idx
, cr_field
)
1644 self
.crl
[cr_idx
].eq(cr_field
)
def set_pc(self, pc_val):
    """Force the next-instruction address (NIA) to *pc_val*.

    Publishes NIA into the pseudocode namespace first, then lets the
    PC state pick it up via update().
    """
    nia = SelectableInt(pc_val, 64)
    self.namespace['NIA'] = nia
    self.pc.update(self.namespace, self.is_svp64_mode)
1650 def get_next_insn(self
):
1651 """check instruction
1654 pc
= self
.pc
.CIA
.value
1657 ins
= self
.imem
.ld(pc
, 4, False, True, instr_fetch
=True)
1659 raise KeyError("no instruction at 0x%x" % pc
)
def setup_one(self):
    """Set up one instruction: fetch at the current PC, then prepare
    it for decode (delegates to setup_next_insn).
    """
    addr, word = self.get_next_insn()
    yield from self.setup_next_insn(addr, word)
1668 def setup_next_insn(self
, pc
, ins
):
1669 """set up next instruction
1672 log("setup: 0x%x 0x%x %s" % (pc
, ins
& 0xffffffff, bin(ins
)))
1673 log("CIA NIA", self
.respect_pc
, self
.pc
.CIA
.value
, self
.pc
.NIA
.value
)
1675 yield self
.dec2
.sv_rm
.eq(0)
1676 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff)
1677 yield self
.dec2
.dec
.bigendian
.eq(self
.bigendian
)
1678 yield self
.dec2
.state
.msr
.eq(self
.msr
.value
)
1679 yield self
.dec2
.state
.pc
.eq(pc
)
1680 if self
.svstate
is not None:
1681 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
1683 # SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
1685 opcode
= yield self
.dec2
.dec
.opcode_in
1686 opcode
= SelectableInt(value
=opcode
, bits
=32)
1687 pfx
= SVP64Instruction
.Prefix(opcode
)
1688 log("prefix test: opcode:", pfx
.PO
, bin(pfx
.PO
), pfx
.id)
1689 self
.is_svp64_mode
= bool((pfx
.PO
== 0b000001) and (pfx
.id == 0b11))
1690 self
.pc
.update_nia(self
.is_svp64_mode
)
1692 yield self
.dec2
.is_svp64_mode
.eq(self
.is_svp64_mode
)
1693 self
.namespace
['NIA'] = self
.pc
.NIA
1694 self
.namespace
['SVSTATE'] = self
.svstate
1695 if not self
.is_svp64_mode
:
1698 # in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
1699 log("svp64.rm", bin(pfx
.rm
))
1700 log(" svstate.vl", self
.svstate
.vl
)
1701 log(" svstate.mvl", self
.svstate
.maxvl
)
1702 ins
= self
.imem
.ld(pc
+4, 4, False, True, instr_fetch
=True)
1703 log(" svsetup: 0x%x 0x%x %s" % (pc
+4, ins
& 0xffffffff, bin(ins
)))
1704 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff) # v3.0B suffix
1705 yield self
.dec2
.sv_rm
.eq(int(pfx
.rm
)) # svp64 prefix
1708 def execute_one(self
):
1709 """execute one instruction
1711 self
.insnlog
= [] # log the instruction
1712 # get the disassembly code for this instruction
1713 if not self
.disassembly
:
1714 code
= yield from self
.get_assembly_name()
1717 if self
.is_svp64_mode
:
1718 offs
, dbg
= 4, "svp64 "
1719 code
= self
.disassembly
[self
._pc
+offs
]
1720 log(" %s sim-execute" % dbg
, hex(self
._pc
), code
)
1721 self
.insnlog
.append(code
)
1722 opname
= code
.split(' ')[0]
1724 yield from self
.call(opname
) # execute the instruction
1725 except MemException
as e
: # check for memory errors
1726 if e
.args
[0] == 'unaligned': # alignment error
1727 # run a Trap but set DAR first
1728 print("memory unaligned exception, DAR", e
.dar
, repr(e
))
1729 self
.spr
['DAR'] = SelectableInt(e
.dar
, 64)
1730 self
.call_trap(0x600, PIb
.PRIV
) # 0x600, privileged
1732 elif e
.args
[0] == 'invalid': # invalid
1733 # run a Trap but set DAR first
1734 log("RADIX MMU memory invalid error, mode %s" % e
.mode
)
1735 if e
.mode
== 'EXECUTE':
1736 # XXX TODO: must set a few bits in SRR1,
1737 # see microwatt loadstore1.vhdl
1738 # if m_in.segerr = '0' then
1739 # v.srr1(47 - 33) := m_in.invalid;
1740 # v.srr1(47 - 35) := m_in.perm_error; -- noexec fault
1741 # v.srr1(47 - 44) := m_in.badtree;
1742 # v.srr1(47 - 45) := m_in.rc_error;
1743 # v.intr_vec := 16#400#;
1745 # v.intr_vec := 16#480#;
1746 self
.call_trap(0x400, PIb
.PRIV
) # 0x400, privileged
1748 self
.call_trap(0x300, PIb
.PRIV
) # 0x300, privileged
1750 # not supported yet:
1751 raise e
# ... re-raise
1753 # append the log file
1754 with
open("/tmp/insnlog.txt", "a+") as f
:
1755 f
.write(" ".join(self
.insnlog
)+"\n")
1757 log("gprs after code", code
)
1760 for i
in range(len(self
.crl
)):
1761 crs
.append(bin(self
.crl
[i
].asint()))
1762 log("crs", " ".join(crs
))
1763 log("vl,maxvl", self
.svstate
.vl
, self
.svstate
.maxvl
)
1765 # don't use this except in special circumstances
1766 if not self
.respect_pc
:
1769 log("execute one, CIA NIA", hex(self
.pc
.CIA
.value
),
1770 hex(self
.pc
.NIA
.value
))
1772 def get_assembly_name(self
):
1773 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1774 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1775 dec_insn
= yield self
.dec2
.e
.do
.insn
1776 insn_1_11
= yield self
.dec2
.e
.do
.insn
[1:11]
1777 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1778 int_op
= yield self
.dec2
.dec
.op
.internal_op
1779 log("get assembly name asmcode", asmcode
, int_op
,
1780 hex(dec_insn
), bin(insn_1_11
))
1781 asmop
= insns
.get(asmcode
, None)
1783 # sigh reconstruct the assembly instruction name
1784 if hasattr(self
.dec2
.e
.do
, "oe"):
1785 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
1786 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
1790 if hasattr(self
.dec2
.e
.do
, "rc"):
1791 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
1792 rc_ok
= yield self
.dec2
.e
.do
.rc
.ok
1796 # annoying: ignore rc_ok if RC1 is set (for creating *assembly name*)
1797 RC1
= yield self
.dec2
.rm_dec
.RC1
1801 # grrrr have to special-case MUL op (see DecodeOE)
1802 log("ov %d en %d rc %d en %d op %d" %
1803 (ov_ok
, ov_en
, rc_ok
, rc_en
, int_op
))
1804 if int_op
in [MicrOp
.OP_MUL_H64
.value
, MicrOp
.OP_MUL_H32
.value
]:
1809 if not asmop
.endswith("."): # don't add "." to "andis."
1812 if hasattr(self
.dec2
.e
.do
, "lk"):
1813 lk
= yield self
.dec2
.e
.do
.lk
1816 log("int_op", int_op
)
1817 if int_op
in [MicrOp
.OP_B
.value
, MicrOp
.OP_BC
.value
]:
1818 AA
= yield self
.dec2
.dec
.fields
.FormI
.AA
[0:-1]
1822 spr_msb
= yield from self
.get_spr_msb()
1823 if int_op
== MicrOp
.OP_MFCR
.value
:
1828 # XXX TODO: for whatever weird reason this doesn't work
1829 # https://bugs.libre-soc.org/show_bug.cgi?id=390
1830 if int_op
== MicrOp
.OP_MTCRF
.value
:
def reset_remaps(self):
    """Reset per-SVSHAPE REMAP state: clear all four loop-end flags
    and restore the identity index mapping.
    """
    self.remap_loopends = [0] * 4
    self.remap_idxs = list(range(4))
1841 def get_remap_indices(self
):
1842 """WARNING, this function stores remap_idxs and remap_loopends
1843 in the class for later use. this to avoid problems with yield
1845 # go through all iterators in lock-step, advance to next remap_idx
1846 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
1847 # get four SVSHAPEs. here we are hard-coding
1849 SVSHAPE0
= self
.spr
['SVSHAPE0']
1850 SVSHAPE1
= self
.spr
['SVSHAPE1']
1851 SVSHAPE2
= self
.spr
['SVSHAPE2']
1852 SVSHAPE3
= self
.spr
['SVSHAPE3']
1853 # set up the iterators
1854 remaps
= [(SVSHAPE0
, SVSHAPE0
.get_iterator()),
1855 (SVSHAPE1
, SVSHAPE1
.get_iterator()),
1856 (SVSHAPE2
, SVSHAPE2
.get_iterator()),
1857 (SVSHAPE3
, SVSHAPE3
.get_iterator()),
1861 for i
, (shape
, remap
) in enumerate(remaps
):
1862 # zero is "disabled"
1863 if shape
.value
== 0x0:
1864 self
.remap_idxs
[i
] = 0
1865 # pick src or dststep depending on reg num (0-2=in, 3-4=out)
1866 step
= dststep
if (i
in [3, 4]) else srcstep
1867 # this is terrible. O(N^2) looking for the match. but hey.
1868 for idx
, (remap_idx
, loopends
) in enumerate(remap
):
1871 self
.remap_idxs
[i
] = remap_idx
1872 self
.remap_loopends
[i
] = loopends
1873 dbg
.append((i
, step
, remap_idx
, loopends
))
1874 for (i
, step
, remap_idx
, loopends
) in dbg
:
1875 log("SVSHAPE %d idx, end" % i
, step
, remap_idx
, bin(loopends
))
def get_spr_msb(self):
    """Yield-read the raw decoded instruction and report whether bit 20
    (the SPR-number MSB) is set.  Returns a bool.
    """
    raw_insn = yield self.dec2.e.do.insn
    return (raw_insn >> 20) & 1 != 0  # sigh - XFF.spr[-1]?
1882 def call(self
, name
):
1883 """call(opcode) - the primary execution point for instructions
1885 self
.last_st_addr
= None # reset the last known store address
1886 self
.last_ld_addr
= None # etc.
1888 ins_name
= name
.strip() # remove spaces if not already done so
1890 log("halted - not executing", ins_name
)
1893 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1894 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1895 asmop
= yield from self
.get_assembly_name()
1896 log("call", ins_name
, asmop
)
1898 # sv.setvl is *not* a loop-function. sigh
1899 log("is_svp64_mode", self
.is_svp64_mode
, asmop
)
1902 int_op
= yield self
.dec2
.dec
.op
.internal_op
1903 spr_msb
= yield from self
.get_spr_msb()
1905 instr_is_privileged
= False
1906 if int_op
in [MicrOp
.OP_ATTN
.value
,
1907 MicrOp
.OP_MFMSR
.value
,
1908 MicrOp
.OP_MTMSR
.value
,
1909 MicrOp
.OP_MTMSRD
.value
,
1911 MicrOp
.OP_RFID
.value
]:
1912 instr_is_privileged
= True
1913 if int_op
in [MicrOp
.OP_MFSPR
.value
,
1914 MicrOp
.OP_MTSPR
.value
] and spr_msb
:
1915 instr_is_privileged
= True
1917 log("is priv", instr_is_privileged
, hex(self
.msr
.value
),
1919 # check MSR priv bit and whether op is privileged: if so, throw trap
1920 if instr_is_privileged
and self
.msr
[MSRb
.PR
] == 1:
1921 self
.call_trap(0x700, PIb
.PRIV
)
1924 # check halted condition
1925 if ins_name
== 'attn':
1929 # check illegal instruction
1931 if ins_name
not in ['mtcrf', 'mtocrf']:
1932 illegal
= ins_name
!= asmop
1934 # list of instructions not being supported by binutils (.long)
1935 dotstrp
= asmop
[:-1] if asmop
[-1] == '.' else asmop
1936 if dotstrp
in [*FPTRANS_INSNS
,
1938 'ffmadds', 'fdmadds', 'ffadds',
1940 'setvl', 'svindex', 'svremap', 'svstep',
1941 'svshape', 'svshape2',
1942 'grev', 'ternlogi', 'bmask', 'cprop',
1943 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
1944 'fmvis', 'fishmv', 'pcdec', "maddedu", "divmod2du",
1945 "dsld", "dsrd", "maddedus",
1946 "shadd", "shaddw", "shadduw",
1947 "fcvttg", "fcvttgo", "fcvttgs", "fcvttgso",
1949 "fcvtfg", "fcvtfgs",
1951 "maddsubrs", "maddrs"
1956 # branch-conditional redirects to sv.bc
1957 if asmop
.startswith('bc') and self
.is_svp64_mode
:
1958 ins_name
= 'sv.%s' % ins_name
1960 # ld-immediate-with-pi mode redirects to ld-with-postinc
1961 ldst_imm_postinc
= False
1962 if 'u' in ins_name
and self
.is_svp64_mode
:
1963 ldst_pi
= yield self
.dec2
.rm_dec
.ldst_postinc
1965 ins_name
= ins_name
.replace("u", "up")
1966 ldst_imm_postinc
= True
1967 log(" enable ld/st postinc", ins_name
)
1969 log(" post-processed name", dotstrp
, ins_name
, asmop
)
1971 # illegal instructions call TRAP at 0x700
1973 print("illegal", ins_name
, asmop
)
1974 self
.call_trap(0x700, PIb
.ILLEG
)
1975 print("name %s != %s - calling ILLEGAL trap, PC: %x" %
1976 (ins_name
, asmop
, self
.pc
.CIA
.value
))
1979 # this is for setvl "Vertical" mode: if set true,
1980 # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
1981 # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
1982 self
.allow_next_step_inc
= False
1983 self
.svstate_next_mode
= 0
1985 # nop has to be supported, we could let the actual op calculate
1986 # but PowerDecoder has a pattern for nop
1987 if ins_name
== 'nop':
1988 self
.update_pc_next()
1991 # get elwidths, defaults to 64
1995 if self
.is_svp64_mode
:
1996 ew_src
= yield self
.dec2
.rm_dec
.ew_src
1997 ew_dst
= yield self
.dec2
.rm_dec
.ew_dst
1998 ew_src
= 8 << (3-int(ew_src
)) # convert to bitlength
1999 ew_dst
= 8 << (3-int(ew_dst
)) # convert to bitlength
2000 xlen
= max(ew_src
, ew_dst
)
2001 log("elwdith", ew_src
, ew_dst
)
2002 log("XLEN:", self
.is_svp64_mode
, xlen
)
2004 # look up instruction in ISA.instrs, prepare namespace
2005 if ins_name
== 'pcdec': # grrrr yes there are others ("stbcx." etc.)
2006 info
= self
.instrs
[ins_name
+"."]
2007 elif asmop
[-1] == '.' and asmop
in self
.instrs
:
2008 info
= self
.instrs
[asmop
]
2010 info
= self
.instrs
[ins_name
]
2011 yield from self
.prep_namespace(ins_name
, info
.form
, info
.op_fields
,
2014 # preserve order of register names
2015 input_names
= create_args(list(info
.read_regs
) +
2016 list(info
.uninit_regs
))
2017 log("input names", input_names
)
2019 # get SVP64 entry for the current instruction
2020 sv_rm
= self
.svp64rm
.instrs
.get(ins_name
)
2021 if sv_rm
is not None:
2022 dest_cr
, src_cr
, src_byname
, dest_byname
= decode_extra(sv_rm
)
2024 dest_cr
, src_cr
, src_byname
, dest_byname
= False, False, {}, {}
2025 log("sv rm", sv_rm
, dest_cr
, src_cr
, src_byname
, dest_byname
)
2027 # see if srcstep/dststep need skipping over masked-out predicate bits
2028 # svstep also needs advancement because it calls SVSTATE_NEXT.
2029 # bit the remaps get computed just after pre_inc moves them on
2030 # with remap_set_steps substituting for PowerDecider2 not doing it,
2031 # and SVSTATE_NEXT not being able to.use yield, the preinc on
2032 # svstep is necessary for now.
2034 if (self
.is_svp64_mode
or ins_name
in ['svstep']):
2035 yield from self
.svstate_pre_inc()
2036 if self
.is_svp64_mode
:
2037 pre
= yield from self
.update_new_svstate_steps()
2039 self
.svp64_reset_loop()
2041 self
.update_pc_next()
2043 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2044 pred_dst_zero
= self
.pred_dst_zero
2045 pred_src_zero
= self
.pred_src_zero
2046 vl
= self
.svstate
.vl
2047 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2049 # VL=0 in SVP64 mode means "do nothing: skip instruction"
2050 if self
.is_svp64_mode
and vl
== 0:
2051 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
2052 log("SVP64: VL=0, end of call", self
.namespace
['CIA'],
2053 self
.namespace
['NIA'], kind
=LogKind
.InstrInOuts
)
2056 # for when SVREMAP is active, using pre-arranged schedule.
2057 # note: modifying PowerDecoder2 needs to "settle"
2058 remap_en
= self
.svstate
.SVme
2059 persist
= self
.svstate
.RMpst
2060 active
= (persist
or self
.last_op_svshape
) and remap_en
!= 0
2061 if self
.is_svp64_mode
:
2062 yield self
.dec2
.remap_active
.eq(remap_en
if active
else 0)
2064 if persist
or self
.last_op_svshape
:
2065 remaps
= self
.get_remap_indices()
2066 if self
.is_svp64_mode
and (persist
or self
.last_op_svshape
):
2067 yield from self
.remap_set_steps(remaps
)
2068 # after that, settle down (combinatorial) to let Vector reg numbers
2069 # work themselves out
2071 if self
.is_svp64_mode
:
2072 remap_active
= yield self
.dec2
.remap_active
2074 remap_active
= False
2075 log("remap active", bin(remap_active
))
2077 # main input registers (RT, RA ...)
2079 for name
in input_names
:
2080 regval
= (yield from self
.get_input(name
, ew_src
))
2081 log("regval name", name
, regval
)
2082 inputs
.append(regval
)
2084 # arrrrgh, awful hack, to get _RT into namespace
2085 if ins_name
in ['setvl', 'svstep']:
2087 RT
= yield self
.dec2
.dec
.RT
2088 self
.namespace
[regname
] = SelectableInt(RT
, 5)
2090 self
.namespace
["RT"] = SelectableInt(0, 5)
2091 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, "RT")
2092 log('hack input reg %s %s' % (name
, str(regnum
)), is_vec
)
2094 # in SVP64 mode for LD/ST work out immediate
2095 # XXX TODO: replace_ds for DS-Form rather than D-Form.
2096 # use info.form to detect
2097 if self
.is_svp64_mode
and not ldst_imm_postinc
:
2098 yield from self
.check_replace_d(info
, remap_active
)
2100 # "special" registers
2101 for special
in info
.special_regs
:
2102 if special
in special_sprs
:
2103 inputs
.append(self
.spr
[special
])
2105 inputs
.append(self
.namespace
[special
])
2107 # clear trap (trap) NIA
2108 self
.trap_nia
= None
2110 # check if this was an sv.bc* and create an indicator that
2111 # this is the last check to be made as a loop. combined with
2112 # the ALL/ANY mode we can early-exit
2113 if self
.is_svp64_mode
and ins_name
.startswith("sv.bc"):
2114 no_in_vec
= yield self
.dec2
.no_in_vec
# BI is scalar
2115 end_loop
= no_in_vec
or srcstep
== vl
-1 or dststep
== vl
-1
2116 self
.namespace
['end_loop'] = SelectableInt(end_loop
, 1)
2118 inp_ca_ov
= (self
.spr
['XER'][XER_bits
['CA']].value
,
2119 self
.spr
['XER'][XER_bits
['OV']].value
)
2121 # execute actual instruction here (finally)
2122 log("inputs", inputs
)
2123 results
= info
.func(self
, *inputs
)
2124 output_names
= create_args(info
.write_regs
)
2126 for out
, n
in zip(results
or [], output_names
):
2128 log("results", outs
)
2130 # "inject" decorator takes namespace from function locals: we need to
2131 # overwrite NIA being overwritten (sigh)
2132 if self
.trap_nia
is not None:
2133 self
.namespace
['NIA'] = self
.trap_nia
2135 log("after func", self
.namespace
['CIA'], self
.namespace
['NIA'])
2137 # check if op was a LD/ST so that debugging can check the
2139 if int_op
in [MicrOp
.OP_STORE
.value
,
2141 self
.last_st_addr
= self
.mem
.last_st_addr
2142 if int_op
in [MicrOp
.OP_LOAD
.value
,
2144 self
.last_ld_addr
= self
.mem
.last_ld_addr
2145 log("op", int_op
, MicrOp
.OP_STORE
.value
, MicrOp
.OP_LOAD
.value
,
2146 self
.last_st_addr
, self
.last_ld_addr
)
2148 # detect if CA/CA32 already in outputs (sra*, basically)
2150 ca32
= outs
.get("CA32")
2152 log("carry already done?", ca
, ca32
, output_names
)
2153 carry_en
= yield self
.dec2
.e
.do
.output_carry
2155 yield from self
.handle_carry_(
2156 inputs
, results
[0], ca
, ca32
, inp_ca_ov
=inp_ca_ov
)
2158 # get outout named "overflow" and "CR0"
2159 overflow
= outs
.get('overflow')
2160 cr0
= outs
.get('CR0')
2162 if not self
.is_svp64_mode
: # yeah just no. not in parallel processing
2163 # detect if overflow was in return result
2164 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2165 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2166 log("internal overflow", ins_name
, overflow
, "en?", ov_en
, ov_ok
)
2168 yield from self
.handle_overflow(
2169 inputs
, results
[0], overflow
, inp_ca_ov
=inp_ca_ov
)
2171 # only do SVP64 dest predicated Rc=1 if dest-pred is not enabled
2173 if not self
.is_svp64_mode
or not pred_dst_zero
:
2174 if hasattr(self
.dec2
.e
.do
, "rc"):
2175 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2176 # don't do Rc=1 for svstep it is handled explicitly.
2177 # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
2178 # to write directly to CR0 instead of in ISACaller. hooyahh.
2179 if rc_en
and ins_name
not in ['svstep']:
2180 yield from self
.do_rc_ov(ins_name
, results
[0], overflow
, cr0
)
2183 ffirst_hit
= False, False
2184 if self
.is_svp64_mode
:
2185 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2186 is_cr
= sv_mode
== SVMode
.CROP
.value
2187 chk
= rc_en
or is_cr
2188 ffirst_hit
= (yield from self
.check_ffirst(info
, chk
, srcstep
))
2190 # any modified return results?
2191 yield from self
.do_outregs_nia(asmop
, ins_name
, info
, outs
,
2192 carry_en
, rc_en
, ffirst_hit
, ew_dst
)
# NOTE(review): this extract is line-mangled and several original source
# lines are missing (gaps in numbering, e.g. 2203-2205, 2209, 2214);
# code fragments below are preserved byte-for-byte.
# Purpose (from visible code): data-dependent fail-first check — reads
# the rm_dec fail-first controls, tests one bit of the just-written CR
# field against the inversion flag, and on a "hit" truncates SVSTATE.VL
# to srcstep (+vli) and re-settles the decoder.
2194 def check_ffirst(self
, info
, rc_en
, srcstep
):
2195 """fail-first mode: checks a bit of Rc Vector, truncates VL
2197 rm_mode
= yield self
.dec2
.rm_dec
.mode
2198 ff_inv
= yield self
.dec2
.rm_dec
.inv
2199 cr_bit
= yield self
.dec2
.rm_dec
.cr_sel
2200 RC1
= yield self
.dec2
.rm_dec
.RC1
2201 vli_
= yield self
.dec2
.rm_dec
.vli
# VL inclusive if truncated
2202 log(" ff rm_mode", rc_en
, rm_mode
, SVP64RMMode
.FFIRST
.value
)
2206 log(" cr_bit", cr_bit
)
2207 log(" rc_en", rc_en
)
2208 if not rc_en
or rm_mode
!= SVP64RMMode
.FFIRST
.value
:
2210 # get the CR vector, do BO-test
2212 log("asmregs", info
.asmregs
[0], info
.write_regs
)
2213 if 'CR' in info
.write_regs
and 'BF' in info
.asmregs
[0]:
2215 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, crf
)
2216 crtest
= self
.crl
[regnum
]
2217 ffirst_hit
= crtest
[cr_bit
] != ff_inv
2218 log("cr test", crf
, regnum
, int(crtest
), crtest
, cr_bit
, ff_inv
)
2219 log("cr test?", ffirst_hit
)
2222 # Fail-first activated, truncate VL
2223 vli
= SelectableInt(int(vli_
), 7)
2224 self
.svstate
.vl
= srcstep
+ vli
2225 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
2226 yield Settle() # let decoder update
# NOTE(review): line-mangled extract with missing original lines
# (e.g. 2232-2233, 2237, 2239); fragments kept byte-for-byte.
# Purpose (from visible code): Rc=1 handling — pick the CR field to
# write (CR1 for FP ops, presumably CR0 otherwise — confirm), and
# either derive it via handle_comparison (implicit Rc=1; for
# svstep/setvl comparing SVSTATE.VL) or copy an explicit CR0 result
# straight into self.crl[regnum].
2229 def do_rc_ov(self
, ins_name
, result
, overflow
, cr0
):
2230 if ins_name
.startswith("f"):
2231 rc_reg
= "CR1" # not calculated correctly yet (not FP compares)
2234 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, rc_reg
)
2235 # hang on... for `setvl` actually you want to test SVSTATE.VL
2236 is_setvl
= ins_name
in ('svstep', 'setvl')
2238 result
= SelectableInt(result
.vl
, 64)
2240 # overflow = None # do not override overflow except in setvl
2242 # if there was not an explicit CR0 in the pseudocode, do implicit Rc=1
2244 self
.handle_comparison(result
, regnum
, overflow
, no_so
=is_setvl
)
2246 # otherwise we just blat CR0 into the required regnum
2247 log("explicit rc0", cr0
)
2248 self
.crl
[regnum
].eq(cr0
)
# NOTE(review): line-mangled extract with missing original lines
# (2262-2263, 2265-2266, 2269-2270); fragments kept byte-for-byte.
# Purpose (from visible code): write back all instruction outputs via
# check_write (unless fail-first hit without VLI), optionally reset the
# SVP64 loop, then ask check_step_increment whether the PC may advance
# and, presumably when it may, call update_pc_next — TODO confirm the
# missing conditional lines.
2250 def do_outregs_nia(self
, asmop
, ins_name
, info
, outs
,
2251 ca_en
, rc_en
, ffirst_hit
, ew_dst
):
2252 ffirst_hit
, vli
= ffirst_hit
2253 # write out any regs for this instruction, but only if fail-first is ok
2254 # XXX TODO: allow CR-vector to be written out even if ffirst fails
2255 if not ffirst_hit
or vli
:
2256 for name
, output
in outs
.items():
2257 yield from self
.check_write(info
, name
, output
, ca_en
, ew_dst
)
2258 # restore the CR value on non-VLI failfirst (from sv.cmp and others
2259 # which write directly to CR in the pseudocode (gah, what a mess)
2260 # if ffirst_hit and not vli:
2261 # self.cr.value = self.cr_backup
2264 self
.svp64_reset_loop()
2267 # check advancement of src/dst/sub-steps and if PC needs updating
2268 nia_update
= (yield from self
.check_step_increment(rc_en
,
2271 self
.update_pc_next()
# NOTE(review): line-mangled extract with missing original lines
# (e.g. 2283, 2288, 2290, 2293, 2308-2309, 2315, 2321); fragments
# kept byte-for-byte.
# Purpose (from visible code): for SVP64 LD/ST, recompute the D/DS
# immediate per element: pick srcstep (LD) or dststep (ST), scale the
# offset by data width (unit-strided) or multiply the immediate
# (element-strided), then publish the replacement into
# namespace['DS'] or namespace['D'].
2273 def check_replace_d(self
, info
, remap_active
):
2274 replace_d
= False # update / replace constant in pseudocode
2275 ldstmode
= yield self
.dec2
.rm_dec
.ldstmode
2276 vl
= self
.svstate
.vl
2277 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2278 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
2279 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
2280 if info
.form
== 'DS':
2281 # DS-Form, multiply by 4 then knock 2 bits off after
2282 imm
= yield self
.dec2
.dec
.fields
.FormDS
.DS
[0:14] * 4
2284 imm
= yield self
.dec2
.dec
.fields
.FormD
.D
[0:16]
2285 imm
= exts(imm
, 16) # sign-extend to integer
2286 # get the right step. LD is from srcstep, ST is dststep
2287 op
= yield self
.dec2
.e
.do
.insn_type
2289 if op
== MicrOp
.OP_LOAD
.value
:
2291 offsmul
= yield self
.dec2
.in1_step
2292 log("D-field REMAP src", imm
, offsmul
, ldstmode
)
2294 offsmul
= (srcstep
* (subvl
+1)) + ssubstep
2295 log("D-field src", imm
, offsmul
, ldstmode
)
2296 elif op
== MicrOp
.OP_STORE
.value
:
2297 # XXX NOTE! no bit-reversed STORE! this should not ever be used
2298 offsmul
= (dststep
* (subvl
+1)) + dsubstep
2299 log("D-field dst", imm
, offsmul
, ldstmode
)
2300 # Unit-Strided LD/ST adds offset*width to immediate
2301 if ldstmode
== SVP64LDSTmode
.UNITSTRIDE
.value
:
2302 ldst_len
= yield self
.dec2
.e
.do
.data_len
2303 imm
= SelectableInt(imm
+ offsmul
* ldst_len
, 32)
2305 # Element-strided multiplies the immediate by element step
2306 elif ldstmode
== SVP64LDSTmode
.ELSTRIDE
.value
:
2307 imm
= SelectableInt(imm
* offsmul
, 32)
2310 ldst_ra_vec
= yield self
.dec2
.rm_dec
.ldst_ra_vec
2311 ldst_imz_in
= yield self
.dec2
.rm_dec
.ldst_imz_in
2312 log("LDSTmode", SVP64LDSTmode(ldstmode
),
2313 offsmul
, imm
, ldst_ra_vec
, ldst_imz_in
)
2314 # new replacement D... errr.. DS
2316 if info
.form
== 'DS':
2317 # TODO: assert 2 LSBs are zero?
2318 log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm
.value
))
2319 imm
.value
= imm
.value
>> 2
2320 self
.namespace
['DS'] = imm
2322 self
.namespace
['D'] = imm
# NOTE(review): line-mangled extract with missing original lines
# (e.g. 2328, 2332, 2337, 2347, 2350, and the trailing return);
# fragments kept byte-for-byte.
# Purpose (from visible code): resolve an operand name (RA/RB/RC/RS)
# to a register index via PowerDecoder2, expose "_NAME" in the
# namespace (non-SVP64 or elwidth==64 only), then read the value from
# FPR or GPR at the given element width — or yield a zero value when
# source predicate-zeroing is active.
2324 def get_input(self
, name
, ew_src
):
2325 # using PowerDecoder2, first, find the decoder index.
2326 # (mapping name RA RB RC RS to in1, in2, in3)
2327 regnum
, is_vec
= yield from get_idx_in(self
.dec2
, name
, True)
2329 # doing this is not part of svp64, it's because output
2330 # registers, to be modified, need to be in the namespace.
2331 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, name
, True)
2333 regnum
, is_vec
= yield from get_idx_out2(self
.dec2
, name
, True)
2335 if isinstance(regnum
, tuple):
2336 (regnum
, base
, offs
) = regnum
2338 base
, offs
= regnum
, 0 # temporary HACK
2340 # in case getting the register number is needed, _RA, _RB
2341 # (HACK: only in straight non-svp64-mode for now, or elwidth == 64)
2342 regname
= "_" + name
2343 if not self
.is_svp64_mode
or ew_src
== 64:
2344 self
.namespace
[regname
] = regnum
2345 elif regname
in self
.namespace
:
2346 del self
.namespace
[regname
]
2348 if not self
.is_svp64_mode
or not self
.pred_src_zero
:
2349 log('reading reg %s %s' % (name
, str(regnum
)), is_vec
)
2351 reg_val
= SelectableInt(self
.fpr(base
, is_vec
, offs
, ew_src
))
2352 log("read reg %d/%d: 0x%x" % (base
, offs
, reg_val
.value
))
2353 self
.insnlog
.append("rFPR:%d.%d/%d" % (base
, offs
, ew_src
))
2354 elif name
is not None:
2355 reg_val
= SelectableInt(self
.gpr(base
, is_vec
, offs
, ew_src
))
2356 self
.insnlog
.append("rGPR:%d.%d/%d" % (base
, offs
, ew_src
))
2357 log("read reg %d/%d: 0x%x" % (base
, offs
, reg_val
.value
))
2359 log('zero input reg %s %s' % (name
, str(regnum
)), is_vec
)
2360 reg_val
= SelectableInt(0, ew_src
)
# NOTE(review): line-mangled extract with missing original lines
# (e.g. 2369-2372, 2391-2392, 2396, 2398, 2400, 2402, 2414, 2418, 2422);
# fragments kept byte-for-byte.
# Purpose (from visible code): because REMAP has no HDL implementation,
# explicitly drive dec2.in1/in2/in3/o/o2_step with the current remap
# indices selected by SVSTATE.mi0-2/mo0-1, remapping each of
# RA/RB/RC/RT/EA, with debug logging of the SVSHAPE SPRs.
2363 def remap_set_steps(self
, remaps
):
2364 """remap_set_steps sets up the in1/2/3 and out1/2 steps.
2365 they work in concert with PowerDecoder2 at the moment,
2366 there is no HDL implementation of REMAP. therefore this
2367 function, because ISACaller still uses PowerDecoder2,
2368 will *explicitly* write the dec2.XX_step values. this has
2371 # just some convenient debug info
2373 sname
= 'SVSHAPE%d' % i
2374 shape
= self
.spr
[sname
]
2375 log(sname
, bin(shape
.value
))
2376 log(" lims", shape
.lims
)
2377 log(" mode", shape
.mode
)
2378 log(" skip", shape
.skip
)
2380 # set up the list of steps to remap
2381 mi0
= self
.svstate
.mi0
2382 mi1
= self
.svstate
.mi1
2383 mi2
= self
.svstate
.mi2
2384 mo0
= self
.svstate
.mo0
2385 mo1
= self
.svstate
.mo1
2386 steps
= [[self
.dec2
.in1_step
, mi0
], # RA
2387 [self
.dec2
.in2_step
, mi1
], # RB
2388 [self
.dec2
.in3_step
, mi2
], # RC
2389 [self
.dec2
.o_step
, mo0
], # RT
2390 [self
.dec2
.o2_step
, mo1
], # EA
2393 rnames
= ['RA', 'RB', 'RC', 'RT', 'RS']
2394 for i
, reg
in enumerate(rnames
):
2395 idx
= yield from get_idx_map(self
.dec2
, reg
)
2397 idx
= yield from get_idx_map(self
.dec2
, "F"+reg
)
2399 steps
[i
][0] = self
.dec2
.in1_step
2401 steps
[i
][0] = self
.dec2
.in2_step
2403 steps
[i
][0] = self
.dec2
.in3_step
2404 log("remap step", i
, reg
, idx
, steps
[i
][1])
2405 remap_idxs
= self
.remap_idxs
2407 # now cross-index the required SHAPE for each of 3-in 2-out regs
2408 rnames
= ['RA', 'RB', 'RC', 'RT', 'EA']
2409 for i
, (dstep
, shape_idx
) in enumerate(steps
):
2410 (shape
, remap
) = remaps
[shape_idx
]
2411 remap_idx
= remap_idxs
[shape_idx
]
2412 # zero is "disabled"
2413 if shape
.value
== 0x0:
2415 # now set the actual requested step to the current index
2416 if dstep
is not None:
2417 yield dstep
.eq(remap_idx
)
2419 # debug printout info
2420 rremaps
.append((shape
.mode
, hex(shape
.value
), dstep
,
2421 i
, rnames
[i
], shape_idx
, remap_idx
))
2423 log("shape remap", x
)
# NOTE(review): line-mangled extract with missing original lines
# (e.g. 2427, 2429, 2432, 2434, 2438, 2440, 2447, 2449, 2451,
# 2454, 2456, 2459-2464, 2476, 2479); fragments kept byte-for-byte.
# Purpose (from visible code): route one named result to its
# destination — XER CA/CA32 bits, special SPRs, or a GPR/FPR write at
# elwidth ew_dst via get_idx_out/get_idx_out2, honouring destination
# predicate-zeroing and truncating oversized values to 64 bits.
2425 def check_write(self
, info
, name
, output
, carry_en
, ew_dst
):
2426 if name
== 'overflow': # ignore, done already (above)
2428 if name
== 'CR0': # ignore, done already (above)
2430 if isinstance(output
, int):
2431 output
= SelectableInt(output
, EFFECTIVELY_UNLIMITED
)
2433 if name
in ['CA', 'CA32']:
2435 log("writing %s to XER" % name
, output
)
2436 log("write XER %s 0x%x" % (name
, output
.value
))
2437 self
.spr
['XER'][XER_bits
[name
]] = output
.value
2439 log("NOT writing %s to XER" % name
, output
)
2441 # write special SPRs
2442 if name
in info
.special_regs
:
2443 log('writing special %s' % name
, output
, special_sprs
)
2444 log("write reg %s 0x%x" % (name
, output
.value
))
2445 if name
in special_sprs
:
2446 self
.spr
[name
] = output
2448 self
.namespace
[name
].eq(output
)
2450 log('msr written', hex(self
.msr
.value
))
2452 # find out1/out2 PR/FPR
2453 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, name
, True)
2455 regnum
, is_vec
= yield from get_idx_out2(self
.dec2
, name
, True)
2457 # temporary hack for not having 2nd output
2458 regnum
= yield getattr(self
.decoder
, name
)
2460 # convenient debug prefix
2465 # check zeroing due to predicate bit being zero
2466 if self
.is_svp64_mode
and self
.pred_dst_zero
:
2467 log('zeroing reg %s %s' % (str(regnum
), str(output
)), is_vec
)
2468 output
= SelectableInt(0, EFFECTIVELY_UNLIMITED
)
2469 log("write reg %s%s 0x%x ew %d" % (reg_prefix
, str(regnum
),
2470 output
.value
, ew_dst
),
2471 kind
=LogKind
.InstrInOuts
)
2472 # zero-extend to 64 bit before storing (should use EXT oh well)
2473 if output
.bits
> 64:
2474 output
= SelectableInt(output
.value
, 64)
2475 rnum
, base
, offset
= regnum
2477 self
.fpr
.write(regnum
, output
, is_vec
, ew_dst
)
2478 self
.insnlog
.append("wFPR:%d.%d/%d" % (rnum
, offset
, ew_dst
))
2480 self
.gpr
.write(regnum
, output
, is_vec
, ew_dst
)
2481 self
.insnlog
.append("wGPR:%d.%d/%d" % (rnum
, offset
, ew_dst
))
# NOTE(review): line-mangled extract with missing original lines
# (e.g. 2489, 2492, 2497-2502, 2507, 2512-2514, 2516, 2520, 2532,
# 2535, 2537, 2544, 2549-2550); fragments kept byte-for-byte.
# Purpose (from visible code): decide whether the PC may advance —
# either delegate to svstate_post_inc for the normal SVP64
# sub-PC loop, or service an explicitly requested "svstep"
# increment (allow_next_step_inc), advancing steps, optionally
# setting CR0 from the loop-end test, and resetting the loop /
# Vertical-First flag at loop end.
2483 def check_step_increment(self
, rc_en
, asmop
, ins_name
):
2484 # check if it is the SVSTATE.src/dest step that needs incrementing
2485 # this is our Sub-Program-Counter loop from 0 to VL-1
2486 if not self
.allow_next_step_inc
:
2487 if self
.is_svp64_mode
:
2488 return (yield from self
.svstate_post_inc(ins_name
))
2490 # XXX only in non-SVP64 mode!
2491 # record state of whether the current operation was an svshape,
2493 # to be able to know if it should apply in the next instruction.
2494 # also (if going to use this instruction) should disable ability
2495 # to interrupt in between. sigh.
2496 self
.last_op_svshape
= asmop
in ['svremap', 'svindex',
2503 log("SVSTATE_NEXT: inc requested, mode",
2504 self
.svstate_next_mode
, self
.allow_next_step_inc
)
2505 yield from self
.svstate_pre_inc()
2506 pre
= yield from self
.update_new_svstate_steps()
2508 # reset at end of loop including exit Vertical Mode
2509 log("SVSTATE_NEXT: end of loop, reset")
2510 self
.svp64_reset_loop()
2511 self
.svstate
.vfirst
= 0
2515 self
.handle_comparison(SelectableInt(0, 64)) # CR0
2517 if self
.allow_next_step_inc
== 2:
2518 log("SVSTATE_NEXT: read")
2519 nia_update
= (yield from self
.svstate_post_inc(ins_name
))
2521 log("SVSTATE_NEXT: post-inc")
2522 # use actual (cached) src/dst-step here to check end
2523 remaps
= self
.get_remap_indices()
2524 remap_idxs
= self
.remap_idxs
2525 vl
= self
.svstate
.vl
2526 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2527 if self
.allow_next_step_inc
!= 2:
2528 yield from self
.advance_svstate_steps()
2529 #self.namespace['SVSTATE'] = self.svstate.spr
2530 # set CR0 (if Rc=1) based on end
2531 endtest
= 1 if self
.at_loopend() else 0
2533 #results = [SelectableInt(endtest, 64)]
2534 # self.handle_comparison(results) # CR0
2536 # see if svstep was requested, if so, which SVSTATE
2538 if self
.svstate_next_mode
> 0:
2539 shape_idx
= self
.svstate_next_mode
.value
-1
2540 endings
= self
.remap_loopends
[shape_idx
]
2541 cr_field
= SelectableInt((~endings
) << 1 | endtest
, 4)
2542 log("svstep Rc=1, CR0", cr_field
, endtest
)
2543 self
.crl
[0].eq(cr_field
) # CR0
2545 # reset at end of loop including exit Vertical Mode
2546 log("SVSTATE_NEXT: after increments, reset")
2547 self
.svp64_reset_loop()
2548 self
.svstate
.vfirst
= 0
def SVSTATE_NEXT(self, mode, submode):
    """Advance srcstep/dststep to the next element ("Vertical-First").

    Invoked from the setvl pseudo-code as the pseudo-op "svstep".

    WARNING: this function uses information that was created EARLIER
    due to it being in the middle of a yield, but this function is
    *NOT* called from yield (it's called from compiled pseudocode).
    """
    self.allow_next_step_inc = submode.value + 1
    log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
    self.svstate_next_mode = mode
    # modes 1-4: return the current REMAP index for the selected shape
    if 0 < self.svstate_next_mode < 5:
        remap_sel = self.svstate_next_mode.value - 1
        return SelectableInt(self.remap_idxs[remap_sel], 7)
    # modes 5-8: one-shot read of a step/substep counter; the mode
    # resets to zero as soon as it has been consumed
    counter_by_mode = {5: 'srcstep', 6: 'dststep',
                       7: 'ssubstep', 8: 'dsubstep'}
    for selector, counter in counter_by_mode.items():
        if self.svstate_next_mode == selector:
            self.svstate_next_mode = 0
            return SelectableInt(getattr(self.svstate, counter), 7)
    return SelectableInt(0, 7)
def get_src_dststeps(self):
    """Return the cached step counters as a 4-tuple:
    (srcstep, dststep, ssubstep, dsubstep).
    """
    steps = (self.new_srcstep, self.new_dststep)
    substeps = (self.new_ssubstep, self.new_dsubstep)
    return steps + substeps
def update_svstate_namespace(self, overwrite_svstate=True):
    """Publish SVSTATE into the namespace and re-sync the decoder.

    When overwrite_svstate is True, the cached new_* step counters are
    first copied into the SVSTATE SPR.  The decoder (dec2) state is
    then updated with the SVSTATE value and allowed to settle.
    """
    if overwrite_svstate:
        # note, do not get the bit-reversed srcstep here!
        self.svstate.srcstep = self.new_srcstep
        self.svstate.dststep = self.new_dststep
        self.svstate.ssubstep = self.new_ssubstep
        self.svstate.dsubstep = self.new_dsubstep
    self.namespace['SVSTATE'] = self.svstate
    yield self.dec2.state.svstate.eq(self.svstate.value)
    yield Settle()  # let decoder update
# NOTE(review): line-mangled extract with missing original lines
# (e.g. 2617, 2621, 2625, 2627, 2630-2631 — presumably further log
# lines); fragments kept byte-for-byte.
# Purpose (from visible code): push the new steps into SVSTATE via
# update_svstate_namespace, read back the counters and rm_dec
# controls for logging, and return True when either src or dst has
# reached the end (substep == subvl and step == vl).
2601 def update_new_svstate_steps(self
, overwrite_svstate
=True):
2602 yield from self
.update_svstate_namespace(overwrite_svstate
)
2603 srcstep
= self
.svstate
.srcstep
2604 dststep
= self
.svstate
.dststep
2605 ssubstep
= self
.svstate
.ssubstep
2606 dsubstep
= self
.svstate
.dsubstep
2607 pack
= self
.svstate
.pack
2608 unpack
= self
.svstate
.unpack
2609 vl
= self
.svstate
.vl
2610 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2611 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2612 rm_mode
= yield self
.dec2
.rm_dec
.mode
2613 ff_inv
= yield self
.dec2
.rm_dec
.inv
2614 cr_bit
= yield self
.dec2
.rm_dec
.cr_sel
2615 log(" srcstep", srcstep
)
2616 log(" dststep", dststep
)
2618 log(" unpack", unpack
)
2619 log(" ssubstep", ssubstep
)
2620 log(" dsubstep", dsubstep
)
2622 log(" subvl", subvl
)
2623 log(" rm_mode", rm_mode
)
2624 log(" sv_mode", sv_mode
)
2626 log(" cr_bit", cr_bit
)
2628 # check if end reached (we let srcstep overrun, above)
2629 # nothing needs doing (TODO zeroing): just do next instruction
2632 return ((ssubstep
== subvl
and srcstep
== vl
) or
2633 (dsubstep
== subvl
and dststep
== vl
))
# NOTE(review): line-mangled extract with missing original lines
# (e.g. 2640-2642, 2678, 2681, 2687, 2696-2699, 2702); fragments
# kept byte-for-byte.
# Purpose (from visible code): after executing one element, decide
# whether the sub-PC (srcstep/dststep) loop continues.  Handles the
# Vertical-First early-out, sv.bc* loop-exit via namespace['end_loop'],
# twin-predication (P2) vectorness, and while still looping forces
# NIA == CIA so the same instruction repeats; returns False to block
# the normal PC update during the sub-PC loop.
2635 def svstate_post_inc(self
, insn_name
, vf
=0):
2636 # check if SV "Vertical First" mode is enabled
2637 vfirst
= self
.svstate
.vfirst
2638 log(" SV Vertical First", vf
, vfirst
)
2639 if not vf
and vfirst
== 1:
2643 # check if it is the SVSTATE.src/dest step that needs incrementing
2644 # this is our Sub-Program-Counter loop from 0 to VL-1
2645 # XXX twin predication TODO
2646 vl
= self
.svstate
.vl
2647 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2648 mvl
= self
.svstate
.maxvl
2649 srcstep
= self
.svstate
.srcstep
2650 dststep
= self
.svstate
.dststep
2651 ssubstep
= self
.svstate
.ssubstep
2652 dsubstep
= self
.svstate
.dsubstep
2653 pack
= self
.svstate
.pack
2654 unpack
= self
.svstate
.unpack
2655 rm_mode
= yield self
.dec2
.rm_dec
.mode
2656 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
2657 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
2658 out_vec
= not (yield self
.dec2
.no_out_vec
)
2659 in_vec
= not (yield self
.dec2
.no_in_vec
)
2660 log(" svstate.vl", vl
)
2661 log(" svstate.mvl", mvl
)
2662 log(" rm.subvl", subvl
)
2663 log(" svstate.srcstep", srcstep
)
2664 log(" svstate.dststep", dststep
)
2665 log(" svstate.ssubstep", ssubstep
)
2666 log(" svstate.dsubstep", dsubstep
)
2667 log(" svstate.pack", pack
)
2668 log(" svstate.unpack", unpack
)
2669 log(" mode", rm_mode
)
2670 log(" reverse", reverse_gear
)
2671 log(" out_vec", out_vec
)
2672 log(" in_vec", in_vec
)
2673 log(" sv_ptype", sv_ptype
, sv_ptype
== SVPType
.P2
.value
)
2674 # check if this was an sv.bc* and if so did it succeed
2675 if self
.is_svp64_mode
and insn_name
.startswith("sv.bc"):
2676 end_loop
= self
.namespace
['end_loop']
2677 log("branch %s end_loop" % insn_name
, end_loop
)
2679 self
.svp64_reset_loop()
2680 self
.update_pc_next()
2682 # check if srcstep needs incrementing by one, stop PC advancing
2683 # but for 2-pred both src/dest have to be checked.
2684 # XXX this might not be true! it may just be LD/ST
2685 if sv_ptype
== SVPType
.P2
.value
:
2686 svp64_is_vector
= (out_vec
or in_vec
)
2688 svp64_is_vector
= out_vec
2689 # loops end at the first "hit" (source or dest)
2690 yield from self
.advance_svstate_steps()
2691 loopend
= self
.loopend
2692 log("loopend", svp64_is_vector
, loopend
)
2693 if not svp64_is_vector
or loopend
:
2694 # reset loop to zero and update NIA
2695 self
.svp64_reset_loop()
2700 # still looping, advance and update NIA
2701 self
.namespace
['SVSTATE'] = self
.svstate
2703 # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
2704 # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
2705 # this way we keep repeating the same instruction (with new steps)
2706 self
.pc
.NIA
.value
= self
.pc
.CIA
.value
2707 self
.namespace
['NIA'] = self
.pc
.NIA
2708 log("end of sub-pc call", self
.namespace
['CIA'], self
.namespace
['NIA'])
2709 return False # DO NOT allow PC update whilst Sub-PC loop running
def update_pc_next(self):
    """Commit the program counter update (CIA/NIA) from the namespace."""
    ns = self.namespace
    self.pc.update(ns, self.is_svp64_mode)
    #self.svstate.spr = self.namespace['SVSTATE']
    log("end of call", ns['CIA'], ns['NIA'], ns['SVSTATE'])
def svp64_reset_loop(self):
    """Zero all four SVP64 loop counters, clear the loop-end flag,
    and publish the updated SVSTATE back into the namespace.
    """
    for counter in ('srcstep', 'dststep', 'ssubstep', 'dsubstep'):
        setattr(self.svstate, counter, 0)
    self.loopend = False
    log(" svstate.srcstep loop end (PC to update)")
    self.namespace['SVSTATE'] = self.svstate
def update_nia(self):
    """Advance NIA in the PC tracker and mirror it into the namespace."""
    pc = self.pc
    pc.update_nia(self.is_svp64_mode)
    self.namespace['NIA'] = pc.NIA
# NOTE(review): line-mangled extract; the enclosing `def inject():`
# line and several interior lines (e.g. 2746 @wraps?, 2748 try:,
# 2752, 2766-2767 returns) are missing from the extraction; fragments
# kept byte-for-byte.
# Purpose (from visible code): decorator that copies args[0].namespace
# into the wrapped function's __globals__ before the call (so namespace
# entries read as bare names inside compiled pseudocode), then stores
# the mutated globals back into args[0].namespace afterwards.
# CAUTION: func_globals.update(context) mutates the function's real
# module globals; saved_values is taken but the undo is commented out.
2734 """Decorator factory.
2736 this decorator will "inject" variables into the function's namespace,
2737 from the *dictionary* in self.namespace. it therefore becomes possible
2738 to make it look like a whole stack of variables which would otherwise
2739 need "self." inserted in front of them (*and* for those variables to be
2740 added to the instance) "appear" in the function.
2742 "self.namespace['SI']" for example becomes accessible as just "SI" but
2743 *only* inside the function, when decorated.
2745 def variable_injector(func
):
2747 def decorator(*args
, **kwargs
):
2749 func_globals
= func
.__globals
__ # Python 2.6+
2750 except AttributeError:
2751 func_globals
= func
.func_globals
# Earlier versions.
2753 context
= args
[0].namespace
# variables to be injected
2754 saved_values
= func_globals
.copy() # Shallow copy of dict.
2755 log("globals before", context
.keys())
2756 func_globals
.update(context
)
2757 result
= func(*args
, **kwargs
)
2758 log("globals after", func_globals
['CIA'], func_globals
['NIA'])
2759 log("args[0]", args
[0].namespace
['CIA'],
2760 args
[0].namespace
['NIA'],
2761 args
[0].namespace
['SVSTATE'])
2762 if 'end_loop' in func_globals
:
2763 log("args[0] end_loop", func_globals
['end_loop'])
2764 args
[0].namespace
= func_globals
2765 #exec (func.__code__, func_globals)
2768 # func_globals = saved_values # Undo changes.
2774 return variable_injector