1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Copyright (C) 2020 Michael Nolan
4 # Funded by NLnet http://nlnet.nl
5 """core of the python-based POWER9 simulator
7 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
8 not speed, it is for both learning and educational purposes, as well as
9 a method of verifying the HDL.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=424
16 from collections
import namedtuple
17 from copy
import deepcopy
18 from functools
import wraps
20 from nmigen
.sim
import Settle
21 from openpower
.consts
import (MSRb
, PIb
, # big-endian (PowerISA versions)
22 SVP64CROffs
, SVP64MODEb
)
23 from openpower
.decoder
.helpers
import (ISACallerHelper
, ISAFPHelpers
, exts
,
25 from openpower
.decoder
.isa
.mem
import Mem
, MemException
26 from openpower
.decoder
.isa
.radixmmu
import RADIX
27 from openpower
.decoder
.isa
.svshape
import SVSHAPE
28 from openpower
.decoder
.isa
.svstate
import SVP64State
29 from openpower
.decoder
.orderedset
import OrderedSet
30 from openpower
.decoder
.power_enums
import (FPTRANS_INSNS
, CRInSel
, CROutSel
,
31 In1Sel
, In2Sel
, In3Sel
, LDSTMode
,
32 MicrOp
, OutSel
, SVMode
,
33 SVP64LDSTmode
, SVP64PredCR
,
34 SVP64PredInt
, SVP64PredMode
,
35 SVP64RMMode
, SVPType
, XER_bits
,
36 insns
, spr_byname
, spr_dict
)
37 from openpower
.decoder
.power_insn
import SVP64Instruction
38 from openpower
.decoder
.power_svp64
import SVP64RM
, decode_extra
39 from openpower
.decoder
.selectable_int
import (FieldSelectableInt
,
40 SelectableInt
, selectconcat
)
41 from openpower
.util
import LogKind
, log
# metadata record for one decoded instruction: the python function that
# implements it, the register names it reads / leaves uninitialised /
# writes, any special registers, its operand fields, instruction form
# and the assembler-level register names
instruction_info = namedtuple('instruction_info',
                              'func read_regs uninit_regs write_regs ' +
                              'special_regs op_fields form asmregs')
55 # rrright. this is here basically because the compiler pywriter returns
56 # results in a specific priority order. to make sure regs match up they
57 # need partial sorting. sigh.
59 # TODO (lkcl): adjust other registers that should be in a particular order
60 # probably CA, CA32, and CR
86 "overflow": 7, # should definitely be last
# floating-point register operand names (FP instruction forms);
# used to distinguish FPR accesses from GPR accesses by operand name
fregs = ['FRA', 'FRB', 'FRC', 'FRS', 'FRT']
def get_masked_reg(regs, base, offs, ew_bits):
    """Read one element of width *ew_bits* from a 64-bit register file.

    Registers are treated as a contiguous array of ew_bits-wide elements
    starting at register *base*; *offs* is the element index.

    regs: indexable register file holding plain 64-bit integer values
    base: first 64-bit register of the element array
    offs: element index (in ew_bits-sized units) from that base
    ew_bits: element width in bits (must divide 64: 8/16/32/64)
    returns: the selected element as a plain int
    """
    # rrrright. start by breaking down into row/col, based on elwidth:
    # which 64-bit register (row) and which element within it (col)
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # shift down so element we want is at LSB
    val >>= gpr_col * ew_bits
    # mask so we only return the LSB element
    return val & mask
def set_masked_reg(regs, base, offs, ew_bits, value):
    """Write one element of width *ew_bits* into a 64-bit register file.

    Inverse of get_masked_reg: read-modify-write of the 64-bit register
    containing element *offs* of the ew_bits-wide array at *base*.

    regs: indexable register file holding plain 64-bit integer values
    base: first 64-bit register of the element array
    offs: element index (in ew_bits-sized units) from that base
    ew_bits: element width in bits (must divide 64: 8/16/32/64)
    value: element value to store (truncated to ew_bits)
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64//ew_bits)
    gpr_col = offs % (64//ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits)-1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base+gpr_offs]
    # now mask out the bit we don't want
    val = val & ~(mask << (gpr_col*ew_bits))
    # then wipe the bits we don't want from the value
    value = value & mask
    # OR the new value in, shifted up
    val |= value << (gpr_col*ew_bits)
    regs[base+gpr_offs] = val
def create_args(reglist, extra=None):
    """Return a deduplicated, priority-sorted list of register names.

    Duplicates are removed (first-seen order preserved via OrderedSet),
    then sorted by REG_SORT_ORDER so that results line up with the
    priority order the pywriter compiler emits them in.

    reglist: iterable of register name strings (may contain duplicates)
    extra: optional value prepended to the result (e.g. 'self')
    returns: list of register names, with *extra* first if given
    """
    retval = list(OrderedSet(reglist))
    # unknown names sort first (priority 0)
    retval.sort(key=lambda reg: REG_SORT_ORDER.get(reg, 0))
    if extra is not None:
        return [extra] + retval
    return retval
133 def __init__(self
, decoder
, isacaller
, svstate
, regfile
):
136 self
.isacaller
= isacaller
137 self
.svstate
= svstate
138 for i
in range(len(regfile
)):
139 self
[i
] = SelectableInt(regfile
[i
], 64)
141 def __call__(self
, ridx
, is_vec
=False, offs
=0, elwidth
=64):
142 if isinstance(ridx
, SelectableInt
):
145 return self
[ridx
+offs
]
146 # rrrright. start by breaking down into row/col, based on elwidth
147 gpr_offs
= offs
// (64//elwidth
)
148 gpr_col
= offs
% (64//elwidth
)
149 # now select the 64-bit register, but get its value (easier)
150 val
= self
[ridx
+gpr_offs
].value
151 # now shift down and mask out
152 val
= val
>> (gpr_col
*elwidth
) & ((1 << elwidth
)-1)
153 # finally, return a SelectableInt at the required elwidth
154 log("GPR call", ridx
, "isvec", is_vec
, "offs", offs
,
155 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
))
156 return SelectableInt(val
, elwidth
)
158 def set_form(self
, form
):
161 def write(self
, rnum
, value
, is_vec
=False, elwidth
=64):
163 if isinstance(rnum
, SelectableInt
):
165 if isinstance(value
, SelectableInt
):
168 if isinstance(rnum
, tuple):
169 rnum
, base
, offs
= rnum
172 # rrrright. start by breaking down into row/col, based on elwidth
173 gpr_offs
= offs
// (64//elwidth
)
174 gpr_col
= offs
% (64//elwidth
)
175 # compute the mask based on elwidth
176 mask
= (1 << elwidth
)-1
177 # now select the 64-bit register, but get its value (easier)
178 val
= self
[base
+gpr_offs
].value
179 # now mask out the bit we don't want
180 val
= val
& ~
(mask
<< (gpr_col
*elwidth
))
181 # then wipe the bit we don't want from the value
183 # OR the new value in, shifted up
184 val |
= value
<< (gpr_col
*elwidth
)
185 # finally put the damn value into the regfile
186 log("GPR write", base
, "isvec", is_vec
, "offs", offs
,
187 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
),
189 dict.__setitem
__(self
, base
+gpr_offs
, SelectableInt(val
, 64))
191 def __setitem__(self
, rnum
, value
):
192 # rnum = rnum.value # only SelectableInt allowed
193 log("GPR setitem", rnum
, value
)
194 if isinstance(rnum
, SelectableInt
):
196 dict.__setitem
__(self
, rnum
, value
)
198 def getz(self
, rnum
):
199 # rnum = rnum.value # only SelectableInt allowed
200 log("GPR getzero?", rnum
)
202 return SelectableInt(0, 64)
205 def _get_regnum(self
, attr
):
206 getform
= self
.sd
.sigforms
[self
.form
]
207 rnum
= getattr(getform
, attr
)
210 def ___getitem__(self
, attr
):
211 """ XXX currently not used
213 rnum
= self
._get
_regnum
(attr
)
214 log("GPR getitem", attr
, rnum
)
215 return self
.regfile
[rnum
]
217 def dump(self
, printout
=True):
219 for i
in range(len(self
)):
220 res
.append(self
[i
].value
)
222 for i
in range(0, len(res
), 8):
225 s
.append("%08x" % res
[i
+j
])
227 print("reg", "%2d" % i
, s
)
232 def __init__(self
, dec2
, initial_sprs
={}):
235 for key
, v
in initial_sprs
.items():
236 if isinstance(key
, SelectableInt
):
238 key
= special_sprs
.get(key
, key
)
239 if isinstance(key
, int):
242 info
= spr_byname
[key
]
243 if not isinstance(v
, SelectableInt
):
244 v
= SelectableInt(v
, info
.length
)
247 def __getitem__(self
, key
):
249 log("dict", self
.items())
250 # if key in special_sprs get the special spr, otherwise return key
251 if isinstance(key
, SelectableInt
):
253 if isinstance(key
, int):
254 key
= spr_dict
[key
].SPR
255 key
= special_sprs
.get(key
, key
)
256 if key
== 'HSRR0': # HACK!
258 if key
== 'HSRR1': # HACK!
261 res
= dict.__getitem
__(self
, key
)
263 if isinstance(key
, int):
266 info
= spr_byname
[key
]
267 dict.__setitem
__(self
, key
, SelectableInt(0, info
.length
))
268 res
= dict.__getitem
__(self
, key
)
269 log("spr returning", key
, res
)
272 def __setitem__(self
, key
, value
):
273 if isinstance(key
, SelectableInt
):
275 if isinstance(key
, int):
276 key
= spr_dict
[key
].SPR
278 key
= special_sprs
.get(key
, key
)
279 if key
== 'HSRR0': # HACK!
280 self
.__setitem
__('SRR0', value
)
281 if key
== 'HSRR1': # HACK!
282 self
.__setitem
__('SRR1', value
)
283 log("setting spr", key
, value
)
284 dict.__setitem
__(self
, key
, value
)
286 def __call__(self
, ridx
):
289 def dump(self
, printout
=True):
291 keys
= list(self
.keys())
294 sprname
= spr_dict
.get(k
, None)
298 sprname
= sprname
.SPR
299 res
.append((sprname
, self
[k
].value
))
301 for sprname
, value
in res
:
302 print(" ", sprname
, hex(value
))
    def __init__(self, pc_init=0):
        """Program-counter pair: CIA (current) and NIA (next) as 64-bit
        SelectableInts.

        pc_init: initial Current Instruction Address.
        """
        self.CIA = SelectableInt(pc_init, 64)
        # default next-instruction increment is 4 bytes; SVP64 prefixed
        # instructions need 8 (see update_nia)
        self.NIA = self.CIA + SelectableInt(4, 64)  # only true for v3.0B!
    def update_nia(self, is_svp64):
        """Set NIA to CIA plus the instruction size: 8 bytes for an
        SVP64 prefixed instruction, 4 bytes for a regular word
        instruction.
        """
        increment = 8 if is_svp64 else 4
        self.NIA = self.CIA + SelectableInt(increment, 64)
315 def update(self
, namespace
, is_svp64
):
316 """updates the program counter (PC) by 4 if v3.0B mode or 8 if SVP64
318 self
.CIA
= namespace
['NIA'].narrow(64)
319 self
.update_nia(is_svp64
)
320 namespace
['CIA'] = self
.CIA
321 namespace
['NIA'] = self
.NIA
325 # See PowerISA Version 3.0 B Book 1
326 # Section 2.3.1 Condition Register pages 30 - 31
328 LT
= FL
= 0 # negative, less than, floating-point less than
329 GT
= FG
= 1 # positive, greater than, floating-point greater than
330 EQ
= FE
= 2 # equal, floating-point equal
331 SO
= FU
= 3 # summary overflow, floating-point unordered
333 def __init__(self
, init
=0):
334 # rev_cr = int('{:016b}'.format(initial_cr)[::-1], 2)
335 # self.cr = FieldSelectableInt(self._cr, list(range(32, 64)))
336 self
.cr
= SelectableInt(init
, 64) # underlying reg
337 # field-selectable versions of Condition Register TODO check bitranges?
340 bits
= tuple(range(i
*4+32, (i
+1)*4+32))
341 _cr
= FieldSelectableInt(self
.cr
, bits
)
345 # decode SVP64 predicate integer to reg number and invert
346 def get_predint(gpr
, mask
):
350 log("get_predint", mask
, SVP64PredInt
.ALWAYS
.value
)
351 if mask
== SVP64PredInt
.ALWAYS
.value
:
352 return 0xffff_ffff_ffff_ffff # 64 bits of 1
353 if mask
== SVP64PredInt
.R3_UNARY
.value
:
354 return 1 << (r3
.value
& 0b111111)
355 if mask
== SVP64PredInt
.R3
.value
:
357 if mask
== SVP64PredInt
.R3_N
.value
:
359 if mask
== SVP64PredInt
.R10
.value
:
361 if mask
== SVP64PredInt
.R10_N
.value
:
363 if mask
== SVP64PredInt
.R30
.value
:
365 if mask
== SVP64PredInt
.R30_N
.value
:
369 # decode SVP64 predicate CR to reg number and invert status
370 def _get_predcr(mask
):
371 if mask
== SVP64PredCR
.LT
.value
:
373 if mask
== SVP64PredCR
.GE
.value
:
375 if mask
== SVP64PredCR
.GT
.value
:
377 if mask
== SVP64PredCR
.LE
.value
:
379 if mask
== SVP64PredCR
.EQ
.value
:
381 if mask
== SVP64PredCR
.NE
.value
:
383 if mask
== SVP64PredCR
.SO
.value
:
385 if mask
== SVP64PredCR
.NS
.value
:
389 # read individual CR fields (0..VL-1), extract the required bit
390 # and construct the mask
391 def get_predcr(crl
, mask
, vl
):
392 idx
, noninv
= _get_predcr(mask
)
395 cr
= crl
[i
+SVP64CROffs
.CRPred
]
396 if cr
[idx
].value
== noninv
:
401 # TODO, really should just be using PowerDecoder2
402 def get_idx_map(dec2
, name
):
404 in1_sel
= yield op
.in1_sel
405 in2_sel
= yield op
.in2_sel
406 in3_sel
= yield op
.in3_sel
407 in1
= yield dec2
.e
.read_reg1
.data
408 # identify which regnames map to in1/2/3
409 if name
== 'RA' or name
== 'RA_OR_ZERO':
410 if (in1_sel
== In1Sel
.RA
.value
or
411 (in1_sel
== In1Sel
.RA_OR_ZERO
.value
and in1
!= 0)):
413 if in1_sel
== In1Sel
.RA_OR_ZERO
.value
:
416 if in2_sel
== In2Sel
.RB
.value
:
418 if in3_sel
== In3Sel
.RB
.value
:
420 # XXX TODO, RC doesn't exist yet!
422 if in3_sel
== In3Sel
.RC
.value
:
424 elif name
in ['EA', 'RS']:
425 if in1_sel
== In1Sel
.RS
.value
:
427 if in2_sel
== In2Sel
.RS
.value
:
429 if in3_sel
== In3Sel
.RS
.value
:
432 if in1_sel
== In1Sel
.FRA
.value
:
435 if in2_sel
== In2Sel
.FRB
.value
:
438 if in3_sel
== In3Sel
.FRC
.value
:
441 if in1_sel
== In1Sel
.FRS
.value
:
443 if in3_sel
== In3Sel
.FRS
.value
:
448 # TODO, really should just be using PowerDecoder2
449 def get_idx_in(dec2
, name
, ewmode
=False):
450 idx
= yield from get_idx_map(dec2
, name
)
454 in1_sel
= yield op
.in1_sel
455 in2_sel
= yield op
.in2_sel
456 in3_sel
= yield op
.in3_sel
457 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
458 in1
= yield dec2
.e
.read_reg1
.data
459 in2
= yield dec2
.e
.read_reg2
.data
460 in3
= yield dec2
.e
.read_reg3
.data
462 in1_base
= yield dec2
.e
.read_reg1
.base
463 in2_base
= yield dec2
.e
.read_reg2
.base
464 in3_base
= yield dec2
.e
.read_reg3
.base
465 in1_offs
= yield dec2
.e
.read_reg1
.offs
466 in2_offs
= yield dec2
.e
.read_reg2
.offs
467 in3_offs
= yield dec2
.e
.read_reg3
.offs
468 in1
= (in1
, in1_base
, in1_offs
)
469 in2
= (in2
, in2_base
, in2_offs
)
470 in3
= (in3
, in3_base
, in3_offs
)
472 in1_isvec
= yield dec2
.in1_isvec
473 in2_isvec
= yield dec2
.in2_isvec
474 in3_isvec
= yield dec2
.in3_isvec
475 log("get_idx_in in1", name
, in1_sel
, In1Sel
.RA
.value
,
477 log("get_idx_in in2", name
, in2_sel
, In2Sel
.RB
.value
,
479 log("get_idx_in in3", name
, in3_sel
, In3Sel
.RS
.value
,
481 log("get_idx_in FRS in3", name
, in3_sel
, In3Sel
.FRS
.value
,
483 log("get_idx_in FRB in2", name
, in2_sel
, In2Sel
.FRB
.value
,
485 log("get_idx_in FRC in3", name
, in3_sel
, In3Sel
.FRC
.value
,
488 return in1
, in1_isvec
490 return in2
, in2_isvec
492 return in3
, in3_isvec
496 # TODO, really should just be using PowerDecoder2
497 def get_cr_in(dec2
, name
):
499 in_sel
= yield op
.cr_in
500 in_bitfield
= yield dec2
.dec_cr_in
.cr_bitfield
.data
501 sv_cr_in
= yield op
.sv_cr_in
502 spec
= yield dec2
.crin_svdec
.spec
503 sv_override
= yield dec2
.dec_cr_in
.sv_override
504 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
505 in1
= yield dec2
.e
.read_cr1
.data
506 cr_isvec
= yield dec2
.cr_in_isvec
507 log("get_cr_in", in_sel
, CROutSel
.CR0
.value
, in1
, cr_isvec
)
508 log(" sv_cr_in", sv_cr_in
)
509 log(" cr_bf", in_bitfield
)
511 log(" override", sv_override
)
512 # identify which regnames map to in / o2
514 if in_sel
== CRInSel
.BI
.value
:
516 log("get_cr_in not found", name
)
520 # TODO, really should just be using PowerDecoder2
521 def get_cr_out(dec2
, name
):
523 out_sel
= yield op
.cr_out
524 out_bitfield
= yield dec2
.dec_cr_out
.cr_bitfield
.data
525 sv_cr_out
= yield op
.sv_cr_out
526 spec
= yield dec2
.crout_svdec
.spec
527 sv_override
= yield dec2
.dec_cr_out
.sv_override
528 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
529 out
= yield dec2
.e
.write_cr
.data
530 o_isvec
= yield dec2
.cr_out_isvec
531 log("get_cr_out", out_sel
, CROutSel
.CR0
.value
, out
, o_isvec
)
532 log(" sv_cr_out", sv_cr_out
)
533 log(" cr_bf", out_bitfield
)
535 log(" override", sv_override
)
536 # identify which regnames map to out / o2
538 if out_sel
== CROutSel
.BF
.value
:
541 if out_sel
== CROutSel
.CR0
.value
:
543 if name
== 'CR1': # these are not actually calculated correctly
544 if out_sel
== CROutSel
.CR1
.value
:
546 # check RC1 set? if so return implicit vector, this is a REAL bad hack
547 RC1
= yield dec2
.rm_dec
.RC1
549 log("get_cr_out RC1 mode")
551 return 0, True # XXX TODO: offset CR0 from SVSTATE SPR
553 return 1, True # XXX TODO: offset CR1 from SVSTATE SPR
555 log("get_cr_out not found", name
)
559 # TODO, really should just be using PowerDecoder2
560 def get_out_map(dec2
, name
):
562 out_sel
= yield op
.out_sel
563 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
564 out
= yield dec2
.e
.write_reg
.data
565 # identify which regnames map to out / o2
567 if out_sel
== OutSel
.RA
.value
:
570 if out_sel
== OutSel
.RT
.value
:
572 if out_sel
== OutSel
.RT_OR_ZERO
.value
and out
!= 0:
574 elif name
== 'RT_OR_ZERO':
575 if out_sel
== OutSel
.RT_OR_ZERO
.value
:
578 if out_sel
== OutSel
.FRA
.value
:
581 if out_sel
== OutSel
.FRT
.value
:
586 # TODO, really should just be using PowerDecoder2
587 def get_idx_out(dec2
, name
, ewmode
=False):
589 out_sel
= yield op
.out_sel
590 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
591 out
= yield dec2
.e
.write_reg
.data
592 o_isvec
= yield dec2
.o_isvec
594 offs
= yield dec2
.e
.write_reg
.offs
595 base
= yield dec2
.e
.write_reg
.base
596 out
= (out
, base
, offs
)
597 # identify which regnames map to out / o2
598 ismap
= yield from get_out_map(dec2
, name
)
600 log("get_idx_out", name
, out_sel
, out
, o_isvec
)
602 log("get_idx_out not found", name
, out_sel
, out
, o_isvec
)
606 # TODO, really should just be using PowerDecoder2
607 def get_out2_map(dec2
, name
):
608 # check first if register is activated for write
610 out_sel
= yield op
.out_sel
611 out
= yield dec2
.e
.write_ea
.data
612 out_ok
= yield dec2
.e
.write_ea
.ok
616 if name
in ['EA', 'RA']:
617 if hasattr(op
, "upd"):
618 # update mode LD/ST uses read-reg A also as an output
620 log("get_idx_out2", upd
, LDSTMode
.update
.value
,
621 out_sel
, OutSel
.RA
.value
,
623 if upd
== LDSTMode
.update
.value
:
626 fft_en
= yield dec2
.implicit_rs
628 log("get_idx_out2", out_sel
, OutSel
.RS
.value
,
632 fft_en
= yield dec2
.implicit_rs
634 log("get_idx_out2", out_sel
, OutSel
.FRS
.value
,
640 # TODO, really should just be using PowerDecoder2
641 def get_idx_out2(dec2
, name
, ewmode
=False):
642 # check first if register is activated for write
644 out_sel
= yield op
.out_sel
645 out
= yield dec2
.e
.write_ea
.data
647 offs
= yield dec2
.e
.write_ea
.offs
648 base
= yield dec2
.e
.write_ea
.base
649 out
= (out
, base
, offs
)
650 o_isvec
= yield dec2
.o2_isvec
651 ismap
= yield from get_out2_map(dec2
, name
)
653 log("get_idx_out2", name
, out_sel
, out
, o_isvec
)
659 """deals with svstate looping.
662 def __init__(self
, svstate
):
663 self
.svstate
= svstate
666 def new_iterators(self
):
667 self
.src_it
= self
.src_iterator()
668 self
.dst_it
= self
.dst_iterator()
672 self
.new_ssubstep
= 0
673 self
.new_dsubstep
= 0
674 self
.pred_dst_zero
= 0
675 self
.pred_src_zero
= 0
677 def src_iterator(self
):
678 """source-stepping iterator
680 pack
= self
.svstate
.pack
684 # pack advances subvl in *outer* loop
685 while True: # outer subvl loop
686 while True: # inner vl loop
689 srcmask
= self
.srcmask
690 srcstep
= self
.svstate
.srcstep
691 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
692 if self
.pred_sz
or pred_src_zero
:
693 self
.pred_src_zero
= not pred_src_zero
694 log(" advance src", srcstep
, vl
,
695 self
.svstate
.ssubstep
, subvl
)
696 # yield actual substep/srcstep
697 yield (self
.svstate
.ssubstep
, srcstep
)
698 # the way yield works these could have been modified.
701 srcstep
= self
.svstate
.srcstep
702 log(" advance src check", srcstep
, vl
,
703 self
.svstate
.ssubstep
, subvl
, srcstep
== vl
-1,
704 self
.svstate
.ssubstep
== subvl
)
705 if srcstep
== vl
-1: # end-point
706 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
707 if self
.svstate
.ssubstep
== subvl
: # end-point
708 log(" advance pack stop")
710 break # exit inner loop
711 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance ss
713 if self
.svstate
.ssubstep
== subvl
: # end-point
714 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
715 log(" advance pack stop")
717 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
720 # these cannot be done as for-loops because SVSTATE may change
721 # (srcstep/substep may be modified, interrupted, subvl/vl change)
722 # but they *can* be done as while-loops as long as every SVSTATE
723 # "thing" is re-read every single time a yield gives indices
724 while True: # outer vl loop
725 while True: # inner subvl loop
728 srcmask
= self
.srcmask
729 srcstep
= self
.svstate
.srcstep
730 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
731 if self
.pred_sz
or pred_src_zero
:
732 self
.pred_src_zero
= not pred_src_zero
733 log(" advance src", srcstep
, vl
,
734 self
.svstate
.ssubstep
, subvl
)
735 # yield actual substep/srcstep
736 yield (self
.svstate
.ssubstep
, srcstep
)
737 if self
.svstate
.ssubstep
== subvl
: # end-point
738 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
739 break # exit inner loop
740 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
742 if srcstep
== vl
-1: # end-point
743 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
746 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance srcstep
748 def dst_iterator(self
):
749 """dest-stepping iterator
751 unpack
= self
.svstate
.unpack
755 # pack advances subvl in *outer* loop
756 while True: # outer subvl loop
757 while True: # inner vl loop
760 dstmask
= self
.dstmask
761 dststep
= self
.svstate
.dststep
762 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
763 if self
.pred_dz
or pred_dst_zero
:
764 self
.pred_dst_zero
= not pred_dst_zero
765 log(" advance dst", dststep
, vl
,
766 self
.svstate
.dsubstep
, subvl
)
767 # yield actual substep/dststep
768 yield (self
.svstate
.dsubstep
, dststep
)
769 # the way yield works these could have been modified.
771 dststep
= self
.svstate
.dststep
772 log(" advance dst check", dststep
, vl
,
773 self
.svstate
.ssubstep
, subvl
)
774 if dststep
== vl
-1: # end-point
775 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
776 if self
.svstate
.dsubstep
== subvl
: # end-point
777 log(" advance unpack stop")
780 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance ds
782 if self
.svstate
.dsubstep
== subvl
: # end-point
783 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
784 log(" advance unpack stop")
786 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
788 # these cannot be done as for-loops because SVSTATE may change
789 # (dststep/substep may be modified, interrupted, subvl/vl change)
790 # but they *can* be done as while-loops as long as every SVSTATE
791 # "thing" is re-read every single time a yield gives indices
792 while True: # outer vl loop
793 while True: # inner subvl loop
795 dstmask
= self
.dstmask
796 dststep
= self
.svstate
.dststep
797 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
798 if self
.pred_dz
or pred_dst_zero
:
799 self
.pred_dst_zero
= not pred_dst_zero
800 log(" advance dst", dststep
, self
.svstate
.vl
,
801 self
.svstate
.dsubstep
, subvl
)
802 # yield actual substep/dststep
803 yield (self
.svstate
.dsubstep
, dststep
)
804 if self
.svstate
.dsubstep
== subvl
: # end-point
805 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
807 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
810 if dststep
== vl
-1: # end-point
811 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
813 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance dststep
815 def src_iterate(self
):
816 """source-stepping iterator
820 pack
= self
.svstate
.pack
821 unpack
= self
.svstate
.unpack
822 ssubstep
= self
.svstate
.ssubstep
823 end_ssub
= ssubstep
== subvl
824 end_src
= self
.svstate
.srcstep
== vl
-1
825 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
829 srcstep
= self
.svstate
.srcstep
830 srcmask
= self
.srcmask
832 # pack advances subvl in *outer* loop
834 assert srcstep
<= vl
-1
835 end_src
= srcstep
== vl
-1
840 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
844 srcstep
+= 1 # advance srcstep
845 if not self
.srcstep_skip
:
847 if ((1 << srcstep
) & srcmask
) != 0:
850 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
852 # advance subvl in *inner* loop
855 assert srcstep
<= vl
-1
856 end_src
= srcstep
== vl
-1
857 if end_src
: # end-point
863 if not self
.srcstep_skip
:
865 if ((1 << srcstep
) & srcmask
) != 0:
868 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
869 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
872 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
874 self
.svstate
.srcstep
= SelectableInt(srcstep
, 7)
875 log(" advance src", self
.svstate
.srcstep
, self
.svstate
.ssubstep
,
878 def dst_iterate(self
):
879 """dest step iterator
883 pack
= self
.svstate
.pack
884 unpack
= self
.svstate
.unpack
885 dsubstep
= self
.svstate
.dsubstep
886 end_dsub
= dsubstep
== subvl
887 dststep
= self
.svstate
.dststep
888 end_dst
= dststep
== vl
-1
889 dstmask
= self
.dstmask
890 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
895 # unpack advances subvl in *outer* loop
897 assert dststep
<= vl
-1
898 end_dst
= dststep
== vl
-1
903 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
907 dststep
+= 1 # advance dststep
908 if not self
.dststep_skip
:
910 if ((1 << dststep
) & dstmask
) != 0:
913 log(" dskip", bin(dstmask
), bin(1 << dststep
))
915 # advance subvl in *inner* loop
918 assert dststep
<= vl
-1
919 end_dst
= dststep
== vl
-1
920 if end_dst
: # end-point
926 if not self
.dststep_skip
:
928 if ((1 << dststep
) & dstmask
) != 0:
931 log(" dskip", bin(dstmask
), bin(1 << dststep
))
932 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
935 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
937 self
.svstate
.dststep
= SelectableInt(dststep
, 7)
938 log(" advance dst", self
.svstate
.dststep
, self
.svstate
.dsubstep
,
941 def at_loopend(self
):
942 """tells if this is the last possible element. uses the cached values
943 for src/dst-step and sub-steps
947 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
948 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
949 end_ssub
= ssubstep
== subvl
950 end_dsub
= dsubstep
== subvl
951 if srcstep
== vl
-1 and end_ssub
:
953 if dststep
== vl
-1 and end_dsub
:
957 def advance_svstate_steps(self
):
958 """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
959 TODO when Pack/Unpack is set, substep becomes the *outer* loop
961 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
962 if self
.loopend
: # huhn??
967 def read_src_mask(self
):
968 """read/update pred_sz and src mask
970 # get SVSTATE VL (oh and print out some debug stuff)
972 srcstep
= self
.svstate
.srcstep
973 ssubstep
= self
.svstate
.ssubstep
975 # get predicate mask (all 64 bits)
976 srcmask
= 0xffff_ffff_ffff_ffff
978 pmode
= yield self
.dec2
.rm_dec
.predmode
979 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
980 srcpred
= yield self
.dec2
.rm_dec
.srcpred
981 dstpred
= yield self
.dec2
.rm_dec
.dstpred
982 pred_sz
= yield self
.dec2
.rm_dec
.pred_sz
983 if pmode
== SVP64PredMode
.INT
.value
:
984 srcmask
= dstmask
= get_predint(self
.gpr
, dstpred
)
985 if sv_ptype
== SVPType
.P2
.value
:
986 srcmask
= get_predint(self
.gpr
, srcpred
)
987 elif pmode
== SVP64PredMode
.CR
.value
:
988 srcmask
= dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
989 if sv_ptype
== SVPType
.P2
.value
:
990 srcmask
= get_predcr(self
.crl
, srcpred
, vl
)
991 # work out if the ssubsteps are completed
992 ssubstart
= ssubstep
== 0
994 log(" ptype", sv_ptype
)
995 log(" srcpred", bin(srcpred
))
996 log(" srcmask", bin(srcmask
))
997 log(" pred_sz", bin(pred_sz
))
998 log(" ssubstart", ssubstart
)
1000 # store all that above
1001 self
.srcstep_skip
= False
1002 self
.srcmask
= srcmask
1003 self
.pred_sz
= pred_sz
1004 self
.new_ssubstep
= ssubstep
1005 log(" new ssubstep", ssubstep
)
1006 # until the predicate mask has a "1" bit... or we run out of VL
1007 # let srcstep==VL be the indicator to move to next instruction
1009 self
.srcstep_skip
= True
1011 def read_dst_mask(self
):
1012 """same as read_src_mask - check and record everything needed
1014 # get SVSTATE VL (oh and print out some debug stuff)
1015 # yield Delay(1e-10) # make changes visible
1016 vl
= self
.svstate
.vl
1017 dststep
= self
.svstate
.dststep
1018 dsubstep
= self
.svstate
.dsubstep
1020 # get predicate mask (all 64 bits)
1021 dstmask
= 0xffff_ffff_ffff_ffff
1023 pmode
= yield self
.dec2
.rm_dec
.predmode
1024 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
1025 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1026 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1027 pred_dz
= yield self
.dec2
.rm_dec
.pred_dz
1028 if pmode
== SVP64PredMode
.INT
.value
:
1029 dstmask
= get_predint(self
.gpr
, dstpred
)
1030 elif pmode
== SVP64PredMode
.CR
.value
:
1031 dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1032 # work out if the ssubsteps are completed
1033 dsubstart
= dsubstep
== 0
1034 log(" pmode", pmode
)
1035 log(" ptype", sv_ptype
)
1036 log(" dstpred", bin(dstpred
))
1037 log(" dstmask", bin(dstmask
))
1038 log(" pred_dz", bin(pred_dz
))
1039 log(" dsubstart", dsubstart
)
1041 self
.dststep_skip
= False
1042 self
.dstmask
= dstmask
1043 self
.pred_dz
= pred_dz
1044 self
.new_dsubstep
= dsubstep
1045 log(" new dsubstep", dsubstep
)
1047 self
.dststep_skip
= True
1049 def svstate_pre_inc(self
):
1050 """check if srcstep/dststep need to skip over masked-out predicate bits
1051 note that this is not supposed to do anything to substep,
1052 it is purely for skipping masked-out bits
1055 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1056 yield from self
.read_src_mask()
1057 yield from self
.read_dst_mask()
1064 srcstep
= self
.svstate
.srcstep
1065 srcmask
= self
.srcmask
1066 pred_src_zero
= self
.pred_sz
1067 vl
= self
.svstate
.vl
1068 # srcstep-skipping opportunity identified
1069 if self
.srcstep_skip
:
1070 # cannot do this with sv.bc - XXX TODO
1073 while (((1 << srcstep
) & srcmask
) == 0) and (srcstep
!= vl
):
1074 log(" sskip", bin(1 << srcstep
))
1077 # now work out if the relevant mask bits require zeroing
1079 pred_src_zero
= ((1 << srcstep
) & srcmask
) == 0
1081 # store new srcstep / dststep
1082 self
.new_srcstep
= srcstep
1083 self
.pred_src_zero
= pred_src_zero
1084 log(" new srcstep", srcstep
)
1087 # dststep-skipping opportunity identified
1088 dststep
= self
.svstate
.dststep
1089 dstmask
= self
.dstmask
1090 pred_dst_zero
= self
.pred_dz
1091 vl
= self
.svstate
.vl
1092 if self
.dststep_skip
:
1093 # cannot do this with sv.bc - XXX TODO
1096 while (((1 << dststep
) & dstmask
) == 0) and (dststep
!= vl
):
1097 log(" dskip", bin(1 << dststep
))
1100 # now work out if the relevant mask bits require zeroing
1102 pred_dst_zero
= ((1 << dststep
) & dstmask
) == 0
1104 # store new srcstep / dststep
1105 self
.new_dststep
= dststep
1106 self
.pred_dst_zero
= pred_dst_zero
1107 log(" new dststep", dststep
)
1110 class ISACaller(ISACallerHelper
, ISAFPHelpers
, StepLoop
):
1111 # decoder2 - an instance of power_decoder2
1112 # regfile - a list of initial values for the registers
1113 # initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
1114 # respect_pc - tracks the program counter. requires initial_insns
1115 def __init__(self
, decoder2
, regfile
, initial_sprs
=None, initial_cr
=0,
1116 initial_mem
=None, initial_msr
=0,
1127 self
.bigendian
= bigendian
1129 self
.is_svp64_mode
= False
1130 self
.respect_pc
= respect_pc
1131 if initial_sprs
is None:
1133 if initial_mem
is None:
1135 if fpregfile
is None:
1136 fpregfile
= [0] * 32
1137 if initial_insns
is None:
1139 assert self
.respect_pc
== False, "instructions required to honor pc"
1141 log("ISACaller insns", respect_pc
, initial_insns
, disassembly
)
1142 log("ISACaller initial_msr", initial_msr
)
1144 # "fake program counter" mode (for unit testing)
1148 if isinstance(initial_mem
, tuple):
1149 self
.fake_pc
= initial_mem
[0]
1150 disasm_start
= self
.fake_pc
1152 disasm_start
= initial_pc
1154 # disassembly: we need this for now (not given from the decoder)
1155 self
.disassembly
= {}
1157 for i
, code
in enumerate(disassembly
):
1158 self
.disassembly
[i
*4 + disasm_start
] = code
1160 # set up registers, instruction memory, data memory, PC, SPRs, MSR, CR
1161 self
.svp64rm
= SVP64RM()
1162 if initial_svstate
is None:
1164 if isinstance(initial_svstate
, int):
1165 initial_svstate
= SVP64State(initial_svstate
)
1166 # SVSTATE, MSR and PC
1167 StepLoop
.__init
__(self
, initial_svstate
)
1168 self
.msr
= SelectableInt(initial_msr
, 64) # underlying reg
1170 # GPR FPR SPR registers
1171 initial_sprs
= deepcopy(initial_sprs
) # so as not to get modified
1172 self
.gpr
= GPR(decoder2
, self
, self
.svstate
, regfile
)
1173 self
.fpr
= GPR(decoder2
, self
, self
.svstate
, fpregfile
)
1174 self
.spr
= SPR(decoder2
, initial_sprs
) # initialise SPRs before MMU
1176 # set up 4 dummy SVSHAPEs if they aren't already set up
1178 sname
= 'SVSHAPE%d' % i
1179 val
= self
.spr
.get(sname
, 0)
1180 # make sure it's an SVSHAPE
1181 self
.spr
[sname
] = SVSHAPE(val
, self
.gpr
)
1182 self
.last_op_svshape
= False
1185 self
.mem
= Mem(row_bytes
=8, initial_mem
=initial_mem
, misaligned_ok
=True)
1186 self
.mem
.log_fancy(kind
=LogKind
.InstrInOuts
)
1187 self
.imem
= Mem(row_bytes
=4, initial_mem
=initial_insns
)
1188 # MMU mode, redirect underlying Mem through RADIX
1190 self
.mem
= RADIX(self
.mem
, self
)
1192 self
.imem
= RADIX(self
.imem
, self
)
1194 # TODO, needed here:
1195 # FPR (same as GPR except for FP nums)
1196 # 4.2.2 p124 FPSCR (definitely "separate" - not in SPR)
1197 # note that mffs, mcrfs, mtfsf "manage" this FPSCR
1198 # 2.3.1 CR (and sub-fields CR0..CR6 - CR0 SO comes from XER.SO)
1199 # note that mfocrf, mfcr, mtcr, mtocrf, mcrxrx "manage" CRs
1201 # 2.3.2 LR (actually SPR #8) -- Done
1202 # 2.3.3 CTR (actually SPR #9) -- Done
1203 # 2.3.4 TAR (actually SPR #815)
1204 # 3.2.2 p45 XER (actually SPR #1) -- Done
1205 # 3.2.3 p46 p232 VRSAVE (actually SPR #256)
1207 # create CR then allow portions of it to be "selectable" (below)
1208 self
.cr_fields
= CRFields(initial_cr
)
1209 self
.cr
= self
.cr_fields
.cr
1210 self
.cr_backup
= 0 # sigh, dreadful hack: for fail-first (VLi)
1212 # "undefined", just set to variable-bit-width int (use exts "max")
1213 # self.undefined = SelectableInt(0, 256) # TODO, not hard-code 256!
1216 self
.namespace
.update(self
.spr
)
1217 self
.namespace
.update({'GPR': self
.gpr
,
1221 'memassign': self
.memassign
,
1224 'SVSTATE': self
.svstate
,
1225 'SVSHAPE0': self
.spr
['SVSHAPE0'],
1226 'SVSHAPE1': self
.spr
['SVSHAPE1'],
1227 'SVSHAPE2': self
.spr
['SVSHAPE2'],
1228 'SVSHAPE3': self
.spr
['SVSHAPE3'],
1231 'undefined': undefined
,
1232 'mode_is_64bit': True,
1233 'SO': XER_bits
['SO'],
1234 'XLEN': 64 # elwidth overrides
1237 # update pc to requested start point
1238 self
.set_pc(initial_pc
)
1240 # field-selectable versions of Condition Register
1241 self
.crl
= self
.cr_fields
.crl
1243 self
.namespace
["CR%d" % i
] = self
.crl
[i
]
1245 self
.decoder
= decoder2
.dec
1246 self
.dec2
= decoder2
1248 super().__init
__(XLEN
=self
.namespace
["XLEN"])
1252 return self
.namespace
["XLEN"]
def call_trap(self, trap_addr, trap_bit):
    """calls TRAP and sets up NIA to the new execution location.
    next instruction will begin at trap_addr.

    TRAP() computes self.trap_nia; that value is installed into the
    namespace as NIA and committed through self.pc.update() so that
    execution resumes at the trap vector.
    """
    self.TRAP(trap_addr, trap_bit)
    # TRAP() has just set self.trap_nia - make it the next PC
    self.namespace['NIA'] = self.trap_nia
    self.pc.update(self.namespace, self.is_svp64_mode)
def TRAP(self, trap_addr=0x700, trap_bit=PIb.TRAP):
    """TRAP> saves PC, MSR (and TODO SVSTATE), and updates MSR

    TRAP function is callable from inside the pseudocode itself,
    hence the default arguments.  when calling from inside ISACaller
    it is best to use call_trap()

    side-effects: writes SRR0 (saved CIA) and SRR1 (saved MSR with
    trap_bit set), optionally SVSRR0, computes self.trap_nia, and
    forces a fixed set of MSR bits for trap entry.
    """
    # https://bugs.libre-soc.org/show_bug.cgi?id=859
    # KAIVB relocates the interrupt vector base: trap_addr is OR'd
    # into it with KAIVB's low 13 bits masked off
    kaivb = self.spr['KAIVB'].value
    msr = self.namespace['MSR'].value
    log("TRAP:", hex(trap_addr), hex(msr), "kaivb", hex(kaivb))
    # store CIA(+4?) in SRR0, set NIA to 0x700
    # store MSR in SRR1, set MSR to um errr something, have to check spec
    # store SVSTATE (if enabled) in SVSRR0
    self.spr['SRR0'].value = self.pc.CIA.value
    self.spr['SRR1'].value = msr
    if self.is_svp64_mode:
        self.spr['SVSRR0'] = self.namespace['SVSTATE'].value
    self.trap_nia = SelectableInt(trap_addr | (kaivb & ~0x1fff), 64)
    self.spr['SRR1'][trap_bit] = 1  # change *copy* of MSR in SRR1

    # set exception bits. TODO: this should, based on the address
    # in figure 66 p1065 V3.0B and the table figure 65 p1063 set these
    # bits appropriately. however it turns out that *for now* in all
    # cases (all trap_addrs) the exact same thing is needed.
    self.msr[MSRb.IR] = 0
    self.msr[MSRb.DR] = 0
    self.msr[MSRb.FE0] = 0
    self.msr[MSRb.FE1] = 0
    self.msr[MSRb.EE] = 0
    self.msr[MSRb.RI] = 0
    self.msr[MSRb.SF] = 1
    self.msr[MSRb.TM] = 0
    self.msr[MSRb.VEC] = 0
    self.msr[MSRb.VSX] = 0
    self.msr[MSRb.PR] = 0
    self.msr[MSRb.FP] = 0
    self.msr[MSRb.PMM] = 0
    self.msr[MSRb.TEs] = 0
    self.msr[MSRb.TEe] = 0
    self.msr[MSRb.UND] = 0
    self.msr[MSRb.LE] = 1
def memassign(self, ea, sz, val):
    """assign val to memory at effective address ea — a straight
    delegation to the underlying Mem (or its RADIX wrapper when the
    MMU is enabled).  sz is presumably a byte count — confirm
    against Mem.memassign.
    """
    self.mem.memassign(ea, sz, val)
1308 def prep_namespace(self
, insn_name
, formname
, op_fields
, xlen
):
1309 # TODO: get field names from form in decoder*1* (not decoder2)
1310 # decoder2 is hand-created, and decoder1.sigform is auto-generated
1312 # then "yield" fields only from op_fields rather than hard-coded
1314 fields
= self
.decoder
.sigforms
[formname
]
1315 log("prep_namespace", formname
, op_fields
, insn_name
)
1316 for name
in op_fields
:
1317 # CR immediates. deal with separately. needs modifying
1319 if self
.is_svp64_mode
and name
in ['BI']: # TODO, more CRs
1320 # BI is a 5-bit, must reconstruct the value
1321 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1322 sig
= getattr(fields
, name
)
1324 # low 2 LSBs (CR field selector) remain same, CR num extended
1325 assert regnum
<= 7, "sigh, TODO, 128 CR fields"
1326 val
= (val
& 0b11) |
(regnum
<< 2)
1327 elif self
.is_svp64_mode
and name
in ['BF']: # TODO, more CRs
1328 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, "BF")
1329 log('hack %s' % name
, regnum
, is_vec
)
1332 sig
= getattr(fields
, name
)
1334 # these are all opcode fields involved in index-selection of CR,
1335 # and need to do "standard" arithmetic. CR[BA+32] for example
1336 # would, if using SelectableInt, only be 5-bit.
1337 if name
in ['BF', 'BFA', 'BC', 'BA', 'BB', 'BT', 'BI']:
1338 self
.namespace
[name
] = val
1340 self
.namespace
[name
] = SelectableInt(val
, sig
.width
)
1342 self
.namespace
['XER'] = self
.spr
['XER']
1343 self
.namespace
['CA'] = self
.spr
['XER'][XER_bits
['CA']].value
1344 self
.namespace
['CA32'] = self
.spr
['XER'][XER_bits
['CA32']].value
1345 self
.namespace
['OV'] = self
.spr
['XER'][XER_bits
['OV']].value
1346 self
.namespace
['OV32'] = self
.spr
['XER'][XER_bits
['OV32']].value
1347 self
.namespace
['XLEN'] = xlen
1349 # add some SVSTATE convenience variables
1350 vl
= self
.svstate
.vl
1351 srcstep
= self
.svstate
.srcstep
1352 self
.namespace
['VL'] = vl
1353 self
.namespace
['srcstep'] = srcstep
1355 # take a copy of the CR field value: if non-VLi fail-first fails
1356 # this is because the pseudocode writes *directly* to CR. sigh
1357 self
.cr_backup
= self
.cr
.value
1359 # sv.bc* need some extra fields
1360 if self
.is_svp64_mode
and insn_name
.startswith("sv.bc"):
1361 # blegh grab bits manually
1362 mode
= yield self
.dec2
.rm_dec
.rm_in
.mode
1363 # convert to SelectableInt before test
1364 mode
= SelectableInt(mode
, 5)
1365 bc_vlset
= mode
[SVP64MODEb
.BC_VLSET
] != 0
1366 bc_vli
= mode
[SVP64MODEb
.BC_VLI
] != 0
1367 bc_snz
= mode
[SVP64MODEb
.BC_SNZ
] != 0
1368 bc_vsb
= yield self
.dec2
.rm_dec
.bc_vsb
1369 bc_lru
= yield self
.dec2
.rm_dec
.bc_lru
1370 bc_gate
= yield self
.dec2
.rm_dec
.bc_gate
1371 sz
= yield self
.dec2
.rm_dec
.pred_sz
1372 self
.namespace
['mode'] = SelectableInt(mode
, 5)
1373 self
.namespace
['ALL'] = SelectableInt(bc_gate
, 1)
1374 self
.namespace
['VSb'] = SelectableInt(bc_vsb
, 1)
1375 self
.namespace
['LRu'] = SelectableInt(bc_lru
, 1)
1376 self
.namespace
['VLSET'] = SelectableInt(bc_vlset
, 1)
1377 self
.namespace
['VLI'] = SelectableInt(bc_vli
, 1)
1378 self
.namespace
['sz'] = SelectableInt(sz
, 1)
1379 self
.namespace
['SNZ'] = SelectableInt(bc_snz
, 1)
1381 def get_kludged_op_add_ca_ov(self
, inputs
, inp_ca_ov
):
1382 """ this was not at all necessary to do. this function massively
1383 duplicates - in a laborious and complex fashion - the contents of
1384 the CSV files that were extracted two years ago from microwatt's
1385 source code. A-inversion is the "inv A" column, output inversion
1386 is the "inv out" column, carry-in equal to 0 or 1 or CA is the
1389 all of that information is available in
1390 self.instrs[ins_name].op_fields
1391 where info is usually assigned to self.instrs[ins_name]
1393 https://git.libre-soc.org/?p=openpower-isa.git;a=blob;f=openpower/isatables/minor_31.csv;hb=HEAD
1395 the immediate constants are *also* decoded correctly and placed
1396 usually by DecodeIn2Imm into operand2, as part of power_decoder2.py
1398 def ca(a
, b
, ca_in
, width
):
1399 mask
= (1 << width
) - 1
1400 y
= (a
& mask
) + (b
& mask
) + ca_in
1403 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1404 insn
= insns
.get(asmcode
)
1405 SI
= yield self
.dec2
.dec
.SI
1408 inputs
= [i
.value
for i
in inputs
]
1411 if insn
in ("add", "addo", "addc", "addco"):
1415 elif insn
== "addic" or insn
== "addic.":
1419 elif insn
in ("subf", "subfo", "subfc", "subfco"):
1423 elif insn
== "subfic":
1427 elif insn
== "adde" or insn
== "addeo":
1431 elif insn
== "subfe" or insn
== "subfeo":
1435 elif insn
== "addme" or insn
== "addmeo":
1439 elif insn
== "addze" or insn
== "addzeo":
1443 elif insn
== "subfme" or insn
== "subfmeo":
1447 elif insn
== "subfze" or insn
== "subfzeo":
1451 elif insn
== "addex":
1452 # CA[32] aren't actually written, just generate so we have
1453 # something to return
1454 ca64
= ov64
= ca(inputs
[0], inputs
[1], OV
, 64)
1455 ca32
= ov32
= ca(inputs
[0], inputs
[1], OV
, 32)
1456 return ca64
, ca32
, ov64
, ov32
1457 elif insn
== "neg" or insn
== "nego":
1462 raise NotImplementedError(
1463 "op_add kludge unimplemented instruction: ", asmcode
, insn
)
1465 ca64
= ca(a
, b
, ca_in
, 64)
1466 ca32
= ca(a
, b
, ca_in
, 32)
1467 ov64
= ca64
!= ca(a
, b
, ca_in
, 63)
1468 ov32
= ca32
!= ca(a
, b
, ca_in
, 31)
1469 return ca64
, ca32
, ov64
, ov32
1471 def handle_carry_(self
, inputs
, output
, ca
, ca32
, inp_ca_ov
):
1472 op
= yield self
.dec2
.e
.do
.insn_type
1473 if op
== MicrOp
.OP_ADD
.value
and ca
is None and ca32
is None:
1474 retval
= yield from self
.get_kludged_op_add_ca_ov(
1476 ca
, ca32
, ov
, ov32
= retval
1477 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1478 if insns
.get(asmcode
) == 'addex':
1479 # TODO: if 32-bit mode, set ov to ov32
1480 self
.spr
['XER'][XER_bits
['OV']] = ov
1481 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1483 # TODO: if 32-bit mode, set ca to ca32
1484 self
.spr
['XER'][XER_bits
['CA']] = ca
1485 self
.spr
['XER'][XER_bits
['CA32']] = ca32
1487 inv_a
= yield self
.dec2
.e
.do
.invert_in
1489 inputs
[0] = ~inputs
[0]
1491 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1493 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1494 inputs
.append(SelectableInt(imm
, 64))
1497 log("gt input", x
, output
)
1498 gt
= (gtu(x
, output
))
1501 cy
= 1 if any(gts
) else 0
1503 if ca
is None: # already written
1504 self
.spr
['XER'][XER_bits
['CA']] = cy
1507 # ARGH... different for OP_ADD... *sigh*...
1508 op
= yield self
.dec2
.e
.do
.insn_type
1509 if op
== MicrOp
.OP_ADD
.value
:
1510 res32
= (output
.value
& (1 << 32)) != 0
1511 a32
= (inputs
[0].value
& (1 << 32)) != 0
1512 if len(inputs
) >= 2:
1513 b32
= (inputs
[1].value
& (1 << 32)) != 0
1516 cy32
= res32 ^ a32 ^ b32
1517 log("CA32 ADD", cy32
)
1521 log("input", x
, output
)
1522 log(" x[32:64]", x
, x
[32:64])
1523 log(" o[32:64]", output
, output
[32:64])
1524 gt
= (gtu(x
[32:64], output
[32:64])) == SelectableInt(1, 1)
1526 cy32
= 1 if any(gts
) else 0
1527 log("CA32", cy32
, gts
)
1528 if ca32
is None: # already written
1529 self
.spr
['XER'][XER_bits
['CA32']] = cy32
1531 def handle_overflow(self
, inputs
, output
, div_overflow
, inp_ca_ov
):
1532 op
= yield self
.dec2
.e
.do
.insn_type
1533 if op
== MicrOp
.OP_ADD
.value
:
1534 retval
= yield from self
.get_kludged_op_add_ca_ov(
1536 ca
, ca32
, ov
, ov32
= retval
1537 # TODO: if 32-bit mode, set ov to ov32
1538 self
.spr
['XER'][XER_bits
['OV']] = ov
1539 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1540 self
.spr
['XER'][XER_bits
['SO']] |
= ov
1542 if hasattr(self
.dec2
.e
.do
, "invert_in"):
1543 inv_a
= yield self
.dec2
.e
.do
.invert_in
1545 inputs
[0] = ~inputs
[0]
1547 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1549 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1550 inputs
.append(SelectableInt(imm
, 64))
1551 log("handle_overflow", inputs
, output
, div_overflow
)
1552 if len(inputs
) < 2 and div_overflow
is None:
1555 # div overflow is different: it's returned by the pseudo-code
1556 # because it's more complex than can be done by analysing the output
1557 if div_overflow
is not None:
1558 ov
, ov32
= div_overflow
, div_overflow
1559 # arithmetic overflow can be done by analysing the input and output
1560 elif len(inputs
) >= 2:
1562 input_sgn
= [exts(x
.value
, x
.bits
) < 0 for x
in inputs
]
1563 output_sgn
= exts(output
.value
, output
.bits
) < 0
1564 ov
= 1 if input_sgn
[0] == input_sgn
[1] and \
1565 output_sgn
!= input_sgn
[0] else 0
1568 input32_sgn
= [exts(x
.value
, 32) < 0 for x
in inputs
]
1569 output32_sgn
= exts(output
.value
, 32) < 0
1570 ov32
= 1 if input32_sgn
[0] == input32_sgn
[1] and \
1571 output32_sgn
!= input32_sgn
[0] else 0
1573 # now update XER OV/OV32/SO
1574 so
= self
.spr
['XER'][XER_bits
['SO']]
1575 new_so
= so | ov
# sticky overflow ORs in old with new
1576 self
.spr
['XER'][XER_bits
['OV']] = ov
1577 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1578 self
.spr
['XER'][XER_bits
['SO']] = new_so
1579 log(" set overflow", ov
, ov32
, so
, new_so
)
1581 def handle_comparison(self
, out
, cr_idx
=0, overflow
=None, no_so
=False):
1582 assert isinstance(out
, SelectableInt
), \
1583 "out zero not a SelectableInt %s" % repr(outputs
)
1584 log("handle_comparison", out
.bits
, hex(out
.value
))
1585 # TODO - XXX *processor* in 32-bit mode
1586 # https://bugs.libre-soc.org/show_bug.cgi?id=424
1588 # o32 = exts(out.value, 32)
1589 # print ("handle_comparison exts 32 bit", hex(o32))
1590 out
= exts(out
.value
, out
.bits
)
1591 log("handle_comparison exts", hex(out
))
1592 # create the three main CR flags, EQ GT LT
1593 zero
= SelectableInt(out
== 0, 1)
1594 positive
= SelectableInt(out
> 0, 1)
1595 negative
= SelectableInt(out
< 0, 1)
1596 # get (or not) XER.SO. for setvl this is important *not* to read SO
1598 SO
= SelectableInt(1, 0)
1600 SO
= self
.spr
['XER'][XER_bits
['SO']]
1601 log("handle_comparison SO", SO
.value
,
1602 "overflow", overflow
,
1604 "+ve", positive
.value
,
1605 "-ve", negative
.value
)
1606 # alternative overflow checking (setvl mainly at the moment)
1607 if overflow
is not None and overflow
== 1:
1608 SO
= SelectableInt(1, 1)
1609 # create the four CR field values and set the required CR field
1610 cr_field
= selectconcat(negative
, positive
, zero
, SO
)
1611 log("handle_comparison cr_field", self
.cr
, cr_idx
, cr_field
)
1612 self
.crl
[cr_idx
].eq(cr_field
)
def set_pc(self, pc_val):
    """set NIA in the namespace to pc_val (as a 64-bit SelectableInt)
    and commit it through self.pc.update(), making it the next
    instruction address.
    """
    self.namespace['NIA'] = SelectableInt(pc_val, 64)
    self.pc.update(self.namespace, self.is_svp64_mode)
1618 def get_next_insn(self
):
1619 """check instruction
1622 pc
= self
.pc
.CIA
.value
1625 ins
= self
.imem
.ld(pc
, 4, False, True, instr_fetch
=True)
1627 raise KeyError("no instruction at 0x%x" % pc
)
def setup_one(self):
    """set up one instruction: fetch (pc, insn) for the current CIA
    via get_next_insn() and delegate the decode set-up (generator)
    to setup_next_insn().
    """
    pc, insn = self.get_next_insn()
    yield from self.setup_next_insn(pc, insn)
1636 def setup_next_insn(self
, pc
, ins
):
1637 """set up next instruction
1640 log("setup: 0x%x 0x%x %s" % (pc
, ins
& 0xffffffff, bin(ins
)))
1641 log("CIA NIA", self
.respect_pc
, self
.pc
.CIA
.value
, self
.pc
.NIA
.value
)
1643 yield self
.dec2
.sv_rm
.eq(0)
1644 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff)
1645 yield self
.dec2
.dec
.bigendian
.eq(self
.bigendian
)
1646 yield self
.dec2
.state
.msr
.eq(self
.msr
.value
)
1647 yield self
.dec2
.state
.pc
.eq(pc
)
1648 if self
.svstate
is not None:
1649 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
1651 # SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
1653 opcode
= yield self
.dec2
.dec
.opcode_in
1654 opcode
= SelectableInt(value
=opcode
, bits
=32)
1655 pfx
= SVP64Instruction
.Prefix(opcode
)
1656 log("prefix test: opcode:", pfx
.PO
, bin(pfx
.PO
), pfx
.id)
1657 self
.is_svp64_mode
= bool((pfx
.PO
== 0b000001) and (pfx
.id == 0b11))
1658 self
.pc
.update_nia(self
.is_svp64_mode
)
1660 yield self
.dec2
.is_svp64_mode
.eq(self
.is_svp64_mode
)
1661 self
.namespace
['NIA'] = self
.pc
.NIA
1662 self
.namespace
['SVSTATE'] = self
.svstate
1663 if not self
.is_svp64_mode
:
1666 # in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
1667 log("svp64.rm", bin(pfx
.rm
))
1668 log(" svstate.vl", self
.svstate
.vl
)
1669 log(" svstate.mvl", self
.svstate
.maxvl
)
1670 ins
= self
.imem
.ld(pc
+4, 4, False, True, instr_fetch
=True)
1671 log(" svsetup: 0x%x 0x%x %s" % (pc
+4, ins
& 0xffffffff, bin(ins
)))
1672 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff) # v3.0B suffix
1673 yield self
.dec2
.sv_rm
.eq(int(pfx
.rm
)) # svp64 prefix
1676 def execute_one(self
):
1677 """execute one instruction
1679 # get the disassembly code for this instruction
1680 if not self
.disassembly
:
1681 code
= yield from self
.get_assembly_name()
1684 if self
.is_svp64_mode
:
1685 offs
, dbg
= 4, "svp64 "
1686 code
= self
.disassembly
[self
._pc
+offs
]
1687 log(" %s sim-execute" % dbg
, hex(self
._pc
), code
)
1688 opname
= code
.split(' ')[0]
1690 yield from self
.call(opname
) # execute the instruction
1691 except MemException
as e
: # check for memory errors
1692 if e
.args
[0] == 'unaligned': # alignment error
1693 # run a Trap but set DAR first
1694 print("memory unaligned exception, DAR", e
.dar
, repr(e
))
1695 self
.spr
['DAR'] = SelectableInt(e
.dar
, 64)
1696 self
.call_trap(0x600, PIb
.PRIV
) # 0x600, privileged
1698 elif e
.args
[0] == 'invalid': # invalid
1699 # run a Trap but set DAR first
1700 log("RADIX MMU memory invalid error, mode %s" % e
.mode
)
1701 if e
.mode
== 'EXECUTE':
1702 # XXX TODO: must set a few bits in SRR1,
1703 # see microwatt loadstore1.vhdl
1704 # if m_in.segerr = '0' then
1705 # v.srr1(47 - 33) := m_in.invalid;
1706 # v.srr1(47 - 35) := m_in.perm_error; -- noexec fault
1707 # v.srr1(47 - 44) := m_in.badtree;
1708 # v.srr1(47 - 45) := m_in.rc_error;
1709 # v.intr_vec := 16#400#;
1711 # v.intr_vec := 16#480#;
1712 self
.call_trap(0x400, PIb
.PRIV
) # 0x400, privileged
1714 self
.call_trap(0x300, PIb
.PRIV
) # 0x300, privileged
1716 # not supported yet:
1717 raise e
# ... re-raise
1719 log("gprs after code", code
)
1722 for i
in range(len(self
.crl
)):
1723 crs
.append(bin(self
.crl
[i
].asint()))
1724 log("crs", " ".join(crs
))
1725 log("vl,maxvl", self
.svstate
.vl
, self
.svstate
.maxvl
)
1727 # don't use this except in special circumstances
1728 if not self
.respect_pc
:
1731 log("execute one, CIA NIA", hex(self
.pc
.CIA
.value
),
1732 hex(self
.pc
.NIA
.value
))
1734 def get_assembly_name(self
):
1735 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1736 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1737 dec_insn
= yield self
.dec2
.e
.do
.insn
1738 insn_1_11
= yield self
.dec2
.e
.do
.insn
[1:11]
1739 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1740 int_op
= yield self
.dec2
.dec
.op
.internal_op
1741 log("get assembly name asmcode", asmcode
, int_op
,
1742 hex(dec_insn
), bin(insn_1_11
))
1743 asmop
= insns
.get(asmcode
, None)
1745 # sigh reconstruct the assembly instruction name
1746 if hasattr(self
.dec2
.e
.do
, "oe"):
1747 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
1748 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
1752 if hasattr(self
.dec2
.e
.do
, "rc"):
1753 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
1754 rc_ok
= yield self
.dec2
.e
.do
.rc
.ok
1758 # annoying: ignore rc_ok if RC1 is set (for creating *assembly name*)
1759 RC1
= yield self
.dec2
.rm_dec
.RC1
1763 # grrrr have to special-case MUL op (see DecodeOE)
1764 log("ov %d en %d rc %d en %d op %d" %
1765 (ov_ok
, ov_en
, rc_ok
, rc_en
, int_op
))
1766 if int_op
in [MicrOp
.OP_MUL_H64
.value
, MicrOp
.OP_MUL_H32
.value
]:
1771 if not asmop
.endswith("."): # don't add "." to "andis."
1774 if hasattr(self
.dec2
.e
.do
, "lk"):
1775 lk
= yield self
.dec2
.e
.do
.lk
1778 log("int_op", int_op
)
1779 if int_op
in [MicrOp
.OP_B
.value
, MicrOp
.OP_BC
.value
]:
1780 AA
= yield self
.dec2
.dec
.fields
.FormI
.AA
[0:-1]
1784 spr_msb
= yield from self
.get_spr_msb()
1785 if int_op
== MicrOp
.OP_MFCR
.value
:
1790 # XXX TODO: for whatever weird reason this doesn't work
1791 # https://bugs.libre-soc.org/show_bug.cgi?id=390
1792 if int_op
== MicrOp
.OP_MTCRF
.value
:
def reset_remaps(self):
    """Restore REMAP bookkeeping to its idle state.

    remap_idxs holds the current remapped element index for each of
    the four SVSHAPEs (identity ordering by default), and
    remap_loopends the matching loop-end flags (all clear).
    """
    self.remap_idxs = list(range(4))
    self.remap_loopends = [0, 0, 0, 0]
1803 def get_remap_indices(self
):
1804 """WARNING, this function stores remap_idxs and remap_loopends
1805 in the class for later use. this to avoid problems with yield
1807 # go through all iterators in lock-step, advance to next remap_idx
1808 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
1809 # get four SVSHAPEs. here we are hard-coding
1811 SVSHAPE0
= self
.spr
['SVSHAPE0']
1812 SVSHAPE1
= self
.spr
['SVSHAPE1']
1813 SVSHAPE2
= self
.spr
['SVSHAPE2']
1814 SVSHAPE3
= self
.spr
['SVSHAPE3']
1815 # set up the iterators
1816 remaps
= [(SVSHAPE0
, SVSHAPE0
.get_iterator()),
1817 (SVSHAPE1
, SVSHAPE1
.get_iterator()),
1818 (SVSHAPE2
, SVSHAPE2
.get_iterator()),
1819 (SVSHAPE3
, SVSHAPE3
.get_iterator()),
1823 for i
, (shape
, remap
) in enumerate(remaps
):
1824 # zero is "disabled"
1825 if shape
.value
== 0x0:
1826 self
.remap_idxs
[i
] = 0
1827 # pick src or dststep depending on reg num (0-2=in, 3-4=out)
1828 step
= dststep
if (i
in [3, 4]) else srcstep
1829 # this is terrible. O(N^2) looking for the match. but hey.
1830 for idx
, (remap_idx
, loopends
) in enumerate(remap
):
1833 self
.remap_idxs
[i
] = remap_idx
1834 self
.remap_loopends
[i
] = loopends
1835 dbg
.append((i
, step
, remap_idx
, loopends
))
1836 for (i
, step
, remap_idx
, loopends
) in dbg
:
1837 log("SVSHAPE %d idx, end" % i
, step
, remap_idx
, bin(loopends
))
def get_spr_msb(self):
    """generator: yield the decoded instruction word and return True
    when its bit 20 is set.

    bit 20 is presumably the MSB of the split 10-bit SPR field of
    mfspr/mtspr — original note: "sigh - XFF.spr[-1]?"
    """
    insn_value = yield self.dec2.e.do.insn
    return bool(insn_value & (1 << 20))
1844 def call(self
, name
):
1845 """call(opcode) - the primary execution point for instructions
1847 self
.last_st_addr
= None # reset the last known store address
1848 self
.last_ld_addr
= None # etc.
1850 ins_name
= name
.strip() # remove spaces if not already done so
1852 log("halted - not executing", ins_name
)
1855 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
1856 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
1857 asmop
= yield from self
.get_assembly_name()
1858 log("call", ins_name
, asmop
)
1860 # sv.setvl is *not* a loop-function. sigh
1861 log("is_svp64_mode", self
.is_svp64_mode
, asmop
)
1864 int_op
= yield self
.dec2
.dec
.op
.internal_op
1865 spr_msb
= yield from self
.get_spr_msb()
1867 instr_is_privileged
= False
1868 if int_op
in [MicrOp
.OP_ATTN
.value
,
1869 MicrOp
.OP_MFMSR
.value
,
1870 MicrOp
.OP_MTMSR
.value
,
1871 MicrOp
.OP_MTMSRD
.value
,
1873 MicrOp
.OP_RFID
.value
]:
1874 instr_is_privileged
= True
1875 if int_op
in [MicrOp
.OP_MFSPR
.value
,
1876 MicrOp
.OP_MTSPR
.value
] and spr_msb
:
1877 instr_is_privileged
= True
1879 log("is priv", instr_is_privileged
, hex(self
.msr
.value
),
1881 # check MSR priv bit and whether op is privileged: if so, throw trap
1882 if instr_is_privileged
and self
.msr
[MSRb
.PR
] == 1:
1883 self
.call_trap(0x700, PIb
.PRIV
)
1886 # check halted condition
1887 if ins_name
== 'attn':
1891 # check illegal instruction
1893 if ins_name
not in ['mtcrf', 'mtocrf']:
1894 illegal
= ins_name
!= asmop
1896 # list of instructions not being supported by binutils (.long)
1897 dotstrp
= asmop
[:-1] if asmop
[-1] == '.' else asmop
1898 if dotstrp
in [*FPTRANS_INSNS
,
1899 'ffmadds', 'fdmadds', 'ffadds',
1901 'setvl', 'svindex', 'svremap', 'svstep',
1902 'svshape', 'svshape2',
1903 'grev', 'ternlogi', 'bmask', 'cprop',
1904 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
1905 'fmvis', 'fishmv', 'pcdec', "maddedu", "divmod2du",
1906 "dsld", "dsrd", "maddedus",
1907 "shadd", "shaddw", "shadduw",
1912 # branch-conditional redirects to sv.bc
1913 if asmop
.startswith('bc') and self
.is_svp64_mode
:
1914 ins_name
= 'sv.%s' % ins_name
1916 # ld-immediate-with-pi mode redirects to ld-with-postinc
1917 ldst_imm_postinc
= False
1918 if 'u' in ins_name
and self
.is_svp64_mode
:
1919 ldst_pi
= yield self
.dec2
.rm_dec
.ldst_postinc
1921 ins_name
= ins_name
.replace("u", "up")
1922 ldst_imm_postinc
= True
1923 log(" enable ld/st postinc", ins_name
)
1925 log(" post-processed name", dotstrp
, ins_name
, asmop
)
1927 # illegal instructions call TRAP at 0x700
1929 print("illegal", ins_name
, asmop
)
1930 self
.call_trap(0x700, PIb
.ILLEG
)
1931 print("name %s != %s - calling ILLEGAL trap, PC: %x" %
1932 (ins_name
, asmop
, self
.pc
.CIA
.value
))
1935 # this is for setvl "Vertical" mode: if set true,
1936 # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
1937 # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
1938 self
.allow_next_step_inc
= False
1939 self
.svstate_next_mode
= 0
1941 # nop has to be supported, we could let the actual op calculate
1942 # but PowerDecoder has a pattern for nop
1943 if ins_name
== 'nop':
1944 self
.update_pc_next()
1947 # get elwidths, defaults to 64
1951 if self
.is_svp64_mode
:
1952 ew_src
= yield self
.dec2
.rm_dec
.ew_src
1953 ew_dst
= yield self
.dec2
.rm_dec
.ew_dst
1954 ew_src
= 8 << (3-int(ew_src
)) # convert to bitlength
1955 ew_dst
= 8 << (3-int(ew_dst
)) # convert to bitlength
1956 xlen
= max(ew_src
, ew_dst
)
1957 log("elwdith", ew_src
, ew_dst
)
1958 log("XLEN:", self
.is_svp64_mode
, xlen
)
1960 # look up instruction in ISA.instrs, prepare namespace
1961 if ins_name
== 'pcdec': # grrrr yes there are others ("stbcx." etc.)
1962 info
= self
.instrs
[ins_name
+"."]
1963 elif asmop
[-1] == '.' and asmop
in self
.instrs
:
1964 info
= self
.instrs
[asmop
]
1966 info
= self
.instrs
[ins_name
]
1967 yield from self
.prep_namespace(ins_name
, info
.form
, info
.op_fields
,
1970 # preserve order of register names
1971 input_names
= create_args(list(info
.read_regs
) +
1972 list(info
.uninit_regs
))
1973 log("input names", input_names
)
1975 # get SVP64 entry for the current instruction
1976 sv_rm
= self
.svp64rm
.instrs
.get(ins_name
)
1977 if sv_rm
is not None:
1978 dest_cr
, src_cr
, src_byname
, dest_byname
= decode_extra(sv_rm
)
1980 dest_cr
, src_cr
, src_byname
, dest_byname
= False, False, {}, {}
1981 log("sv rm", sv_rm
, dest_cr
, src_cr
, src_byname
, dest_byname
)
1983 # see if srcstep/dststep need skipping over masked-out predicate bits
1984 # svstep also needs advancement because it calls SVSTATE_NEXT.
1985 # bit the remaps get computed just after pre_inc moves them on
1986 # with remap_set_steps substituting for PowerDecider2 not doing it,
1987 # and SVSTATE_NEXT not being able to.use yield, the preinc on
1988 # svstep is necessary for now.
1990 if (self
.is_svp64_mode
or ins_name
in ['svstep']):
1991 yield from self
.svstate_pre_inc()
1992 if self
.is_svp64_mode
:
1993 pre
= yield from self
.update_new_svstate_steps()
1995 self
.svp64_reset_loop()
1997 self
.update_pc_next()
1999 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2000 pred_dst_zero
= self
.pred_dst_zero
2001 pred_src_zero
= self
.pred_src_zero
2002 vl
= self
.svstate
.vl
2003 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2005 # VL=0 in SVP64 mode means "do nothing: skip instruction"
2006 if self
.is_svp64_mode
and vl
== 0:
2007 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
2008 log("SVP64: VL=0, end of call", self
.namespace
['CIA'],
2009 self
.namespace
['NIA'], kind
=LogKind
.InstrInOuts
)
2012 # for when SVREMAP is active, using pre-arranged schedule.
2013 # note: modifying PowerDecoder2 needs to "settle"
2014 remap_en
= self
.svstate
.SVme
2015 persist
= self
.svstate
.RMpst
2016 active
= (persist
or self
.last_op_svshape
) and remap_en
!= 0
2017 if self
.is_svp64_mode
:
2018 yield self
.dec2
.remap_active
.eq(remap_en
if active
else 0)
2020 if persist
or self
.last_op_svshape
:
2021 remaps
= self
.get_remap_indices()
2022 if self
.is_svp64_mode
and (persist
or self
.last_op_svshape
):
2023 yield from self
.remap_set_steps(remaps
)
2024 # after that, settle down (combinatorial) to let Vector reg numbers
2025 # work themselves out
2027 if self
.is_svp64_mode
:
2028 remap_active
= yield self
.dec2
.remap_active
2030 remap_active
= False
2031 log("remap active", bin(remap_active
))
2033 # main input registers (RT, RA ...)
2035 for name
in input_names
:
2036 regval
= (yield from self
.get_input(name
, ew_src
))
2037 log("regval name", name
, regval
)
2038 inputs
.append(regval
)
2040 # arrrrgh, awful hack, to get _RT into namespace
2041 if ins_name
in ['setvl', 'svstep']:
2043 RT
= yield self
.dec2
.dec
.RT
2044 self
.namespace
[regname
] = SelectableInt(RT
, 5)
2046 self
.namespace
["RT"] = SelectableInt(0, 5)
2047 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, "RT")
2048 log('hack input reg %s %s' % (name
, str(regnum
)), is_vec
)
2050 # in SVP64 mode for LD/ST work out immediate
2051 # XXX TODO: replace_ds for DS-Form rather than D-Form.
2052 # use info.form to detect
2053 if self
.is_svp64_mode
and not ldst_imm_postinc
:
2054 yield from self
.check_replace_d(info
, remap_active
)
2056 # "special" registers
2057 for special
in info
.special_regs
:
2058 if special
in special_sprs
:
2059 inputs
.append(self
.spr
[special
])
2061 inputs
.append(self
.namespace
[special
])
2063 # clear trap (trap) NIA
2064 self
.trap_nia
= None
2066 # check if this was an sv.bc* and create an indicator that
2067 # this is the last check to be made as a loop. combined with
2068 # the ALL/ANY mode we can early-exit
2069 if self
.is_svp64_mode
and ins_name
.startswith("sv.bc"):
2070 no_in_vec
= yield self
.dec2
.no_in_vec
# BI is scalar
2071 end_loop
= no_in_vec
or srcstep
== vl
-1 or dststep
== vl
-1
2072 self
.namespace
['end_loop'] = SelectableInt(end_loop
, 1)
2074 inp_ca_ov
= (self
.spr
['XER'][XER_bits
['CA']].value
,
2075 self
.spr
['XER'][XER_bits
['OV']].value
)
2077 # execute actual instruction here (finally)
2078 log("inputs", inputs
)
2079 results
= info
.func(self
, *inputs
)
2080 output_names
= create_args(info
.write_regs
)
2082 for out
, n
in zip(results
or [], output_names
):
2084 log("results", outs
)
2086 # "inject" decorator takes namespace from function locals: we need to
2087 # overwrite NIA being overwritten (sigh)
2088 if self
.trap_nia
is not None:
2089 self
.namespace
['NIA'] = self
.trap_nia
2091 log("after func", self
.namespace
['CIA'], self
.namespace
['NIA'])
2093 # check if op was a LD/ST so that debugging can check the
2095 if int_op
in [MicrOp
.OP_STORE
.value
,
2097 self
.last_st_addr
= self
.mem
.last_st_addr
2098 if int_op
in [MicrOp
.OP_LOAD
.value
,
2100 self
.last_ld_addr
= self
.mem
.last_ld_addr
2101 log("op", int_op
, MicrOp
.OP_STORE
.value
, MicrOp
.OP_LOAD
.value
,
2102 self
.last_st_addr
, self
.last_ld_addr
)
2104 # detect if CA/CA32 already in outputs (sra*, basically)
2106 ca32
= outs
.get("CA32")
2108 log("carry already done?", ca
, ca32
, output_names
)
2109 carry_en
= yield self
.dec2
.e
.do
.output_carry
2111 yield from self
.handle_carry_(
2112 inputs
, results
[0], ca
, ca32
, inp_ca_ov
=inp_ca_ov
)
2114 # get outout named "overflow" and "CR0"
2115 overflow
= outs
.get('overflow')
2116 cr0
= outs
.get('CR0')
2118 if not self
.is_svp64_mode
: # yeah just no. not in parallel processing
2119 # detect if overflow was in return result
2120 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2121 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2122 log("internal overflow", ins_name
, overflow
, "en?", ov_en
, ov_ok
)
2124 yield from self
.handle_overflow(
2125 inputs
, results
[0], overflow
, inp_ca_ov
=inp_ca_ov
)
2127 # only do SVP64 dest predicated Rc=1 if dest-pred is not enabled
2129 if not self
.is_svp64_mode
or not pred_dst_zero
:
2130 if hasattr(self
.dec2
.e
.do
, "rc"):
2131 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2132 # don't do Rc=1 for svstep it is handled explicitly.
2133 # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
2134 # to write directly to CR0 instead of in ISACaller. hooyahh.
2135 if rc_en
and ins_name
not in ['svstep']:
2136 yield from self
.do_rc_ov(ins_name
, results
[0], overflow
, cr0
)
2139 ffirst_hit
= False, False
2140 if self
.is_svp64_mode
:
2141 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2142 is_cr
= sv_mode
== SVMode
.CROP
.value
2143 chk
= rc_en
or is_cr
2144 ffirst_hit
= (yield from self
.check_ffirst(info
, chk
, srcstep
))
2146 # any modified return results?
2147 yield from self
.do_outregs_nia(asmop
, ins_name
, info
, outs
,
2148 carry_en
, rc_en
, ffirst_hit
, ew_dst
)
def check_ffirst(self, info, rc_en, srcstep):
    """fail-first mode: checks a bit of Rc Vector, truncates VL

    NOTE(review): the original docstring terminator and several interior
    lines were lost in extraction; the gaps are marked below — confirm
    against upstream before relying on control flow here.
    """
    # read the SVP64 RM fail-first control fields from the decoder
    rm_mode = yield self.dec2.rm_dec.mode
    ff_inv = yield self.dec2.rm_dec.inv
    cr_bit = yield self.dec2.rm_dec.cr_sel
    RC1 = yield self.dec2.rm_dec.RC1
    vli_ = yield self.dec2.rm_dec.vli # VL inclusive if truncated
    log(" ff rm_mode", rc_en, rm_mode, SVP64RMMode.FFIRST.value)
    # NOTE(review): log lines (presumably ff_inv / RC1 / vli_) missing here
    log(" cr_bit", cr_bit)
    log(" rc_en", rc_en)
    # not Rc=1 or not in fail-first mode: nothing to check
    if not rc_en or rm_mode != SVP64RMMode.FFIRST.value:
        # NOTE(review): branch body (presumably an early return of
        # (False, False)) missing from this extraction
    # get the CR vevtor, do BO-test
    # NOTE(review): initialisation of `crf` missing here
    log("asmregs", info.asmregs[0], info.write_regs)
    if 'CR' in info.write_regs and 'BF' in info.asmregs[0]:
        # NOTE(review): branch body (presumably setting crf from the BF
        # instruction field) missing from this extraction
    regnum, is_vec = yield from get_cr_out(self.dec2, crf)
    crtest = self.crl[regnum]
    # fail-first "hit": the selected CR bit differs from the inversion flag
    ffirst_hit = crtest[cr_bit] != ff_inv
    log("cr test", crf, regnum, int(crtest), crtest, cr_bit, ff_inv)
    log("cr test?", ffirst_hit)
    # NOTE(review): early-return on no-hit appears to be missing here
    # Fail-first activated, truncate VL
    vli = SelectableInt(int(vli_), 7)
    self.svstate.vl = srcstep + vli  # VLI semantics: VL inclusive if truncated
    yield self.dec2.state.svstate.eq(self.svstate.value)
    yield Settle()  # let decoder update
def do_rc_ov(self, ins_name, result, overflow, cr0):
    """Rc=1 / overflow handling: select the CR field for this
    instruction's result and write either an implicit comparison
    (handle_comparison) or an explicit CR0 value from the pseudocode.

    NOTE(review): several interior lines (guards / else-branches) were
    lost in extraction; gaps are marked below.
    """
    if ins_name.startswith("f"):
        rc_reg = "CR1" # not calculated correctly yet (not FP compares)
    # NOTE(review): else-branch (presumably rc_reg = "CR0") missing here
    regnum, is_vec = yield from get_cr_out(self.dec2, rc_reg)
    # hang on... for `setvl` actually you want to test SVSTATE.VL
    is_setvl = ins_name in ('svstep', 'setvl')
    # NOTE(review): guard (presumably `if is_setvl:`) missing here
    result = SelectableInt(result.vl, 64)
    # overflow = None # do not override overflow except in setvl
    # if there was not an explicit CR0 in the pseudocode, do implicit Rc=1
    # NOTE(review): guard (presumably `if cr0 is None:`) missing here
    self.handle_comparison(result, regnum, overflow, no_so=is_setvl)
    # otherwise we just blat CR0 into the required regnum
    # NOTE(review): else-branch guard missing here
    log("explicit rc0", cr0)
    self.crl[regnum].eq(cr0)
def do_outregs_nia(self, asmop, ins_name, info, outs,
                   carry_en, rc_en, ffirst_hit, ew_dst):
    """write back all named instruction outputs, then decide whether the
    PC (NIA) advances or the SVP64 sub-pc loop continues.

    NOTE(review): interior lines lost in extraction; gaps marked below.
    """
    ffirst_hit, vli = ffirst_hit  # unpack the (hit, VLI) pair
    # write out any regs for this instruction
    for name, output in outs.items():
        yield from self.check_write(info, name, output, carry_en, ew_dst)
    # restore the CR value on non-VLI failfirst (from sv.cmp and others
    # which write directly to CR in the pseudocode (gah, what a mess)
    # if ffirst_hit and not vli:
    #    self.cr.value = self.cr_backup
    # NOTE(review): guard (presumably `if ffirst_hit:`) missing here
    self.svp64_reset_loop()
    # NOTE(review): fail-first NIA update and else-branch guard missing here
    # check advancement of src/dst/sub-steps and if PC needs updating
    nia_update = (yield from self.check_step_increment(rc_en,
    # NOTE(review): remaining call arguments and the `if nia_update:`
    # guard are missing from this extraction — confirm against upstream
    self.update_pc_next()
def check_replace_d(self, info, remap_active):
    """recalculate the D/DS immediate for SVP64 unit-strided and
    element-strided LD/ST, substituting the offset-adjusted value into
    the pseudocode namespace ('D' or 'DS').

    NOTE(review): several interior lines (else-branches, replace_d
    updates) were lost in extraction; gaps are marked below.
    """
    replace_d = False # update / replace constant in pseudocode
    ldstmode = yield self.dec2.rm_dec.ldstmode
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    srcstep, dststep = self.new_srcstep, self.new_dststep
    ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep
    if info.form == 'DS':
        # DS-Form, multiply by 4 then knock 2 bits off after
        imm = yield self.dec2.dec.fields.FormDS.DS[0:14] * 4
    # NOTE(review): else-branch guard (D-Form path) missing here
    imm = yield self.dec2.dec.fields.FormD.D[0:16]
    imm = exts(imm, 16) # sign-extend to integer
    # get the right step. LD is from srcstep, ST is dststep
    op = yield self.dec2.e.do.insn_type
    # NOTE(review): offsmul initialisation appears missing here
    if op == MicrOp.OP_LOAD.value:
        # NOTE(review): guard (presumably on remap_active) missing here
        offsmul = yield self.dec2.in1_step
        log("D-field REMAP src", imm, offsmul, ldstmode)
        # NOTE(review): else-branch guard missing here
        offsmul = (srcstep * (subvl+1)) + ssubstep
        log("D-field src", imm, offsmul, ldstmode)
    elif op == MicrOp.OP_STORE.value:
        # XXX NOTE! no bit-reversed STORE! this should not ever be used
        offsmul = (dststep * (subvl+1)) + dsubstep
        log("D-field dst", imm, offsmul, ldstmode)
    # Unit-Strided LD/ST adds offset*width to immediate
    if ldstmode == SVP64LDSTmode.UNITSTRIDE.value:
        ldst_len = yield self.dec2.e.do.data_len
        imm = SelectableInt(imm + offsmul * ldst_len, 32)
        # NOTE(review): replace_d update appears missing here
    # Element-strided multiplies the immediate by element step
    elif ldstmode == SVP64LDSTmode.ELSTRIDE.value:
        imm = SelectableInt(imm * offsmul, 32)
        # NOTE(review): replace_d update / guard appears missing here
    ldst_ra_vec = yield self.dec2.rm_dec.ldst_ra_vec
    ldst_imz_in = yield self.dec2.rm_dec.ldst_imz_in
    log("LDSTmode", SVP64LDSTmode(ldstmode),
        offsmul, imm, ldst_ra_vec, ldst_imz_in)
    # new replacement D... errr.. DS
    # NOTE(review): `if replace_d:` guard appears missing here
    if info.form == 'DS':
        # TODO: assert 2 LSBs are zero?
        log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm.value))
        imm.value = imm.value >> 2
        self.namespace['DS'] = imm
    # NOTE(review): else-branch guard (D-Form path) missing here
    self.namespace['D'] = imm
def get_input(self, name, ew_src):
    """read one input operand (by pseudocode name, e.g. RA/RB/RC/RS)
    from the register file at element-width ew_src, honouring SVP64
    source-predicate zeroing.

    NOTE(review): interior guard lines and the final return were lost
    in extraction; gaps are marked below.
    """
    # using PowerDecoder2, first, find the decoder index.
    # (mapping name RA RB RC RS to in1, in2, in3)
    regnum, is_vec = yield from get_idx_in(self.dec2, name, True)
    # NOTE(review): fall-through guard (regnum None?) missing here
    # doing this is not part of svp64, it's because output
    # registers, to be modified, need to be in the namespace.
    regnum, is_vec = yield from get_idx_out(self.dec2, name, True)
    # NOTE(review): second fall-through guard missing here
    regnum, is_vec = yield from get_idx_out2(self.dec2, name, True)
    if isinstance(regnum, tuple):
        (regnum, base, offs) = regnum
    # NOTE(review): else-branch guard missing here
    base, offs = regnum, 0 # temporary HACK
    # in case getting the register number is needed, _RA, _RB
    # (HACK: only in straight non-svp64-mode for now, or elwidth == 64)
    regname = "_" + name
    if not self.is_svp64_mode or ew_src == 64:
        self.namespace[regname] = regnum
    elif regname in self.namespace:
        del self.namespace[regname]
    # predicate did not zero this element: actually read the register
    if not self.is_svp64_mode or not self.pred_src_zero:
        log('reading reg %s %s' % (name, str(regnum)), is_vec)
        # NOTE(review): FP-vs-GPR selection guard missing around the
        # following read — confirm against upstream
        reg_val = SelectableInt(self.fpr(base, is_vec, offs, ew_src))
        log("read reg %d/%d: 0x%x" % (base, offs, reg_val.value))
    elif name is not None:
        reg_val = SelectableInt(self.gpr(base, is_vec, offs, ew_src))
        log("read reg %d/%d: 0x%x" % (base, offs, reg_val.value))
    # NOTE(review): else-branch guard (predicate-zeroed path) missing here
    log('zero input reg %s %s' % (name, str(regnum)), is_vec)
    reg_val = SelectableInt(0, ew_src)
    # NOTE(review): `return reg_val` appears missing at the end
def remap_set_steps(self, remaps):
    """remap_set_steps sets up the in1/2/3 and out1/2 steps.
    they work in concert with PowerDecoder2 at the moment,
    there is no HDL implementation of REMAP. therefore this
    function, because ISACaller still uses PowerDecoder2,
    will *explicitly* write the dec2.XX_step values. this has

    NOTE(review): the remainder of the original docstring and several
    interior lines were lost in extraction; gaps are marked below.
    """
    # just some convenient debug info
    # NOTE(review): loop header over the SVSHAPE SPRs missing here
    sname = 'SVSHAPE%d' % i
    shape = self.spr[sname]
    log(sname, bin(shape.value))
    log(" lims", shape.lims)
    log(" mode", shape.mode)
    log(" skip", shape.skip)

    # set up the list of steps to remap
    mi0 = self.svstate.mi0
    mi1 = self.svstate.mi1
    mi2 = self.svstate.mi2
    mo0 = self.svstate.mo0
    mo1 = self.svstate.mo1
    steps = [[self.dec2.in1_step, mi0],  # RA
             [self.dec2.in2_step, mi1],  # RB
             [self.dec2.in3_step, mi2],  # RC
             [self.dec2.o_step, mo0],    # RT
             [self.dec2.o2_step, mo1],   # EA
    # NOTE(review): list close and following line(s) missing here
    rnames = ['RA', 'RB', 'RC', 'RT', 'RS']
    for i, reg in enumerate(rnames):
        idx = yield from get_idx_map(self.dec2, reg)
        # NOTE(review): guard before the FP-register ("F"+reg) retry
        # missing here
        idx = yield from get_idx_map(self.dec2, "F"+reg)
        # NOTE(review): the conditionals selecting which dec2 step signal
        # each register maps to are missing between the next three lines
        steps[i][0] = self.dec2.in1_step
        steps[i][0] = self.dec2.in2_step
        steps[i][0] = self.dec2.in3_step
        log("remap step", i, reg, idx, steps[i][1])
    remap_idxs = self.remap_idxs
    # NOTE(review): initialisation of the `rremaps` debug list missing here
    # now cross-index the required SHAPE for each of 3-in 2-out regs
    rnames = ['RA', 'RB', 'RC', 'RT', 'EA']
    for i, (dstep, shape_idx) in enumerate(steps):
        (shape, remap) = remaps[shape_idx]
        remap_idx = remap_idxs[shape_idx]
        # zero is "disabled"
        if shape.value == 0x0:
            # NOTE(review): branch body missing here
        # now set the actual requested step to the current index
        if dstep is not None:
            yield dstep.eq(remap_idx)
        # debug printout info
        rremaps.append((shape.mode, hex(shape.value), dstep,
                        i, rnames[i], shape_idx, remap_idx))
    # NOTE(review): loop header over rremaps missing here
    log("shape remap", x)
def check_write(self, info, name, output, carry_en, ew_dst):
    """write one named instruction output to its destination: XER carry
    bits, special SPRs, or the GPR/FPR file (with SVP64 dest-predicate
    zeroing and element-width ew_dst).

    NOTE(review): interior lines (early returns, else-branch guards,
    and the reg_prefix setup) were lost in extraction; gaps marked below.
    """
    if name == 'overflow': # ignore, done already (above)
        # NOTE(review): early return missing here
    if name == 'CR0': # ignore, done already (above)
        # NOTE(review): early return missing here
    if isinstance(output, int):
        output = SelectableInt(output, 256)
    # carry flags go to the XER SPR rather than to a register
    if name in ['CA', 'CA32']:
        # NOTE(review): guard on carry_en appears missing here
        log("writing %s to XER" % name, output)
        log("write XER %s 0x%x" % (name, output.value))
        self.spr['XER'][XER_bits[name]] = output.value
        # NOTE(review): else-branch guard missing here
        log("NOT writing %s to XER" % name, output)
    # write special SPRs
    if name in info.special_regs:
        log('writing special %s' % name, output, special_sprs)
        log("write reg %s 0x%x" % (name, output.value))
        if name in special_sprs:
            self.spr[name] = output
        # NOTE(review): else-branch guard missing here
        self.namespace[name].eq(output)
        # NOTE(review): MSR-specific guard line missing here
        log('msr written', hex(self.msr.value))
    # find out1/out2 PR/FPR
    regnum, is_vec = yield from get_idx_out(self.dec2, name, True)
    # NOTE(review): fall-through guard missing here
    regnum, is_vec = yield from get_idx_out2(self.dec2, name, True)
    # NOTE(review): second fall-through guard missing here
    # temporary hack for not having 2nd output
    regnum = yield getattr(self.decoder, name)
    # convenient debug prefix
    # NOTE(review): reg_prefix assignment ('f' vs 'r') missing here
    # check zeroing due to predicate bit being zero
    if self.is_svp64_mode and self.pred_dst_zero:
        log('zeroing reg %s %s' % (str(regnum), str(output)), is_vec)
        output = SelectableInt(0, 256)
    log("write reg %s%s 0x%x ew %d" % (reg_prefix, str(regnum),
                                       output.value, ew_dst),
        kind=LogKind.InstrInOuts)
    # zero-extend to 64 bit before storing (should use EXT oh well)
    if output.bits > 64:
        output = SelectableInt(output.value, 64)
    # NOTE(review): FP-vs-GPR selection guard missing around the
    # following two writes — confirm against upstream
    self.fpr.write(regnum, output, is_vec, ew_dst)
    self.gpr.write(regnum, output, is_vec, ew_dst)
def check_step_increment(self, rc_en, asmop, ins_name):
    """after instruction execution: advance SVSTATE src/dst steps (or
    handle an explicitly-requested svstep increment from Vertical-First
    mode) and return whether NIA should be committed.

    NOTE(review): interior lines lost in extraction; gaps marked below.
    """
    # check if it is the SVSTATE.src/dest step that needs incrementing
    # this is our Sub-Program-Counter loop from 0 to VL-1
    if not self.allow_next_step_inc:
        if self.is_svp64_mode:
            return (yield from self.svstate_post_inc(ins_name))
        # XXX only in non-SVP64 mode!
        # record state of whether the current operation was an svshape,
        # to be able to know if it should apply in the next instruction.
        # also (if going to use this instruction) should disable ability
        # to interrupt in between. sigh.
        self.last_op_svshape = asmop in ['svremap', 'svindex',
        # NOTE(review): the rest of this list and the non-svp64 return
        # are missing from this extraction — confirm against upstream
    log("SVSTATE_NEXT: inc requested, mode",
        self.svstate_next_mode, self.allow_next_step_inc)
    yield from self.svstate_pre_inc()
    pre = yield from self.update_new_svstate_steps()
    # NOTE(review): guard (presumably `if pre:`) missing here
    # reset at end of loop including exit Vertical Mode
    log("SVSTATE_NEXT: end of loop, reset")
    self.svp64_reset_loop()
    self.svstate.vfirst = 0
    # NOTE(review): line(s) missing here
    self.handle_comparison(SelectableInt(0, 64)) # CR0
    # NOTE(review): early return appears missing here
    if self.allow_next_step_inc == 2:
        log("SVSTATE_NEXT: read")
        nia_update = (yield from self.svstate_post_inc(ins_name))
    # NOTE(review): else-branch guard missing here
    log("SVSTATE_NEXT: post-inc")
    # use actual (cached) src/dst-step here to check end
    remaps = self.get_remap_indices()
    remap_idxs = self.remap_idxs
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    if self.allow_next_step_inc != 2:
        yield from self.advance_svstate_steps()
    #self.namespace['SVSTATE'] = self.svstate.spr
    # set CR0 (if Rc=1) based on end
    endtest = 1 if self.at_loopend() else 0
    # NOTE(review): guard (presumably `if rc_en:`) missing here
    #results = [SelectableInt(endtest, 64)]
    # self.handle_comparison(results) # CR0
    # see if svstep was requested, if so, which SVSTATE
    if self.svstate_next_mode > 0:
        shape_idx = self.svstate_next_mode.value-1
        endings = self.remap_loopends[shape_idx]
        cr_field = SelectableInt((~endings) << 1 | endtest, 4)
        log("svstep Rc=1, CR0", cr_field, endtest)
        self.crl[0].eq(cr_field) # CR0
    # reset at end of loop including exit Vertical Mode
    log("SVSTATE_NEXT: after increments, reset")
    self.svp64_reset_loop()
    self.svstate.vfirst = 0
    # NOTE(review): return of nia_update appears missing at the end
def SVSTATE_NEXT(self, mode, submode):
    """explicitly moves srcstep/dststep on to next element, for
    "Vertical-First" mode. this function is called from
    setvl pseudo-code, as a pseudo-op "svstep"

    WARNING: this function uses information that was created EARLIER
    due to it being in the middle of a yield, but this function is
    *NOT* called from yield (it's called from compiled pseudocode).
    """
    self.allow_next_step_inc = submode.value + 1
    log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
    self.svstate_next_mode = mode
    nxt = self.svstate_next_mode
    # modes 1-4: report the REMAP index for the selected SVSHAPE
    # (mode is deliberately left set in this case)
    if nxt > 0 and nxt < 5:
        return SelectableInt(self.remap_idxs[nxt.value - 1], 7)
    # modes 5-8: report the corresponding loop counter and clear the mode
    for modeval, counter in ((5, 'srcstep'), (6, 'dststep'),
                             (7, 'ssubstep'), (8, 'dsubstep')):
        if nxt == modeval:
            self.svstate_next_mode = 0
            return SelectableInt(getattr(self.svstate, counter), 7)
    # anything else: zero
    return SelectableInt(0, 7)
def get_src_dststeps(self):
    """gets srcstep, dststep, and ssubstep, dsubstep

    Returns the cached "new" SVP64 loop-step values as a tuple:
    (new_srcstep, new_dststep, new_ssubstep, new_dsubstep).

    Fix: the docstring in the source was never terminated (the closing
    triple-quote was absent), which is a SyntaxError as written.
    """
    return (self.new_srcstep, self.new_dststep,
            self.new_ssubstep, self.new_dsubstep)
def update_svstate_namespace(self, overwrite_svstate=True):
    """sync the "new" step values into self.svstate (when requested),
    then publish SVSTATE to the pseudocode namespace and to the
    PowerDecoder2 state, settling the decoder.
    """
    if overwrite_svstate:
        # note, do not get the bit-reversed srcstep here!
        srcstep, dststep = self.new_srcstep, self.new_dststep
        ssubstep, dsubstep = self.new_ssubstep, self.new_dsubstep

        # update SVSTATE with new srcstep
        self.svstate.srcstep = srcstep
        self.svstate.dststep = dststep
        self.svstate.ssubstep = ssubstep
        self.svstate.dsubstep = dsubstep
    # NOTE(review): indentation of the following three lines was
    # reconstructed (extraction lost whitespace); they appear to run
    # unconditionally — confirm against upstream
    self.namespace['SVSTATE'] = self.svstate
    yield self.dec2.state.svstate.eq(self.svstate.value)
    yield Settle()  # let decoder update
def update_new_svstate_steps(self, overwrite_svstate=True):
    """update the namespace/decoder with the new steps, then report
    whether the end of the SVP64 loop has been reached (srcstep or
    dststep hitting VL with the sub-step at subvl).

    NOTE(review): a few interior log lines were lost in extraction;
    gaps are marked below.
    """
    yield from self.update_svstate_namespace(overwrite_svstate)
    srcstep = self.svstate.srcstep
    dststep = self.svstate.dststep
    ssubstep = self.svstate.ssubstep
    dsubstep = self.svstate.dsubstep
    pack = self.svstate.pack
    unpack = self.svstate.unpack
    vl = self.svstate.vl
    sv_mode = yield self.dec2.rm_dec.sv_mode
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    rm_mode = yield self.dec2.rm_dec.mode
    ff_inv = yield self.dec2.rm_dec.inv
    cr_bit = yield self.dec2.rm_dec.cr_sel
    log(" srcstep", srcstep)
    log(" dststep", dststep)
    # NOTE(review): a log line (likely pack) missing here
    log(" unpack", unpack)
    log(" ssubstep", ssubstep)
    log(" dsubstep", dsubstep)
    # NOTE(review): a log line (likely vl) missing here
    log(" subvl", subvl)
    log(" rm_mode", rm_mode)
    log(" sv_mode", sv_mode)
    # NOTE(review): a log line (likely ff_inv) missing here
    log(" cr_bit", cr_bit)
    # check if end reached (we let srcstep overrun, above)
    # nothing needs doing (TODO zeroing): just do next instruction
    # NOTE(review): line(s) missing before the return — confirm upstream
    return ((ssubstep == subvl and srcstep == vl) or
            (dsubstep == subvl and dststep == vl))
def svstate_post_inc(self, insn_name, vf=0):
    """advance the SVP64 loop state after one element's execution;
    returns True when the PC may advance, False while the sub-pc loop
    must repeat the same instruction with new steps.

    NOTE(review): interior lines (guards / early returns) were lost in
    extraction; gaps are marked below.
    """
    # check if SV "Vertical First" mode is enabled
    vfirst = self.svstate.vfirst
    log(" SV Vertical First", vf, vfirst)
    if not vf and vfirst == 1:
        # NOTE(review): branch body (NIA update / return) missing here
    # check if it is the SVSTATE.src/dest step that needs incrementing
    # this is our Sub-Program-Counter loop from 0 to VL-1
    # XXX twin predication TODO
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    mvl = self.svstate.maxvl
    srcstep = self.svstate.srcstep
    dststep = self.svstate.dststep
    ssubstep = self.svstate.ssubstep
    dsubstep = self.svstate.dsubstep
    pack = self.svstate.pack
    unpack = self.svstate.unpack
    rm_mode = yield self.dec2.rm_dec.mode
    reverse_gear = yield self.dec2.rm_dec.reverse_gear
    sv_ptype = yield self.dec2.dec.op.SV_Ptype
    out_vec = not (yield self.dec2.no_out_vec)
    in_vec = not (yield self.dec2.no_in_vec)
    log(" svstate.vl", vl)
    log(" svstate.mvl", mvl)
    log(" rm.subvl", subvl)
    log(" svstate.srcstep", srcstep)
    log(" svstate.dststep", dststep)
    log(" svstate.ssubstep", ssubstep)
    log(" svstate.dsubstep", dsubstep)
    log(" svstate.pack", pack)
    log(" svstate.unpack", unpack)
    log(" mode", rm_mode)
    log(" reverse", reverse_gear)
    log(" out_vec", out_vec)
    log(" in_vec", in_vec)
    log(" sv_ptype", sv_ptype, sv_ptype == SVPType.P2.value)
    # check if this was an sv.bc* and if so did it succeed
    if self.is_svp64_mode and insn_name.startswith("sv.bc"):
        end_loop = self.namespace['end_loop']
        log("branch %s end_loop" % insn_name, end_loop)
        # NOTE(review): guard on end_loop appears missing here
        self.svp64_reset_loop()
        self.update_pc_next()
        # NOTE(review): return appears missing here
    # check if srcstep needs incrementing by one, stop PC advancing
    # but for 2-pred both src/dest have to be checked.
    # XXX this might not be true! it may just be LD/ST
    if sv_ptype == SVPType.P2.value:
        svp64_is_vector = (out_vec or in_vec)
    # NOTE(review): else-branch guard missing here
    svp64_is_vector = out_vec
    # loops end at the first "hit" (source or dest)
    yield from self.advance_svstate_steps()
    loopend = self.loopend
    log("loopend", svp64_is_vector, loopend)
    if not svp64_is_vector or loopend:
        # reset loop to zero and update NIA
        self.svp64_reset_loop()
        # NOTE(review): NIA update / `return True` missing here
    # still looping, advance and update NIA
    self.namespace['SVSTATE'] = self.svstate
    # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
    # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
    # this way we keep repeating the same instruction (with new steps)
    self.pc.NIA.value = self.pc.CIA.value
    self.namespace['NIA'] = self.pc.NIA
    log("end of sub-pc call", self.namespace['CIA'], self.namespace['NIA'])
    return False  # DO NOT allow PC update whilst Sub-PC loop running
def update_pc_next(self):
    """commit the program counter: have self.pc update itself from the
    pseudocode namespace (SVP64-aware), then log the final CIA/NIA and
    SVSTATE.
    """
    ns = self.namespace
    self.pc.update(ns, self.is_svp64_mode)
    #self.svstate.spr = self.namespace['SVSTATE']
    log("end of call", ns['CIA'],
        ns['NIA'],
        ns['SVSTATE'])
def svp64_reset_loop(self):
    """reset all SVP64 loop counters (src/dst steps and sub-steps) to
    zero, clear the loop-end flag, and publish SVSTATE back into the
    pseudocode namespace.
    """
    for counter in ('srcstep', 'dststep', 'ssubstep', 'dsubstep'):
        setattr(self.svstate, counter, 0)
    self.loopend = False
    log(" svstate.srcstep loop end (PC to update)")
    self.namespace['SVSTATE'] = self.svstate
def update_nia(self):
    """recompute NIA in the program-counter object (per the SVP64 flag;
    elsewhere in this file NIA defaults to CIA+4 for v3.0B, CIA+8 for
    SVP64) and mirror the new NIA into the pseudocode namespace.
    """
    pc = self.pc
    pc.update_nia(self.is_svp64_mode)
    self.namespace['NIA'] = pc.NIA
2683 """Decorator factory.
2685 this decorator will "inject" variables into the function's namespace,
2686 from the *dictionary* in self.namespace. it therefore becomes possible
2687 to make it look like a whole stack of variables which would otherwise
2688 need "self." inserted in front of them (*and* for those variables to be
2689 added to the instance) "appear" in the function.
2691 "self.namespace['SI']" for example becomes accessible as just "SI" but
2692 *only* inside the function, when decorated.
def variable_injector(func):
    """wrap *func* so that, for the duration of the call, every entry
    of args[0].namespace (the simulator state) is visible as a plain
    global inside func's module globals; afterwards the (mutated)
    globals dict becomes the new namespace.

    NOTE(review): interior lines (likely a @wraps decorator, the `try:`
    opener, and the `return result` / `return decorator` lines) are
    missing from this extraction; gaps are marked below.
    """
    def decorator(*args, **kwargs):
        # NOTE(review): `try:` line missing here
        func_globals = func.__globals__ # Python 2.6+
        except AttributeError:
            func_globals = func.func_globals # Earlier versions.
        context = args[0].namespace # variables to be injected
        saved_values = func_globals.copy() # Shallow copy of dict.
        log("globals before", context.keys())
        # inject: namespace entries now resolve as globals inside func
        func_globals.update(context)
        result = func(*args, **kwargs)
        log("globals after", func_globals['CIA'], func_globals['NIA'])
        log("args[0]", args[0].namespace['CIA'],
            args[0].namespace['NIA'],
            args[0].namespace['SVSTATE'])
        if 'end_loop' in func_globals:
            log("args[0] end_loop", func_globals['end_loop'])
        # the mutated globals (incl. pseudocode writes) become the namespace
        args[0].namespace = func_globals
        #exec (func.__code__, func_globals)
        # NOTE(review): `return result` appears missing here
    # func_globals = saved_values # Undo changes.
2723 return variable_injector