From: Luke Kenneth Casson Leighton Date: Sun, 8 Aug 2021 21:06:45 +0000 (+0100) Subject: add start of SVP64ASM encoder for sv.bc and sv.bclr X-Git-Tag: xlen-bcd~151 X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=4ad3c373c7fbd826447de0c64b558eeb2b53d174;p=openpower-isa.git add start of SVP64ASM encoder for sv.bc and sv.bclr TODO, sv.bca, sv.bclrl etc. --- diff --git a/src/openpower/consts.py b/src/openpower/consts.py index 747c9d12..f55eedc3 100644 --- a/src/openpower/consts.py +++ b/src/openpower/consts.py @@ -225,6 +225,7 @@ class SVP64MODEb: LDST_SHIFT = 2 # set =1 for shift mode # when predicate not set: 0=ignore/skip 1=zero DZ = 3 # for destination + BC_SNZ = 3 # for branch-conditional mode SZ = 4 # for source # reduce mode REDUCE = 2 # 0=normal predication 1=reduce mode diff --git a/src/openpower/sv/trans/svp64.py b/src/openpower/sv/trans/svp64.py index 2898a3d9..5b548764 100644 --- a/src/openpower/sv/trans/svp64.py +++ b/src/openpower/sv/trans/svp64.py @@ -514,7 +514,7 @@ class SVP64Asm: else: # range is CR0-CR127 in increments of 8 assert sv_extra & 0b11 == 0, \ - "vector CR %s cannot fit into EXTRA2 %s" % \ + "vector CR %s cannot fit into EXTRA3 %s" % \ (rname, str(extras[extra_idx])) # all good: encode as vector (bit 3 set) sv_extra = 0b100 | (sv_extra >> 2) @@ -582,6 +582,11 @@ class SVP64Asm: # see https://libre-soc.org/openpower/sv/ldst/ is_ldst = is_ld or is_st + # branch-conditional detection + is_bc = v30b_op in [ + "bc", "bclr", + ] + # parts of svp64_rm mmode = 0 # bit 0 pmask = 0 # bits 1-3 @@ -609,6 +614,16 @@ class SVP64Asm: failfirst = False ldst_elstride = 0 + # branch-conditional bits + bc_all = 0 + bc_lru = 0 + bc_brc = 0 + bc_svstep = 0 + bc_vsb = 0 + bc_vlset = 0 + bc_vli = 0 + bc_snz = 0 + # ok let's start identifying opcode augmentation fields for encmode in opmodes: # predicate mask (src and dest) @@ -685,6 +700,33 @@ class SVP64Asm: mapreduce_crm = True elif encmode == 'svm': # sub-vector mode mapreduce_svm = True + elif 
is_bc: + if encmode == 'all': + bc_all = 1 + elif encmode == 'st': # svstep mode + bc_svstep = 1 + elif encmode == 'sr': # svstep BRc mode + bc_svstep = 1 + bc_brc = 1 + elif encmode == 'vs': # VLSET mode + bc_vlset = 1 + elif encmode == 'vsi': # VLSET mode with VLI (VL inclusives) + bc_vlset = 1 + bc_vli = 1 + elif encmode == 'vsb': # VLSET mode with VSb + bc_vlset = 1 + bc_vsb = 1 + elif encmode == 'vsbi': # VLSET mode with VLI and VSb + bc_vlset = 1 + bc_vli = 1 + bc_vsb = 1 + elif encmode == 'snz': # sz (only) already set above + src_zero = 1 + bc_snz = 1 + elif encmode == 'lu': # LR update mode + bc_lru = 1 + else: + raise AssertionError("unknown encmode %s" % encmode) else: raise AssertionError("unknown encmode %s" % encmode) @@ -735,83 +777,92 @@ class SVP64Asm: assert sv_mode is None, \ "LD shift cannot have modes (%s) applied" % sv_mode - ###################################### - # "normal" mode - if sv_mode is None: - mode |= src_zero << SVP64MODE.SZ # predicate zeroing - mode |= dst_zero << SVP64MODE.DZ # predicate zeroing - if is_ldst: - # TODO: for now, LD/ST-indexed is ignored. - mode |= ldst_elstride << SVP64MODE.ELS_NORMAL # element-strided - # shifted mode - if ldst_shift: - mode |= 1 << SVP64MODE.LDST_SHIFT - else: - # TODO, reduce and subvector mode - # 00 1 dz CRM reduce mode (mapreduce), SUBVL=1 - # 00 1 SVM CRM subvector reduce mode, SUBVL>1 - pass - sv_mode = 0b00 - - ###################################### - # "mapreduce" modes - elif sv_mode == 0b00: - mode |= (0b1<1 + pass + sv_mode = 0b00 + + ###################################### + # "mapreduce" modes + elif sv_mode == 0b00: + mode |= (0b1<