Zvbb (vector basic bit-manipulation) is part of the RISC-V vector crypto
extensions. This extension adds the following instructions:
- vandn.[vv,vx]
- vbrev.v
- vbrev8.v
- vrev8.v
- vclz.v
- vctz.v
- vcpop.v
- vrol.[vv,vx]
- vror.[vv,vx,vi]
- vwsll.[vv,vx,vi]
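For context, each of these instructions applies a simple per-element bit
operation. The C model below is an illustration only (not part of this
patch; SEW=64 assumed) and shows the semantics of vandn and vror:

#include <stdint.h>
#include <stdio.h>

/* Scalar model of one SEW=64 element.  vandn.vv computes vs2 & ~vs1;
   vror rotates vs2 right, using only the low log2(SEW) bits of the
   rotate amount, as with the scalar rotate instructions.  */
static uint64_t
vandn (uint64_t vs2, uint64_t vs1)
{
  return vs2 & ~vs1;
}

static uint64_t
vror (uint64_t vs2, uint64_t amount)
{
  unsigned int r = amount & 63;
  return r ? (vs2 >> r) | (vs2 << (64 - r)) : vs2;
}

int
main (void)
{
  /* 00000000000000f0: the low nibble of vs2 is cleared by vs1.  */
  printf ("%016llx\n", (unsigned long long) vandn (0xff, 0x0f));
  /* 1000000000000000: bit 0 rotated right by 4 wraps to bit 60.  */
  printf ("%016llx\n", (unsigned long long) vror (1, 4));
  return 0;
}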
bfd/ChangeLog:
* elfxx-riscv.c (riscv_multi_subset_supports): Add instruction
class support for Zvbb.
(riscv_multi_subset_supports_ext): Likewise.
gas/ChangeLog:
* config/tc-riscv.c (validate_riscv_insn): Add 'Vl' as new format
string directive for the 6-bit unsigned vector immediate.
(riscv_ip): Likewise.
* testsuite/gas/riscv/zvbb.d: New test.
* testsuite/gas/riscv/zvbb.s: New test.
include/ChangeLog:
* opcode/riscv-opc.h (MATCH_VANDN_VV): New.
(MASK_VANDN_VV): New.
(MATCH_VANDN_VX): New.
(MASK_VANDN_VX): New.
(MATCH_VBREV8_V): New.
(MASK_VBREV8_V): New.
(MATCH_VBREV_V): New.
(MASK_VBREV_V): New.
(MATCH_VCLZ_V): New.
(MASK_VCLZ_V): New.
(MATCH_VCPOP_V): New.
(MASK_VCPOP_V): New.
(MATCH_VCTZ_V): New.
(MASK_VCTZ_V): New.
(MATCH_VREV8_V): New.
(MASK_VREV8_V): New.
(MATCH_VROL_VV): New.
(MASK_VROL_VV): New.
(MATCH_VROL_VX): New.
(MASK_VROL_VX): New.
(MATCH_VROR_VI): New.
(MASK_VROR_VI): New.
(MATCH_VROR_VV): New.
(MASK_VROR_VV): New.
(MATCH_VROR_VX): New.
(MASK_VROR_VX): New.
(MATCH_VWSLL_VI): New.
(MASK_VWSLL_VI): New.
(MATCH_VWSLL_VV): New.
(MASK_VWSLL_VV): New.
(MATCH_VWSLL_VX): New.
(MASK_VWSLL_VX): New.
(DECLARE_INSN): Add declarations for the Zvbb instructions.
* opcode/riscv.h (EXTRACT_RVV_VI_UIMM6): New.
(ENCODE_RVV_VI_UIMM6): New.
(enum riscv_insn_class): Add instruction class for Zvbb.
opcodes/ChangeLog:
* riscv-dis.c (print_insn_args): Add 'Vl' as new format string
directive.
* riscv-opc.c: Add Zvbb instructions.
Signed-off-by: Nathan Huckleberry <nhuck@google.com>
Signed-off-by: Christoph Müllner <christoph.muellner@vrull.eu>
{"zve64x", ISA_SPEC_CLASS_DRAFT, 1, 0, 0 },
{"zve64f", ISA_SPEC_CLASS_DRAFT, 1, 0, 0 },
{"zve64d", ISA_SPEC_CLASS_DRAFT, 1, 0, 0 },
+ {"zvbb", ISA_SPEC_CLASS_DRAFT, 1, 0, 0 },
{"zvl32b", ISA_SPEC_CLASS_DRAFT, 1, 0, 0 },
{"zvl64b", ISA_SPEC_CLASS_DRAFT, 1, 0, 0 },
{"zvl128b", ISA_SPEC_CLASS_DRAFT, 1, 0, 0 },
|| riscv_subset_supports (rps, "zve64d")
|| riscv_subset_supports (rps, "zve64f")
|| riscv_subset_supports (rps, "zve32f"));
+ case INSN_CLASS_ZVBB:
+ return riscv_subset_supports (rps, "zvbb");
case INSN_CLASS_SVINVAL:
return riscv_subset_supports (rps, "svinval");
case INSN_CLASS_H:
return _("v' or `zve64x' or `zve32x");
case INSN_CLASS_ZVEF:
return _("v' or `zve64d' or `zve64f' or `zve32f");
+ case INSN_CLASS_ZVBB:
+ return _("zvbb");
case INSN_CLASS_SVINVAL:
return "svinval";
case INSN_CLASS_H:
case 'i':
case 'j':
case 'k': USE_BITS (OP_MASK_VIMM, OP_SH_VIMM); break;
+ case 'l': used_bits |= ENCODE_RVV_VI_UIMM6 (-1U); break;
case 'm': USE_BITS (OP_MASK_VMASK, OP_SH_VMASK); break;
case 'M': break; /* Macro operand, must be a mask register. */
case 'T': break; /* Macro operand, must be a vector register. */
asarg = expr_parse_end;
continue;
+ case 'l': /* 6-bit vector arith unsigned immediate.  */
+ my_getExpression (imm_expr, asarg);
+ check_absolute_expr (ip, imm_expr, false);
+ if (imm_expr->X_add_number < 0
+ || imm_expr->X_add_number >= 64)
+ as_bad (_("bad value for vector immediate field, "
+ "value must be 0...63"));
+ ip->insn_opcode |= ENCODE_RVV_VI_UIMM6 (imm_expr->X_add_number);
+ imm_expr->X_op = O_absent;
+ asarg = expr_parse_end;
+ continue;
+
case 'm': /* optional vector mask */
if (*asarg == '\0')
{
--- /dev/null
+#as: -march=rv64gc_zvbb
+#objdump: -dr
+
+.*:[ ]+file format .*
+
+
+Disassembly of section .text:
+0+000 <.text>:
+[ ]+[0-9a-f]+:[ ]+06860257[ ]+vandn.vv[ ]+v4,v8,v12
+[ ]+[0-9a-f]+:[ ]+04860257[ ]+vandn.vv[ ]+v4,v8,v12,v0.t
+[ ]+[0-9a-f]+:[ ]+0685c257[ ]+vandn.vx[ ]+v4,v8,a1
+[ ]+[0-9a-f]+:[ ]+0485c257[ ]+vandn.vx[ ]+v4,v8,a1,v0.t
+[ ]+[0-9a-f]+:[ ]+4a852257[ ]+vbrev.v[ ]+v4,v8
+[ ]+[0-9a-f]+:[ ]+48852257[ ]+vbrev.v[ ]+v4,v8,v0.t
+[ ]+[0-9a-f]+:[ ]+4a842257[ ]+vbrev8.v[ ]+v4,v8
+[ ]+[0-9a-f]+:[ ]+48842257[ ]+vbrev8.v[ ]+v4,v8,v0.t
+[ ]+[0-9a-f]+:[ ]+4a84a257[ ]+vrev8.v[ ]+v4,v8
+[ ]+[0-9a-f]+:[ ]+4884a257[ ]+vrev8.v[ ]+v4,v8,v0.t
+[ ]+[0-9a-f]+:[ ]+4a862257[ ]+vclz.v[ ]+v4,v8
+[ ]+[0-9a-f]+:[ ]+48862257[ ]+vclz.v[ ]+v4,v8,v0.t
+[ ]+[0-9a-f]+:[ ]+4a86a257[ ]+vctz.v[ ]+v4,v8
+[ ]+[0-9a-f]+:[ ]+4886a257[ ]+vctz.v[ ]+v4,v8,v0.t
+[ ]+[0-9a-f]+:[ ]+4a872257[ ]+vcpop.v[ ]+v4,v8
+[ ]+[0-9a-f]+:[ ]+48872257[ ]+vcpop.v[ ]+v4,v8,v0.t
+[ ]+[0-9a-f]+:[ ]+56860257[ ]+vrol.vv[ ]+v4,v8,v12
+[ ]+[0-9a-f]+:[ ]+54860257[ ]+vrol.vv[ ]+v4,v8,v12,v0.t
+[ ]+[0-9a-f]+:[ ]+5685c257[ ]+vrol.vx[ ]+v4,v8,a1
+[ ]+[0-9a-f]+:[ ]+5485c257[ ]+vrol.vx[ ]+v4,v8,a1,v0.t
+[ ]+[0-9a-f]+:[ ]+52860257[ ]+vror.vv[ ]+v4,v8,v12
+[ ]+[0-9a-f]+:[ ]+50860257[ ]+vror.vv[ ]+v4,v8,v12,v0.t
+[ ]+[0-9a-f]+:[ ]+5285c257[ ]+vror.vx[ ]+v4,v8,a1
+[ ]+[0-9a-f]+:[ ]+5085c257[ ]+vror.vx[ ]+v4,v8,a1,v0.t
+[ ]+[0-9a-f]+:[ ]+52803257[ ]+vror.vi[ ]+v4,v8,0
+[ ]+[0-9a-f]+:[ ]+548fb257[ ]+vror.vi[ ]+v4,v8,63,v0.t
+[ ]+[0-9a-f]+:[ ]+d6860257[ ]+vwsll.vv[ ]+v4,v8,v12
+[ ]+[0-9a-f]+:[ ]+d4860257[ ]+vwsll.vv[ ]+v4,v8,v12,v0.t
+[ ]+[0-9a-f]+:[ ]+d685c257[ ]+vwsll.vx[ ]+v4,v8,a1
+[ ]+[0-9a-f]+:[ ]+d485c257[ ]+vwsll.vx[ ]+v4,v8,a1,v0.t
+[ ]+[0-9a-f]+:[ ]+d6803257[ ]+vwsll.vi[ ]+v4,v8,0
+[ ]+[0-9a-f]+:[ ]+d48fb257[ ]+vwsll.vi[ ]+v4,v8,31,v0.t
+
--- /dev/null
+ vandn.vv v4, v8, v12
+ vandn.vv v4, v8, v12, v0.t
+ vandn.vx v4, v8, a1
+ vandn.vx v4, v8, a1, v0.t
+ vbrev.v v4, v8
+ vbrev.v v4, v8, v0.t
+ vbrev8.v v4, v8
+ vbrev8.v v4, v8, v0.t
+ vrev8.v v4, v8
+ vrev8.v v4, v8, v0.t
+ vclz.v v4, v8
+ vclz.v v4, v8, v0.t
+ vctz.v v4, v8
+ vctz.v v4, v8, v0.t
+ vcpop.v v4, v8
+ vcpop.v v4, v8, v0.t
+ vrol.vv v4, v8, v12
+ vrol.vv v4, v8, v12, v0.t
+ vrol.vx v4, v8, a1
+ vrol.vx v4, v8, a1, v0.t
+ vror.vv v4, v8, v12
+ vror.vv v4, v8, v12, v0.t
+ vror.vx v4, v8, a1
+ vror.vx v4, v8, a1, v0.t
+ vror.vi v4, v8, 0
+ vror.vi v4, v8, 63, v0.t
+ vwsll.vv v4, v8, v12
+ vwsll.vv v4, v8, v12, v0.t
+ vwsll.vx v4, v8, a1
+ vwsll.vx v4, v8, a1, v0.t
+ vwsll.vi v4, v8, 0
+ vwsll.vi v4, v8, 31, v0.t
#define MASK_VDOTUVV 0xfc00707f
#define MATCH_VFDOTVV 0xe4001057
#define MASK_VFDOTVV 0xfc00707f
+/* Zvbb instructions. */
+#define MATCH_VANDN_VV 0x4000057
+#define MASK_VANDN_VV 0xfc00707f
+#define MATCH_VANDN_VX 0x4004057
+#define MASK_VANDN_VX 0xfc00707f
+#define MATCH_VBREV8_V 0x48042057
+#define MASK_VBREV8_V 0xfc0ff07f
+#define MATCH_VBREV_V 0x48052057
+#define MASK_VBREV_V 0xfc0ff07f
+#define MATCH_VCLZ_V 0x48062057
+#define MASK_VCLZ_V 0xfc0ff07f
+#define MATCH_VCPOP_V 0x48072057
+#define MASK_VCPOP_V 0xfc0ff07f
+#define MATCH_VCTZ_V 0x4806a057
+#define MASK_VCTZ_V 0xfc0ff07f
+#define MATCH_VREV8_V 0x4804a057
+#define MASK_VREV8_V 0xfc0ff07f
+#define MATCH_VROL_VV 0x54000057
+#define MASK_VROL_VV 0xfc00707f
+#define MATCH_VROL_VX 0x54004057
+#define MASK_VROL_VX 0xfc00707f
+#define MATCH_VROR_VI 0x50003057
+#define MASK_VROR_VI 0xf800707f
+#define MATCH_VROR_VV 0x50000057
+#define MASK_VROR_VV 0xfc00707f
+#define MATCH_VROR_VX 0x50004057
+#define MASK_VROR_VX 0xfc00707f
+#define MATCH_VWSLL_VI 0xd4003057
+#define MASK_VWSLL_VI 0xfc00707f
+#define MATCH_VWSLL_VV 0xd4000057
+#define MASK_VWSLL_VV 0xfc00707f
+#define MATCH_VWSLL_VX 0xd4004057
+#define MASK_VWSLL_VX 0xfc00707f
/* Svinval instruction. */
#define MATCH_SINVAL_VMA 0x16000073
#define MASK_SINVAL_VMA 0xfe007fff
/* Zawrs instructions. */
DECLARE_INSN(wrs_nto, MATCH_WRS_NTO, MASK_WRS_NTO)
DECLARE_INSN(wrs_sto, MATCH_WRS_STO, MASK_WRS_STO)
+/* Zvbb instructions. */
+DECLARE_INSN(vandn_vv, MATCH_VANDN_VV, MASK_VANDN_VV)
+DECLARE_INSN(vandn_vx, MATCH_VANDN_VX, MASK_VANDN_VX)
+DECLARE_INSN(vbrev8_v, MATCH_VBREV8_V, MASK_VBREV8_V)
+DECLARE_INSN(vbrev_v, MATCH_VBREV_V, MASK_VBREV_V)
+DECLARE_INSN(vclz_v, MATCH_VCLZ_V, MASK_VCLZ_V)
+DECLARE_INSN(vcpop_v, MATCH_VCPOP_V, MASK_VCPOP_V)
+DECLARE_INSN(vctz_v, MATCH_VCTZ_V, MASK_VCTZ_V)
+DECLARE_INSN(vrev8_v, MATCH_VREV8_V, MASK_VREV8_V)
+DECLARE_INSN(vrol_vv, MATCH_VROL_VV, MASK_VROL_VV)
+DECLARE_INSN(vrol_vx, MATCH_VROL_VX, MASK_VROL_VX)
+DECLARE_INSN(vror_vi, MATCH_VROR_VI, MASK_VROR_VI)
+DECLARE_INSN(vror_vv, MATCH_VROR_VV, MASK_VROR_VV)
+DECLARE_INSN(vror_vx, MATCH_VROR_VX, MASK_VROR_VX)
+DECLARE_INSN(vwsll_vi, MATCH_VWSLL_VI, MASK_VWSLL_VI)
+DECLARE_INSN(vwsll_vv, MATCH_VWSLL_VV, MASK_VWSLL_VV)
+DECLARE_INSN(vwsll_vx, MATCH_VWSLL_VX, MASK_VWSLL_VX)
/* Vendor-specific (T-Head) XTheadBa instructions. */
DECLARE_INSN(th_addsl, MATCH_TH_ADDSL, MASK_TH_ADDSL)
/* Vendor-specific (T-Head) XTheadBb instructions. */
(RV_X(x, 15, 5) | (-RV_X(x, 19, 1) << 5))
#define EXTRACT_RVV_VI_UIMM(x) \
(RV_X(x, 15, 5))
+#define EXTRACT_RVV_VI_UIMM6(x) \
+ (RV_X(x, 15, 5) | (RV_X(x, 26, 1) << 5))
#define EXTRACT_RVV_OFFSET(x) \
(RV_X(x, 29, 3))
#define EXTRACT_RVV_VB_IMM(x) \
(RV_X(x, 0, 10) << 20)
#define ENCODE_RVV_VC_IMM(x) \
(RV_X(x, 0, 11) << 20)
+#define ENCODE_RVV_VI_UIMM6(x) \
+ (RV_X(x, 0, 5) << 15 | RV_X(x, 5, 1) << 26)
#define VALID_ITYPE_IMM(x) (EXTRACT_ITYPE_IMM(ENCODE_ITYPE_IMM(x)) == (x))
#define VALID_STYPE_IMM(x) (EXTRACT_STYPE_IMM(ENCODE_STYPE_IMM(x)) == (x))
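The two uimm6 macros above split the 6-bit rotate immediate across the
instruction word: bits 4:0 land in insn[19:15] (the usual uimm5 field)
and bit 5 lands in insn[26]. The standalone sketch below (replicating
RV_X locally, illustration only) round-trips every legal value and
prints the encoded field bits for the largest one:

#include <assert.h>
#include <stdio.h>

/* Local replica of include/opcode/riscv.h's RV_X helper and the two
   new macros, so the split encoding can be checked in isolation.  */
#define RV_X(x, s, n)  (((x) >> (s)) & ((1U << (n)) - 1))
#define ENCODE_RVV_VI_UIMM6(x) \
  (RV_X(x, 0, 5) << 15 | RV_X(x, 5, 1) << 26)
#define EXTRACT_RVV_VI_UIMM6(x) \
  (RV_X(x, 15, 5) | (RV_X(x, 26, 1) << 5))

int
main (void)
{
  for (unsigned int imm = 0; imm < 64; imm++)
    assert (EXTRACT_RVV_VI_UIMM6 (ENCODE_RVV_VI_UIMM6 (imm)) == imm);
  /* 0x040f8000: exactly the immediate bits present in 0x548fb257,
     the test's encoding of vror.vi v4,v8,63,v0.t.  */
  printf ("0x%08x\n", ENCODE_RVV_VI_UIMM6 (63));
  return 0;
}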
INSN_CLASS_ZKND_OR_ZKNE,
INSN_CLASS_V,
INSN_CLASS_ZVEF,
+ INSN_CLASS_ZVBB,
INSN_CLASS_SVINVAL,
INSN_CLASS_ZICBOM,
INSN_CLASS_ZICBOP,
print (info->stream, dis_style_immediate, "%d",
(int)EXTRACT_RVV_OFFSET (l));
break;
+ case 'l':
+ print (info->stream, dis_style_immediate, "%d",
+ (int)EXTRACT_RVV_VI_UIMM6 (l));
+ break;
case 'm':
if (!EXTRACT_OPERAND (VMASK, l))
{
{"vmv4r.v", 0, INSN_CLASS_V, "Vd,Vt", MATCH_VMV4RV, MASK_VMV4RV, match_opcode, 0},
{"vmv8r.v", 0, INSN_CLASS_V, "Vd,Vt", MATCH_VMV8RV, MASK_VMV8RV, match_opcode, 0},
+/* Zvbb instructions. */
+{"vandn.vv", 0, INSN_CLASS_ZVBB, "Vd,Vt,VsVm", MATCH_VANDN_VV, MASK_VANDN_VV, match_opcode, 0},
+{"vandn.vx", 0, INSN_CLASS_ZVBB, "Vd,Vt,sVm", MATCH_VANDN_VX, MASK_VANDN_VX, match_opcode, 0},
+{"vbrev.v", 0, INSN_CLASS_ZVBB, "Vd,VtVm", MATCH_VBREV_V, MASK_VBREV_V, match_opcode, 0},
+{"vbrev8.v", 0, INSN_CLASS_ZVBB, "Vd,VtVm", MATCH_VBREV8_V, MASK_VBREV8_V, match_opcode, 0},
+{"vrev8.v", 0, INSN_CLASS_ZVBB, "Vd,VtVm", MATCH_VREV8_V, MASK_VREV8_V, match_opcode, 0},
+{"vclz.v", 0, INSN_CLASS_ZVBB, "Vd,VtVm", MATCH_VCLZ_V, MASK_VCLZ_V, match_opcode, 0},
+{"vctz.v", 0, INSN_CLASS_ZVBB, "Vd,VtVm", MATCH_VCTZ_V, MASK_VCTZ_V, match_opcode, 0},
+{"vcpop.v", 0, INSN_CLASS_ZVBB, "Vd,VtVm", MATCH_VCPOP_V, MASK_VCPOP_V, match_opcode, 0},
+{"vrol.vv", 0, INSN_CLASS_ZVBB, "Vd,Vt,VsVm", MATCH_VROL_VV, MASK_VROL_VV, match_opcode, 0},
+{"vrol.vx", 0, INSN_CLASS_ZVBB, "Vd,Vt,sVm", MATCH_VROL_VX, MASK_VROL_VX, match_opcode, 0},
+{"vror.vv", 0, INSN_CLASS_ZVBB, "Vd,Vt,VsVm", MATCH_VROR_VV, MASK_VROR_VV, match_opcode, 0},
+{"vror.vx", 0, INSN_CLASS_ZVBB, "Vd,Vt,sVm", MATCH_VROR_VX, MASK_VROR_VX, match_opcode, 0},
+{"vror.vi", 0, INSN_CLASS_ZVBB, "Vd,Vt,VlVm", MATCH_VROR_VI, MASK_VROR_VI, match_opcode, 0},
+{"vwsll.vv", 0, INSN_CLASS_ZVBB, "Vd,Vt,VsVm", MATCH_VWSLL_VV, MASK_VWSLL_VV, match_opcode, 0},
+{"vwsll.vx", 0, INSN_CLASS_ZVBB, "Vd,Vt,sVm", MATCH_VWSLL_VX, MASK_VWSLL_VX, match_opcode, 0},
+{"vwsll.vi", 0, INSN_CLASS_ZVBB, "Vd,Vt,VjVm", MATCH_VWSLL_VI, MASK_VWSLL_VI, match_opcode, 0},
+
/* Supervisor instructions. */
{"csrr", 0, INSN_CLASS_ZICSR, "d,E", MATCH_CSRRS, MASK_CSRRS|MASK_RS1, match_opcode, INSN_ALIAS },
{"csrw", 0, INSN_CLASS_ZICSR, "E,s", MATCH_CSRRW, MASK_CSRRW|MASK_RD, match_opcode, INSN_ALIAS },