#include "dw2gencfi.h"
#include "dwarf2dbg.h"
-/* Types of processor to assemble for. */
-#ifndef CPU_DEFAULT
-#define CPU_DEFAULT AARCH64_ARCH_V8
-#endif
-
#define streq(a, b) (strcmp (a, b) == 0)
#define END_OF_INSN '\0'
static const aarch64_feature_set *march_cpu_opt = NULL;
/* Constants for known architecture features. */
-static const aarch64_feature_set cpu_default = CPU_DEFAULT;
+static const aarch64_feature_set cpu_default = AARCH64_ARCH_FEATURES (V8A);
/* Currently active instruction sequence. */
static aarch64_instr_sequence *insn_sequence = NULL;
inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
}
-static inline void
-set_expected_error (unsigned int flags)
-{
- set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
- inst.parsing_error.data[0].i = flags;
-}
-
static inline void
set_syntax_error (const char *error)
{
BASIC_REG_TYPE(R_64) /* x[0-30] */ \
BASIC_REG_TYPE(SP_32) /* wsp */ \
BASIC_REG_TYPE(SP_64) /* sp */ \
- BASIC_REG_TYPE(Z_32) /* wzr */ \
- BASIC_REG_TYPE(Z_64) /* xzr */ \
+ BASIC_REG_TYPE(ZR_32) /* wzr */ \
+ BASIC_REG_TYPE(ZR_64) /* xzr */ \
BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
- BASIC_REG_TYPE(VN) /* v[0-31] */ \
- BASIC_REG_TYPE(ZN) /* z[0-31] */ \
- BASIC_REG_TYPE(PN) /* p[0-15] */ \
+ BASIC_REG_TYPE(V) /* v[0-31] */ \
+ BASIC_REG_TYPE(Z) /* z[0-31] */ \
+ BASIC_REG_TYPE(P) /* p[0-15] */ \
+ BASIC_REG_TYPE(PN) /* pn[0-15] */ \
BASIC_REG_TYPE(ZA) /* za */ \
BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
+ BASIC_REG_TYPE(ZT0) /* zt0 */ \
/* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
/* Typecheck: same, plus SVE registers. */ \
MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
- | REG_TYPE(ZN)) \
+ | REG_TYPE(Z)) \
/* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
- MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
- | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
+ MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
+ | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
/* Typecheck: same, plus SVE registers. */ \
MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
- | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
- | REG_TYPE(ZN)) \
+ | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
+ | REG_TYPE(Z)) \
/* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
| REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
/* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
- MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
+ MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
| REG_TYPE(SP_32) | REG_TYPE(SP_64) \
- | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
+ | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
/* Typecheck: any [BHSDQ]P FP. */ \
MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
| REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
/* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
- MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
- | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
+ MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
+ | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
| REG_TYPE(FP_B) | REG_TYPE(FP_H) \
| REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
/* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
be used for SVE instructions, since Zn and Pn are valid symbols \
in other contexts. */ \
- MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
+ MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
+ | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
+ | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
+ | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
+ | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
+ | REG_TYPE(Z) | REG_TYPE(P)) \
+ /* Likewise, but with predicate-as-counter registers added. */ \
+ MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP_PN, REG_TYPE(R_32) | REG_TYPE(R_64) \
| REG_TYPE(SP_32) | REG_TYPE(SP_64) \
- | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
+ | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
| REG_TYPE(FP_B) | REG_TYPE(FP_H) \
| REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
- | REG_TYPE(ZN) | REG_TYPE(PN)) \
+ | REG_TYPE(Z) | REG_TYPE(P) | REG_TYPE(PN)) \
/* Any integer register; used for error messages only. */ \
MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
| REG_TYPE(SP_32) | REG_TYPE(SP_64) \
- | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
+ | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
/* Any vector register. */ \
- MULTI_REG_TYPE(VZ, REG_TYPE(VN) | REG_TYPE(ZN)) \
+ MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
/* An SVE vector or predicate register. */ \
- MULTI_REG_TYPE(ZP, REG_TYPE(ZN) | REG_TYPE(PN)) \
+ MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
/* Any vector or predicate register. */ \
- MULTI_REG_TYPE(VZP, REG_TYPE(VN) | REG_TYPE(ZN) | REG_TYPE(PN)) \
+ MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
/* The whole of ZA or a single tile. */ \
MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
/* A horizontal or vertical slice of a ZA tile. */ \
| reg_type_masks[REG_TYPE_ZATHV])))
return N_("expected 'za' rather than a ZA tile at operand %d");
+ if ((mask & reg_type_masks[REG_TYPE_PN])
+ && (seen & reg_type_masks[REG_TYPE_P]))
+ return N_("expected a predicate-as-counter rather than predicate-as-mask"
+ " register at operand %d");
+
+ if ((mask & reg_type_masks[REG_TYPE_P])
+ && (seen & reg_type_masks[REG_TYPE_PN]))
+ return N_("expected a predicate-as-mask rather than predicate-as-counter"
+ " register at operand %d");
+
/* Integer, zero and stack registers. */
if (mask == reg_type_masks[REG_TYPE_R_64])
return N_("expected a 64-bit integer register at operand %d");
- if (mask == reg_type_masks[REG_TYPE_R_Z])
+ if (mask == reg_type_masks[REG_TYPE_R_ZR])
return N_("expected an integer or zero register at operand %d");
if (mask == reg_type_masks[REG_TYPE_R_SP])
return N_("expected an integer or stack pointer register at operand %d");
if (mask == reg_type_masks[REG_TYPE_BHSDQ])
return N_("expected a scalar SIMD or floating-point register"
" at operand %d");
- if (mask == reg_type_masks[REG_TYPE_VN])
+ if (mask == reg_type_masks[REG_TYPE_V])
return N_("expected an Advanced SIMD vector register at operand %d");
- if (mask == reg_type_masks[REG_TYPE_ZN])
+ if (mask == reg_type_masks[REG_TYPE_Z])
return N_("expected an SVE vector register at operand %d");
- if (mask == reg_type_masks[REG_TYPE_PN])
+ if (mask == reg_type_masks[REG_TYPE_P]
+ || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
+ /* Use this error for "predicate-as-mask only" and "either kind of
+ predicate". We report a more specific error if P is used where
+ PN is expected, and vice versa, so the issue at this point is
+ "predicate-like" vs. "not predicate-like". */
return N_("expected an SVE predicate register at operand %d");
+ if (mask == reg_type_masks[REG_TYPE_PN])
+ return N_("expected an SVE predicate-as-counter register at operand %d");
if (mask == reg_type_masks[REG_TYPE_VZ])
return N_("expected a vector register at operand %d");
if (mask == reg_type_masks[REG_TYPE_ZP])
if (mask == reg_type_masks[REG_TYPE_VZP])
return N_("expected a vector or predicate register at operand %d");
- /* ZA-related registers. */
+ /* SME-related registers. */
if (mask == reg_type_masks[REG_TYPE_ZA])
return N_("expected a ZA array vector at operand %d");
- if (mask == reg_type_masks[REG_TYPE_ZA_ZAT])
- return N_("expected 'za' or a ZA tile at operand %d");
+ if (mask == (reg_type_masks[REG_TYPE_ZA_ZAT] | reg_type_masks[REG_TYPE_ZT0]))
+ return N_("expected ZT0 or a ZA mask at operand %d");
if (mask == reg_type_masks[REG_TYPE_ZAT])
return N_("expected a ZA tile at operand %d");
if (mask == reg_type_masks[REG_TYPE_ZATHV])
return N_("expected a ZA tile slice at operand %d");
/* Integer and vector combos. */
- if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VN]))
+ if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
return N_("expected an integer register or Advanced SIMD vector register"
" at operand %d");
- if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_ZN]))
+ if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
return N_("expected an integer register or SVE vector register"
" at operand %d");
- if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VZ]))
+ if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
return N_("expected an integer or vector register at operand %d");
- if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_PN]))
+ if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
return N_("expected an integer or predicate register at operand %d");
- if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VZP]))
+ if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
return N_("expected an integer, vector or predicate register"
" at operand %d");
/* SVE and SME combos. */
- if (mask == (reg_type_masks[REG_TYPE_ZN] | reg_type_masks[REG_TYPE_ZATHV]))
+ if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
return N_("expected an SVE vector register or ZA tile slice"
" at operand %d");
{
case REG_TYPE_R_32:
case REG_TYPE_SP_32:
- case REG_TYPE_Z_32:
+ case REG_TYPE_ZR_32:
return AARCH64_OPND_QLF_W;
case REG_TYPE_R_64:
case REG_TYPE_SP_64:
- case REG_TYPE_Z_64:
+ case REG_TYPE_ZR_64:
return AARCH64_OPND_QLF_X;
case REG_TYPE_FP_B:
switch (reg->type)
{
- case REG_TYPE_ZN:
- if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
+ case REG_TYPE_Z:
+ if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
|| str[0] != '.')
return NULL;
switch (TOLOWER (str[1]))
break;
default:
- if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
+ if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
return NULL;
*qualifier = inherent_reg_qualifier (reg);
break;
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
- return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
+ return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
}
/* Parse the qualifier of a vector register or vector element of type
gas_assert (*ptr == '.');
ptr++;
- if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
+ if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
{
width = 0;
goto elt_size;
element_size = 64;
break;
case 'q':
- if (reg_type != REG_TYPE_VN || width == 1)
+ if (reg_type != REG_TYPE_V || width == 1)
{
type = NT_q;
element_size = 128;
{
switch (type)
{
- case REG_TYPE_VN:
- case REG_TYPE_ZN:
+ case REG_TYPE_V:
+ case REG_TYPE_Z:
case REG_TYPE_ZA:
case REG_TYPE_ZAT:
case REG_TYPE_ZATH:
case REG_TYPE_ZATV:
return ch == '.';
+ case REG_TYPE_P:
case REG_TYPE_PN:
return ch == '.' || ch == '/';
/* Register if of the form Vn.[bhsdq]. */
is_typed_vecreg = true;
- if (type != REG_TYPE_VN)
+ if (type != REG_TYPE_V)
{
/* The width is always variable; we don't allow an integer width
to be specified. */
if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
{
/* Reject Sn[index] syntax. */
- if (!is_typed_vecreg)
+ if (reg->type != REG_TYPE_Z
+ && reg->type != REG_TYPE_PN
+ && reg->type != REG_TYPE_ZT0
+ && !is_typed_vecreg)
{
first_error (_("this type of register can't be indexed"));
return NULL;
}
/* A vector reg Vn should be typed or indexed. */
- if (type == REG_TYPE_VN && atype.defined == 0)
+ if (type == REG_TYPE_V && atype.defined == 0)
{
first_error (_("invalid use of vector register"));
}
&& e1.index == e2.index);
}
+/* Return the register number mask for registers of type REG_TYPE. */
+
+static inline int
+reg_type_mask (aarch64_reg_type reg_type)
+{
+ return reg_type == REG_TYPE_P ? 15 : 31;
+}
+
/* This function parses a list of vector registers of type TYPE.
On success, it returns the parsed register list information in the
following encoded format:
char *str = *ccp;
int nb_regs;
struct vector_type_el typeinfo, typeinfo_first;
- int val, val_range;
+ uint32_t val, val_range, mask;
int in_range;
int ret_val;
- int i;
bool error = false;
bool expect_index = false;
unsigned int ptr_flags = PTR_IN_REGLIST;
typeinfo_first.element_size = 0;
typeinfo_first.index = 0;
ret_val = 0;
- val = -1;
- val_range = -1;
+ val = -1u;
+ val_range = -1u;
in_range = 0;
+ mask = reg_type_mask (type);
do
{
if (in_range)
}
val = reg->number;
/* reject [bhsd]n */
- if (type == REG_TYPE_VN && typeinfo.defined == 0)
+ if (type == REG_TYPE_V && typeinfo.defined == 0)
{
set_first_syntax_error (_("invalid scalar register in list"));
error = true;
if (in_range)
{
- if (val < val_range)
+ if (val == val_range)
{
set_first_syntax_error
(_("invalid range in vector register list"));
error = true;
}
- val_range++;
+ val_range = (val_range + 1) & mask;
}
else
{
}
}
if (! error)
- for (i = val_range; i <= val; i++)
+ for (;;)
{
- ret_val |= i << (5 * nb_regs);
+ ret_val |= val_range << ((5 * nb_regs) & 31);
nb_regs++;
+ if (val_range == val)
+ break;
+ val_range = (val_range + 1) & mask;
}
in_range = 0;
ptr_flags |= PTR_GOOD_MATCH;
return false;
}
- if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
+ if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
{
- set_expected_reg_error (REG_TYPE_R_Z, reg, 0);
+ set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
return false;
}
{
aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
- REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
+ REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
}
/* Parse an address in which SVE vector registers and MUL VL are allowed.
/* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
of SIZE tokens in which index I gives the token for field value I,
- or is null if field value I is invalid. REG_TYPE says which register
- names should be treated as registers rather than as symbolic immediates.
+ or is null if field value I is invalid. If the symbolic operand
+ can also be given as a 0-based integer, REG_TYPE says which register
+ names should be treated as registers rather than as symbolic immediates
+ while parsing that integer. REG_TYPE is REG_TYPE_MAX otherwise.
Return true on success, moving *STR past the operand and storing the
field value in *VAL. */
return true;
}
+ if (reg_type == REG_TYPE_MAX)
+ return false;
+
if (!parse_immediate_expression (&p, &exp, reg_type))
return false;
return false;
}
+ if (skip_past_char (str, ':'))
+ {
+ int64_t end;
+ if (!parse_sme_immediate (str, &end))
+ {
+ set_syntax_error (_("expected a constant immediate offset"));
+ return false;
+ }
+ if (end < opnd->index.imm)
+ {
+ set_syntax_error (_("the last offset is less than the"
+ " first offset"));
+ return false;
+ }
+ if (end == opnd->index.imm)
+ {
+ set_syntax_error (_("the last offset is equal to the"
+ " first offset"));
+ return false;
+ }
+ opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
+ }
+
+ opnd->group_size = 0;
+ if (skip_past_char (str, ','))
+ {
+ if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
+ {
+ *str += 4;
+ opnd->group_size = 2;
+ }
+ else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
+ {
+ *str += 4;
+ opnd->group_size = 4;
+ }
+ else
+ {
+ set_syntax_error (_("invalid vector group size"));
+ return false;
+ }
+ }
+
if (!skip_past_char (str, ']'))
{
set_syntax_error (_("expected ']'"));
"name '%s'"), buf);
if (!pstatefield_p
&& !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
- o->value, o->flags, o->features))
+ o->value, o->flags,
+ &o->features))
as_bad (_("selected processor does not support system register "
"name '%s'"), buf);
if (aarch64_sys_reg_deprecated_p (o->flags))
goto failure; \
} while (0)
+#define po_strict_enum_or_fail(array) do { \
+ if (!parse_enum_string (&str, &val, array, \
+ ARRAY_SIZE (array), REG_TYPE_MAX)) \
+ goto failure; \
+ } while (0)
+
#define po_misc_or_fail(expr) do { \
if (!expr) \
goto failure; \
} while (0)
\f
/* A primitive log calculator.  Return the floor of the base-2
   logarithm of N; N values of 0 and 1 both yield 0.  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int result;

  for (result = 0; n > 1; n >>= 1)
    result += 1;
  return result;
}
+
/* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
"AARCH64_OPDE_SYNTAX_ERROR",
"AARCH64_OPDE_FATAL_SYNTAX_ERROR",
"AARCH64_OPDE_INVALID_VARIANT",
- "AARCH64_OPDE_REG_LIST",
+ "AARCH64_OPDE_INVALID_VG_SIZE",
+ "AARCH64_OPDE_REG_LIST_LENGTH",
+ "AARCH64_OPDE_REG_LIST_STRIDE",
"AARCH64_OPDE_UNTIED_IMMS",
"AARCH64_OPDE_UNTIED_OPERAND",
"AARCH64_OPDE_OUT_OF_RANGE",
"AARCH64_OPDE_UNALIGNED",
"AARCH64_OPDE_OTHER_ERROR",
+ "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
- gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_INVALID_VARIANT);
- gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST);
+ gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
+ gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
+ gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
+ gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
- gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
+ gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
+ gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
return lhs > rhs;
}
detail->index + 1, str);
break;
+ case AARCH64_OPDE_INVALID_REGNO:
+ handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
+ detail->data[0].s, detail->data[1].i,
+ detail->data[0].s, detail->data[2].i, idx + 1, str);
+ break;
+
case AARCH64_OPDE_OUT_OF_RANGE:
if (detail->data[0].i != detail->data[1].i)
handler (_("%s out of range %d to %d at operand %d -- `%s'"),
detail->data[0].i, idx + 1, str);
break;
- case AARCH64_OPDE_REG_LIST:
- if (detail->data[0].i == 1)
- handler (_("invalid number of registers in the list; "
- "only 1 register is expected at operand %d -- `%s'"),
+ case AARCH64_OPDE_INVALID_VG_SIZE:
+ if (detail->data[0].i == 0)
+ handler (_("unexpected vector group size at operand %d -- `%s'"),
idx + 1, str);
else
- handler (_("invalid number of registers in the list; "
- "%d registers are expected at operand %d -- `%s'"),
- detail->data[0].i, idx + 1, str);
+ handler (_("operand %d must have a vector group size of %d -- `%s'"),
+ idx + 1, detail->data[0].i, str);
+ break;
+
+ case AARCH64_OPDE_REG_LIST_LENGTH:
+ if (detail->data[0].i == (1 << 1))
+ handler (_("expected a single-register list at operand %d -- `%s'"),
+ idx + 1, str);
+ else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
+ handler (_("expected a list of %d registers at operand %d -- `%s'"),
+ get_log2 (detail->data[0].i), idx + 1, str);
+ else if (detail->data[0].i == 0x14)
+ handler (_("expected a list of %d or %d registers at"
+ " operand %d -- `%s'"),
+ 2, 4, idx + 1, str);
+ else
+ handler (_("invalid number of registers in the list"
+ " at operand %d -- `%s'"), idx + 1, str);
+ break;
+
+ case AARCH64_OPDE_REG_LIST_STRIDE:
+ if (detail->data[0].i == (1 << 1))
+ handler (_("the register list must have a stride of %d"
+ " at operand %d -- `%s'"), 1, idx + 1, str);
+ else if (detail->data[0].i == 0x12 || detail->data[0].i == 0x102)
+ handler (_("the register list must have a stride of %d or %d"
+ " at operand %d -- `%s`"), 1,
+ detail->data[0].i == 0x12 ? 4 : 8, idx + 1, str);
+ else
+ handler (_("invalid register stride at operand %d -- `%s'"),
+ idx + 1, str);
break;
case AARCH64_OPDE_UNALIGNED:
}
}
/* Return true if the presence of error A against an instruction means
   that error B should not be reported.  This is only used as a first pass,
   to pick the kind of error that we should report.  */

static bool
better_error_p (operand_error_record *a, operand_error_record *b)
{
  /* For errors reported during parsing, prefer errors that relate to
     later operands, since that implies that the earlier operands were
     syntactically valid.

     For example, if we see a register R instead of an immediate in
     operand N, we'll report that as a recoverable "immediate operand
     required" error.  This is because there is often another opcode
     entry that accepts a register operand N, and any errors about R
     should be reported against the register forms of the instruction.
     But if no such register form exists, the recoverable error should
     still win over a syntax error against operand N-1.

     For these purposes, count an error reported at the end of the
     assembly string as equivalent to an error reported against the
     final operand.  This means that opcode entries that expect more
     operands win over "unexpected characters following instruction".  */
  if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
      && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
    {
      /* A negative index marks an error at the end of the assembly
	 string; map it to the opcode's final operand as described above.  */
      int a_index = (a->detail.index < 0
		     ? aarch64_num_of_operands (a->opcode) - 1
		     : a->detail.index);
      int b_index = (b->detail.index < 0
		     ? aarch64_num_of_operands (b->opcode) - 1
		     : b->detail.index);
      if (a_index != b_index)
	return a_index > b_index;
    }
  /* Same operand position (or at least one non-parse error): fall back
     on the relative severity of the two error kinds.  */
  return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
}
+
/* Process and output the error message about the operand mismatching.
When this function is called, the operand error information had
enum aarch64_operand_error_kind kind;
operand_error_record *curr;
operand_error_record *head = operand_error_report.head;
- operand_error_record *record = NULL;
+ operand_error_record *record;
/* No error to report. */
if (head == NULL)
/* Find the error kind of the highest severity. */
DEBUG_TRACE ("multiple opcode entries with error kind");
- kind = AARCH64_OPDE_NIL;
+ record = NULL;
for (curr = head; curr != NULL; curr = curr->next)
{
gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
curr->detail.data[0].i, curr->detail.data[1].i,
curr->detail.data[2].i);
}
+ else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
+ || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
+ {
+ DEBUG_TRACE ("\t%s [%x]",
+ operand_mismatch_kind_names[curr->detail.kind],
+ curr->detail.data[0].i);
+ }
else
{
DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
}
- if (operand_error_higher_severity_p (curr->detail.kind, kind)
- && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
- kind = curr->detail.kind;
+ if ((!non_fatal_only || curr->detail.non_fatal)
+ && (!record || better_error_p (curr, record)))
+ record = curr;
}
+ kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
/* Pick up one of errors of KIND to report. */
+ record = NULL;
for (curr = head; curr != NULL; curr = curr->next)
{
/* If we don't want to print non-fatal errors then don't consider them
curr->detail.data[0].i, curr->detail.data[1].i,
curr->detail.data[2].i);
}
+ else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
+ || kind == AARCH64_OPDE_REG_LIST_STRIDE)
+ {
+ record->detail.data[0].i |= curr->detail.data[0].i;
+ DEBUG_TRACE ("\t--> %s [%x]",
+ operand_mismatch_kind_names[kind],
+ curr->detail.data[0].i);
+ }
+      /* Pick the variant with the closest match.  */
+ else if (kind == AARCH64_OPDE_INVALID_VARIANT
+ && record->detail.data[0].i > curr->detail.data[0].i)
+ record = curr;
}
}
return true;
}
-/* A primitive log calculator. */
-
-static inline unsigned int
-get_logsz (unsigned int size)
-{
- const unsigned char ls[16] =
- {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
- if (size > 16)
- {
- gas_assert (0);
- return -1;
- }
- gas_assert (ls[size - 1] != (unsigned char)-1);
- return ls[size - 1];
-}
-
/* Determine and return the real reloc type code for an instruction
with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
1, opd0_qlf, 0);
gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
- logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
+ logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));
if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
|| inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
/* Check whether a register list REGINFO is valid.  The registers have type
   REG_TYPE and must be numbered in increasing order (modulo the register
   bank size).  They must have a consistent stride.

   REGINFO is in the packed format produced by the register-list parser:
   the low 2 bits hold the number of registers minus one, followed by
   one 5-bit field per register number, first register lowest.

   Return true if the list is valid, describing it in LIST if so.  */

static bool
reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list,
		  aarch64_reg_type reg_type)
{
  uint32_t i, nb_regs, prev_regno, incr, mask;

  /* Register numbers wrap modulo the size of the bank for REG_TYPE
     (16 for predicate registers, 32 otherwise).  */
  mask = reg_type_mask (reg_type);

  nb_regs = 1 + (reginfo & 0x3);
  reginfo >>= 2;
  prev_regno = reginfo & 0x1f;
  incr = 1;

  list->first_regno = prev_regno;
  list->num_regs = nb_regs;

  for (i = 1; i < nb_regs; ++i)
    {
      uint32_t curr_regno, curr_incr;
      reginfo >>= 5;
      curr_regno = reginfo & 0x1f;
      /* Distance from the previous register, wrapped to the bank size.
	 Zero means a repeated register, which is never valid.  */
      curr_incr = (curr_regno - prev_regno) & mask;
      if (curr_incr == 0)
	return false;
      else if (i == 1)
	/* The gap between the first two registers fixes the stride
	   that the rest of the list must follow.  */
	incr = curr_incr;
      else if (curr_incr != incr)
	return false;
      prev_regno = curr_regno;
    }
  list->stride = incr;
  return true;
}
clear_error ();
skip_whitespace (str);
- if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
- AARCH64_FEATURE_SVE
- | AARCH64_FEATURE_SVE2))
- imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
+ if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SME2))
+ imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP_PN;
+ else if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
+ || AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2))
+ imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
else
- imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
+ imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
{
case AARCH64_OPND_Rt_SYS:
case AARCH64_OPND_PAIRREG:
case AARCH64_OPND_SVE_Rm:
- po_int_fp_reg_or_fail (REG_TYPE_R_Z);
+ po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
/* In LS64 load/store instructions Rt register number must be even
and <=22. */
case AARCH64_OPND_SVE_Pn:
case AARCH64_OPND_SVE_Pt:
case AARCH64_OPND_SME_Pm:
- reg_type = REG_TYPE_PN;
+ reg_type = REG_TYPE_P;
goto vector_reg;
case AARCH64_OPND_SVE_Za_5:
case AARCH64_OPND_SVE_Zm_16:
case AARCH64_OPND_SVE_Zn:
case AARCH64_OPND_SVE_Zt:
- reg_type = REG_TYPE_ZN;
+ case AARCH64_OPND_SME_Zm:
+ reg_type = REG_TYPE_Z;
+ goto vector_reg;
+
+ case AARCH64_OPND_SVE_PNd:
+ case AARCH64_OPND_SVE_PNg4_10:
+ case AARCH64_OPND_SVE_PNn:
+ case AARCH64_OPND_SVE_PNt:
+ case AARCH64_OPND_SME_PNd3:
+ case AARCH64_OPND_SME_PNg3:
+ case AARCH64_OPND_SME_PNn:
+ reg_type = REG_TYPE_PN;
goto vector_reg;
case AARCH64_OPND_Va:
case AARCH64_OPND_Vd:
case AARCH64_OPND_Vn:
case AARCH64_OPND_Vm:
- reg_type = REG_TYPE_VN;
+ reg_type = REG_TYPE_V;
vector_reg:
reg = aarch64_reg_parse (&str, reg_type, &vectype);
if (!reg)
goto failure;
info->reg.regno = reg->number;
- if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
+ if ((reg_type == REG_TYPE_P
+ || reg_type == REG_TYPE_PN
+ || reg_type == REG_TYPE_Z)
&& vectype.type == NT_invtype)
- /* Unqualified Pn and Zn registers are allowed in certain
+ /* Unqualified P and Z registers are allowed in certain
contexts. Rely on F_STRICT qualifier checking to catch
invalid uses. */
info->qualifier = AARCH64_OPND_QLF_NIL;
case AARCH64_OPND_VdD1:
case AARCH64_OPND_VnD1:
- reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
+ reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
if (!reg)
goto failure;
if (vectype.type != NT_d || vectype.index != 1)
case AARCH64_OPND_SVE_Zm3_INDEX:
case AARCH64_OPND_SVE_Zm3_22_INDEX:
+ case AARCH64_OPND_SVE_Zm3_19_INDEX:
case AARCH64_OPND_SVE_Zm3_11_INDEX:
case AARCH64_OPND_SVE_Zm4_11_INDEX:
case AARCH64_OPND_SVE_Zm4_INDEX:
case AARCH64_OPND_SVE_Zn_INDEX:
- reg_type = REG_TYPE_ZN;
+ case AARCH64_OPND_SME_Zm_INDEX1:
+ case AARCH64_OPND_SME_Zm_INDEX2:
+ case AARCH64_OPND_SME_Zm_INDEX3_1:
+ case AARCH64_OPND_SME_Zm_INDEX3_2:
+ case AARCH64_OPND_SME_Zm_INDEX3_10:
+ case AARCH64_OPND_SME_Zm_INDEX4_1:
+ case AARCH64_OPND_SME_Zm_INDEX4_10:
+ case AARCH64_OPND_SME_Zn_INDEX1_16:
+ case AARCH64_OPND_SME_Zn_INDEX2_15:
+ case AARCH64_OPND_SME_Zn_INDEX2_16:
+ case AARCH64_OPND_SME_Zn_INDEX3_14:
+ case AARCH64_OPND_SME_Zn_INDEX3_15:
+ case AARCH64_OPND_SME_Zn_INDEX4_14:
+ reg_type = REG_TYPE_Z;
goto vector_reg_index;
case AARCH64_OPND_Ed:
case AARCH64_OPND_Em:
case AARCH64_OPND_Em16:
case AARCH64_OPND_SM3_IMM2:
- reg_type = REG_TYPE_VN;
+ reg_type = REG_TYPE_V;
vector_reg_index:
reg = aarch64_reg_parse (&str, reg_type, &vectype);
if (!reg)
goto failure;
- if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
+ if (!(vectype.defined & NTA_HASINDEX))
goto failure;
+ if (reg->type == REG_TYPE_Z && vectype.type == NT_invtype)
+ /* Unqualified Zn[index] is allowed in LUTI2 instructions. */
+ info->qualifier = AARCH64_OPND_QLF_NIL;
+ else
+ {
+ if (vectype.type == NT_invtype)
+ goto failure;
+ info->qualifier = vectype_to_qualifier (&vectype);
+ if (info->qualifier == AARCH64_OPND_QLF_NIL)
+ goto failure;
+ }
+
info->reglane.regno = reg->number;
info->reglane.index = vectype.index;
- info->qualifier = vectype_to_qualifier (&vectype);
- if (info->qualifier == AARCH64_OPND_QLF_NIL)
- goto failure;
break;
case AARCH64_OPND_SVE_ZnxN:
case AARCH64_OPND_SVE_ZtxN:
- reg_type = REG_TYPE_ZN;
+ case AARCH64_OPND_SME_Zdnx2:
+ case AARCH64_OPND_SME_Zdnx4:
+ case AARCH64_OPND_SME_Zmx2:
+ case AARCH64_OPND_SME_Zmx4:
+ case AARCH64_OPND_SME_Znx2:
+ case AARCH64_OPND_SME_Znx4:
+ case AARCH64_OPND_SME_Ztx2_STRIDED:
+ case AARCH64_OPND_SME_Ztx4_STRIDED:
+ reg_type = REG_TYPE_Z;
+ goto vector_reg_list;
+
+ case AARCH64_OPND_SME_Pdx2:
+ case AARCH64_OPND_SME_PdxN:
+ reg_type = REG_TYPE_P;
goto vector_reg_list;
case AARCH64_OPND_LVn:
case AARCH64_OPND_LVt:
case AARCH64_OPND_LVt_AL:
case AARCH64_OPND_LEt:
- reg_type = REG_TYPE_VN;
+ reg_type = REG_TYPE_V;
vector_reg_list:
- if (reg_type == REG_TYPE_ZN
+ if (reg_type == REG_TYPE_Z
&& get_opcode_dependent_value (opcode) == 1
&& *str != '{')
{
goto failure;
info->reglist.first_regno = reg->number;
info->reglist.num_regs = 1;
+ info->reglist.stride = 1;
}
else
{
if (val == PARSE_FAIL)
goto failure;
- if (! reg_list_valid_p (val, /* accept_alternate */ 0))
+ if (! reg_list_valid_p (val, &info->reglist, reg_type))
{
set_fatal_syntax_error (_("invalid register list"));
goto failure;
}
- if (vectype.width != 0 && *str != ',')
+ if ((int) vectype.width > 0 && *str != ',')
{
set_fatal_syntax_error
(_("expected element type rather than vector type"));
goto failure;
}
-
- info->reglist.first_regno = (val >> 2) & 0x1f;
- info->reglist.num_regs = (val & 0x3) + 1;
}
if (operands[i] == AARCH64_OPND_LEt)
{
goto failure;
if (!(vectype.defined & NTA_HASTYPE))
{
- if (reg_type == REG_TYPE_ZN)
+ if (reg_type == REG_TYPE_Z || reg_type == REG_TYPE_P)
set_fatal_syntax_error (_("missing type suffix"));
goto failure;
}
case AARCH64_OPND_SVE_SHLIMM_PRED:
case AARCH64_OPND_SVE_SHLIMM_UNPRED:
case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
+ case AARCH64_OPND_SME_SHRIMM4:
+ case AARCH64_OPND_SME_SHRIMM5:
case AARCH64_OPND_SVE_SHRIMM_PRED:
case AARCH64_OPND_SVE_SHRIMM_UNPRED:
case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
case AARCH64_OPND_IMM_MOV:
{
char *saved = str;
- if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
- reg_name_p (str, REG_TYPE_VN))
+ if (reg_name_p (str, REG_TYPE_R_ZR_SP)
+ || reg_name_p (str, REG_TYPE_V))
goto failure;
str = saved;
po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
break;
case AARCH64_OPND_SME_PnT_Wm_imm:
- if (!parse_dual_indexed_reg (&str, REG_TYPE_PN,
+ if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
&info->indexed_za, &qualifier, 0))
goto failure;
info->qualifier = qualifier;
inst.base.operands[i].prfop = aarch64_prfops + val;
break;
+ case AARCH64_OPND_RPRFMOP:
+ po_enum_or_fail (aarch64_rprfmop_array);
+ info->imm.value = val;
+ break;
+
case AARCH64_OPND_BARRIER_PSB:
val = parse_barrier_psb (&str, &(info->hint_option));
if (val == PARSE_FAIL)
goto failure;
break;
+ case AARCH64_OPND_SME_ZT0:
+ po_reg_or_fail (REG_TYPE_ZT0);
+ break;
+
+ case AARCH64_OPND_SME_ZT0_INDEX:
+ reg = aarch64_reg_parse (&str, REG_TYPE_ZT0, &vectype);
+ if (!reg || vectype.type != NT_invtype)
+ goto failure;
+ if (!(vectype.defined & NTA_HASINDEX))
+ {
+ set_syntax_error (_("missing register index"));
+ goto failure;
+ }
+ info->imm.value = vectype.index;
+ break;
+
+ case AARCH64_OPND_SME_ZT0_LIST:
+ if (*str != '{')
+ {
+ set_expected_reglist_error (REG_TYPE_ZT0, parse_reg (&str));
+ goto failure;
+ }
+ str++;
+ if (!parse_typed_reg (&str, REG_TYPE_ZT0, &vectype, PTR_IN_REGLIST))
+ goto failure;
+ if (*str != '}')
+ {
+ set_syntax_error (_("expected '}' after ZT0"));
+ goto failure;
+ }
+ str++;
+ break;
+
+ case AARCH64_OPND_SME_PNn3_INDEX1:
+ case AARCH64_OPND_SME_PNn3_INDEX2:
+ reg = aarch64_reg_parse (&str, REG_TYPE_PN, &vectype);
+ if (!reg)
+ goto failure;
+ if (!(vectype.defined & NTA_HASINDEX))
+ {
+ set_syntax_error (_("missing register index"));
+ goto failure;
+ }
+ info->reglane.regno = reg->number;
+ info->reglane.index = vectype.index;
+ if (vectype.type == NT_invtype)
+ info->qualifier = AARCH64_OPND_QLF_NIL;
+ else
+ info->qualifier = vectype_to_qualifier (&vectype);
+ break;
+
case AARCH64_OPND_BTI_TARGET:
val = parse_bti_operand (&str, &(info->hint_option));
if (val == PARSE_FAIL)
break;
case AARCH64_OPND_SME_ZA_HV_idx_src:
+ case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
case AARCH64_OPND_SME_ZA_HV_idx_dest:
+ case AARCH64_OPND_SME_ZA_HV_idx_destxN:
case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
? !parse_sme_za_hv_tiles_operand_with_braces (&str,
info->imm.value = val;
break;
- case AARCH64_OPND_SME_ZA_array:
+ case AARCH64_OPND_SME_ZA_array_off1x4:
+ case AARCH64_OPND_SME_ZA_array_off2x2:
+ case AARCH64_OPND_SME_ZA_array_off2x4:
+ case AARCH64_OPND_SME_ZA_array_off3_0:
+ case AARCH64_OPND_SME_ZA_array_off3_5:
+ case AARCH64_OPND_SME_ZA_array_off3x2:
+ case AARCH64_OPND_SME_ZA_array_off4:
if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
&info->indexed_za, &qualifier, 0))
goto failure;
info->qualifier = qualifier;
break;
+ case AARCH64_OPND_SME_VLxN_10:
+ case AARCH64_OPND_SME_VLxN_13:
+ po_strict_enum_or_fail (aarch64_sme_vlxn_array);
+ info->imm.value = val;
+ break;
+
case AARCH64_OPND_MOPS_ADDR_Rd:
case AARCH64_OPND_MOPS_ADDR_Rs:
po_char_or_fail ('[');
&& do_encode (inst_base->opcode, &inst.base, &inst_base->value))
{
/* Check that this instruction is supported for this CPU. */
- if (!opcode->avariant
- || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
+ if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
{
as_bad (_("selected processor does not support `%s'"), str);
return;
REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
- REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
- REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
+ REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
+ REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),
/* Floating-point single precision registers. */
REGSET (s, FP_S), REGSET (S, FP_S),
REGSET (q, FP_Q), REGSET (Q, FP_Q),
/* FP/SIMD registers. */
- REGSET (v, VN), REGSET (V, VN),
+ REGSET (v, V), REGSET (V, V),
/* SVE vector registers. */
- REGSET (z, ZN), REGSET (Z, ZN),
+ REGSET (z, Z), REGSET (Z, Z),
+
+ /* SVE predicate(-as-mask) registers. */
+ REGSET16 (p, P), REGSET16 (P, P),
- /* SVE predicate registers. */
- REGSET16 (p, PN), REGSET16 (P, PN),
+ /* SVE predicate-as-counter registers. */
+ REGSET16 (pn, PN), REGSET16 (PN, PN),
/* SME ZA. We model this as a register because it acts syntactically
like ZA0H, supporting qualifier suffixes and indexing. */
REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
/* SME ZA tile registers (vertical slice). */
- REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
+ REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV),
+
+ /* SME2 ZT0. */
+ REGDEF (zt0, 0, ZT0), REGDEF (ZT0, 0, ZT0)
};
#undef REGDEF
/* This list should, at a minimum, contain all the cpu names
recognized by GCC. */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
- {"all", AARCH64_ANY, NULL},
- {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC), "Cortex-A34"},
- {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC), "Cortex-A35"},
- {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC), "Cortex-A53"},
- {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC), "Cortex-A57"},
- {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC), "Cortex-A72"},
- {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC), "Cortex-A73"},
- {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
+ {"all", AARCH64_ALL_FEATURES, NULL},
+ {"cortex-a34", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A34"},
+ {"cortex-a35", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A35"},
+ {"cortex-a53", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A53"},
+ {"cortex-a57", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A57"},
+ {"cortex-a72", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A72"},
+ {"cortex-a73", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A73"},
+ {"cortex-a55", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
"Cortex-A55"},
- {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
+ {"cortex-a75", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
"Cortex-A75"},
- {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
+ {"cortex-a76", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
"Cortex-A76"},
- {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_SSBS),
- "Cortex-A76AE"},
- {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_SSBS),
- "Cortex-A77"},
- {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_SSBS),
- "Cortex-A65"},
- {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_SSBS),
- "Cortex-A65AE"},
- {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_F16
- | AARCH64_FEATURE_RCPC
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_SSBS
- | AARCH64_FEATURE_PROFILE),
- "Cortex-A78"},
- {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_F16
- | AARCH64_FEATURE_RCPC
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_SSBS
- | AARCH64_FEATURE_PROFILE),
- "Cortex-A78AE"},
- {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_F16
- | AARCH64_FEATURE_FLAGM
- | AARCH64_FEATURE_PAC
- | AARCH64_FEATURE_PROFILE
- | AARCH64_FEATURE_RCPC
- | AARCH64_FEATURE_SSBS),
- "Cortex-A78C"},
- {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
- AARCH64_FEATURE_BFLOAT16
- | AARCH64_FEATURE_I8MM
- | AARCH64_FEATURE_MEMTAG
- | AARCH64_FEATURE_SVE2_BITPERM),
- "Cortex-A510"},
- {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
- AARCH64_FEATURE_BFLOAT16
- | AARCH64_FEATURE_I8MM
- | AARCH64_FEATURE_MEMTAG
- | AARCH64_FEATURE_SVE2_BITPERM),
- "Cortex-A710"},
- {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_PROFILE),
- "Ares"},
- {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
- "Samsung Exynos M1"},
- {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
- | AARCH64_FEATURE_RDMA),
- "Qualcomm Falkor"},
- {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_SSBS),
- "Neoverse E1"},
- {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_PROFILE),
- "Neoverse N1"},
- {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
- AARCH64_FEATURE_BFLOAT16
- | AARCH64_FEATURE_I8MM
- | AARCH64_FEATURE_F16
- | AARCH64_FEATURE_SVE
- | AARCH64_FEATURE_SVE2
- | AARCH64_FEATURE_SVE2_BITPERM
- | AARCH64_FEATURE_MEMTAG
- | AARCH64_FEATURE_RNG),
- "Neoverse N2"},
- {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
- AARCH64_FEATURE_PROFILE
- | AARCH64_FEATURE_CVADP
- | AARCH64_FEATURE_SVE
- | AARCH64_FEATURE_SSBS
- | AARCH64_FEATURE_RNG
- | AARCH64_FEATURE_F16
- | AARCH64_FEATURE_BFLOAT16
- | AARCH64_FEATURE_I8MM), "Neoverse V1"},
- {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
- | AARCH64_FEATURE_RDMA),
- "Qualcomm QDF24XX"},
- {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
- AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
- "Qualcomm Saphira"},
- {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
- "Cavium ThunderX"},
- {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
- AARCH64_FEATURE_CRYPTO),
- "Broadcom Vulcan"},
+ {"cortex-a76ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
+ SSBS), "Cortex-A76AE"},
+ {"cortex-a77", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
+ SSBS), "Cortex-A77"},
+ {"cortex-a65", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
+ SSBS), "Cortex-A65"},
+ {"cortex-a65ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
+ SSBS), "Cortex-A65AE"},
+ {"cortex-a78", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
+ SSBS, PROFILE), "Cortex-A78"},
+ {"cortex-a78ae", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
+ SSBS, PROFILE), "Cortex-A78AE"},
+ {"cortex-a78c", AARCH64_CPU_FEATURES (V8_2A, 7, DOTPROD, F16, FLAGM,
+ PAC, PROFILE, RCPC, SSBS),
+ "Cortex-A78C"},
+ {"cortex-a510", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
+ SVE2_BITPERM), "Cortex-A510"},
+ {"cortex-a520", AARCH64_CPU_FEATURES (V9_2A, 2, MEMTAG, SVE2_BITPERM),
+ "Cortex-A520"},
+ {"cortex-a710", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
+ SVE2_BITPERM), "Cortex-A710"},
+ {"cortex-a720", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
+ SVE2_BITPERM), "Cortex-A720"},
+ {"ares", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
+ PROFILE), "Ares"},
+ {"exynos-m1", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
+ "Samsung Exynos M1"},
+ {"falkor", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
+ "Qualcomm Falkor"},
+ {"neoverse-e1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
+ SSBS), "Neoverse E1"},
+ {"neoverse-n1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
+ PROFILE), "Neoverse N1"},
+ {"neoverse-n2", AARCH64_CPU_FEATURES (V8_5A, 8, BFLOAT16, I8MM, F16,
+ SVE, SVE2, SVE2_BITPERM, MEMTAG,
+ RNG), "Neoverse N2"},
+ {"neoverse-v1", AARCH64_CPU_FEATURES (V8_4A, 8, PROFILE, CVADP, SVE,
+ SSBS, RNG, F16, BFLOAT16, I8MM),
+ "Neoverse V1"},
+ {"qdf24xx", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
+ "Qualcomm QDF24XX"},
+ {"saphira", AARCH64_CPU_FEATURES (V8_4A, 3, SHA2, AES, PROFILE),
+ "Qualcomm Saphira"},
+ {"thunderx", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
+ "Cavium ThunderX"},
+ {"vulcan", AARCH64_CPU_FEATURES (V8_1A, 2, SHA2, AES),
+ "Broadcom Vulcan"},
/* The 'xgene-1' name is an older name for 'xgene1', which was used
in earlier releases and is superseded by 'xgene1' in all
tools. */
- {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
- {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
- {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
- AARCH64_FEATURE_CRC), "APM X-Gene 2"},
- {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
- {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
- AARCH64_FEATURE_F16
- | AARCH64_FEATURE_RCPC
- | AARCH64_FEATURE_DOTPROD
- | AARCH64_FEATURE_SSBS
- | AARCH64_FEATURE_PROFILE),
- "Cortex-X1"},
- {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
- AARCH64_FEATURE_BFLOAT16
- | AARCH64_FEATURE_I8MM
- | AARCH64_FEATURE_MEMTAG
- | AARCH64_FEATURE_SVE2_BITPERM),
- "Cortex-X2"},
- {"generic", AARCH64_ARCH_V8, NULL},
-
- {NULL, AARCH64_ARCH_NONE, NULL}
+ {"xgene-1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
+ {"xgene1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
+ {"xgene2", AARCH64_CPU_FEATURES (V8A, 1, CRC), "APM X-Gene 2"},
+ {"cortex-r82", AARCH64_ARCH_FEATURES (V8R), "Cortex-R82"},
+ {"cortex-x1", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
+ SSBS, PROFILE), "Cortex-X1"},
+ {"cortex-x2", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
+ SVE2_BITPERM), "Cortex-X2"},
+ {"generic", AARCH64_ARCH_FEATURES (V8A), NULL},
+
+ {NULL, AARCH64_NO_FEATURES, NULL}
};
struct aarch64_arch_option_table
/* This list should, at a minimum, contain all the architecture names
recognized by GCC. */
static const struct aarch64_arch_option_table aarch64_archs[] = {
- {"all", AARCH64_ANY},
- {"armv8-a", AARCH64_ARCH_V8},
- {"armv8.1-a", AARCH64_ARCH_V8_1},
- {"armv8.2-a", AARCH64_ARCH_V8_2},
- {"armv8.3-a", AARCH64_ARCH_V8_3},
- {"armv8.4-a", AARCH64_ARCH_V8_4},
- {"armv8.5-a", AARCH64_ARCH_V8_5},
- {"armv8.6-a", AARCH64_ARCH_V8_6},
- {"armv8.7-a", AARCH64_ARCH_V8_7},
- {"armv8.8-a", AARCH64_ARCH_V8_8},
- {"armv8-r", AARCH64_ARCH_V8_R},
- {"armv9-a", AARCH64_ARCH_V9},
- {"armv9.1-a", AARCH64_ARCH_V9_1},
- {"armv9.2-a", AARCH64_ARCH_V9_2},
- {"armv9.3-a", AARCH64_ARCH_V9_3},
- {NULL, AARCH64_ARCH_NONE}
+ {"all", AARCH64_ALL_FEATURES},
+ {"armv8-a", AARCH64_ARCH_FEATURES (V8A)},
+ {"armv8.1-a", AARCH64_ARCH_FEATURES (V8_1A)},
+ {"armv8.2-a", AARCH64_ARCH_FEATURES (V8_2A)},
+ {"armv8.3-a", AARCH64_ARCH_FEATURES (V8_3A)},
+ {"armv8.4-a", AARCH64_ARCH_FEATURES (V8_4A)},
+ {"armv8.5-a", AARCH64_ARCH_FEATURES (V8_5A)},
+ {"armv8.6-a", AARCH64_ARCH_FEATURES (V8_6A)},
+ {"armv8.7-a", AARCH64_ARCH_FEATURES (V8_7A)},
+ {"armv8.8-a", AARCH64_ARCH_FEATURES (V8_8A)},
+ {"armv8-r", AARCH64_ARCH_FEATURES (V8R)},
+ {"armv9-a", AARCH64_ARCH_FEATURES (V9A)},
+ {"armv9.1-a", AARCH64_ARCH_FEATURES (V9_1A)},
+ {"armv9.2-a", AARCH64_ARCH_FEATURES (V9_2A)},
+ {"armv9.3-a", AARCH64_ARCH_FEATURES (V9_3A)},
+ {NULL, AARCH64_NO_FEATURES}
};
/* ISA extensions. */
};
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
- {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
- AARCH64_ARCH_NONE},
- {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
- {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
- AARCH64_ARCH_NONE},
- {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
- AARCH64_ARCH_NONE},
- {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
- {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
- AARCH64_ARCH_NONE},
- {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
- AARCH64_ARCH_NONE},
- {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
- AARCH64_ARCH_NONE},
- {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
- {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
- {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
- {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
- AARCH64_ARCH_NONE},
- {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
- {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
- AARCH64_ARCH_NONE},
- {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_F16
- | AARCH64_FEATURE_SIMD, 0)},
- {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
- AARCH64_ARCH_NONE},
- {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
- {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
- {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
- AARCH64_ARCH_NONE},
- {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
- AARCH64_ARCH_NONE},
- {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
- {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
- {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
- {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
- AARCH64_ARCH_NONE},
- {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
- AARCH64_ARCH_NONE},
- {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
- AARCH64_ARCH_NONE},
- {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
- {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SVE2
- | AARCH64_FEATURE_SM4, 0)},
- {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SVE2
- | AARCH64_FEATURE_AES, 0)},
- {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SVE2
- | AARCH64_FEATURE_SHA3, 0)},
- {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
- {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SVE2
- | AARCH64_FEATURE_BFLOAT16, 0)},
- {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
- {"sme-f64f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
- {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
- {"sme-i16i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
- {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
- {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
- {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
- {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
- AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
- {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
- AARCH64_ARCH_NONE},
- {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
- AARCH64_ARCH_NONE},
- {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
- AARCH64_ARCH_NONE},
- {"mops", AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
- AARCH64_ARCH_NONE},
- {"hbc", AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
- AARCH64_ARCH_NONE},
- {"cssc", AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
- AARCH64_ARCH_NONE},
- {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
+ {"crc", AARCH64_FEATURE (CRC), AARCH64_NO_FEATURES},
+ {"crypto", AARCH64_FEATURES (2, AES, SHA2),
+ AARCH64_FEATURE (SIMD)},
+ {"fp", AARCH64_FEATURE (FP), AARCH64_NO_FEATURES},
+ {"lse", AARCH64_FEATURE (LSE), AARCH64_NO_FEATURES},
+ {"simd", AARCH64_FEATURE (SIMD), AARCH64_FEATURE (FP)},
+ {"pan", AARCH64_FEATURE (PAN), AARCH64_NO_FEATURES},
+ {"lor", AARCH64_FEATURE (LOR), AARCH64_NO_FEATURES},
+ {"ras", AARCH64_FEATURE (RAS), AARCH64_NO_FEATURES},
+ {"rdma", AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
+ {"fp16", AARCH64_FEATURE (F16), AARCH64_FEATURE (FP)},
+ {"fp16fml", AARCH64_FEATURE (F16_FML), AARCH64_FEATURE (F16)},
+ {"profile", AARCH64_FEATURE (PROFILE), AARCH64_NO_FEATURES},
+ {"sve", AARCH64_FEATURE (SVE), AARCH64_FEATURE (COMPNUM)},
+ {"tme", AARCH64_FEATURE (TME), AARCH64_NO_FEATURES},
+ {"compnum", AARCH64_FEATURE (COMPNUM),
+ AARCH64_FEATURES (2, F16, SIMD)},
+ {"rcpc", AARCH64_FEATURE (RCPC), AARCH64_NO_FEATURES},
+ {"dotprod", AARCH64_FEATURE (DOTPROD), AARCH64_FEATURE (SIMD)},
+ {"sha2", AARCH64_FEATURE (SHA2), AARCH64_FEATURE (FP)},
+ {"sb", AARCH64_FEATURE (SB), AARCH64_NO_FEATURES},
+ {"predres", AARCH64_FEATURE (PREDRES), AARCH64_NO_FEATURES},
+ {"aes", AARCH64_FEATURE (AES), AARCH64_FEATURE (SIMD)},
+ {"sm4", AARCH64_FEATURE (SM4), AARCH64_FEATURE (SIMD)},
+ {"sha3", AARCH64_FEATURE (SHA3), AARCH64_FEATURE (SHA2)},
+ {"rng", AARCH64_FEATURE (RNG), AARCH64_NO_FEATURES},
+ {"ssbs", AARCH64_FEATURE (SSBS), AARCH64_NO_FEATURES},
+ {"memtag", AARCH64_FEATURE (MEMTAG), AARCH64_NO_FEATURES},
+ {"sve2", AARCH64_FEATURE (SVE2), AARCH64_FEATURE (SVE)},
+ {"sve2-sm4", AARCH64_FEATURE (SVE2_SM4),
+ AARCH64_FEATURES (2, SVE2, SM4)},
+ {"sve2-aes", AARCH64_FEATURE (SVE2_AES),
+ AARCH64_FEATURES (2, SVE2, AES)},
+ {"sve2-sha3", AARCH64_FEATURE (SVE2_SHA3),
+ AARCH64_FEATURES (2, SVE2, SHA3)},
+ {"sve2-bitperm", AARCH64_FEATURE (SVE2_BITPERM),
+ AARCH64_FEATURE (SVE2)},
+ {"sme", AARCH64_FEATURE (SME),
+ AARCH64_FEATURES (2, SVE2, BFLOAT16)},
+ {"sme-f64", AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
+ {"sme-f64f64", AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
+ {"sme-i64", AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
+ {"sme-i16i64", AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
+ {"sme2", AARCH64_FEATURE (SME2), AARCH64_FEATURE (SME)},
+ {"bf16", AARCH64_FEATURE (BFLOAT16), AARCH64_FEATURE (FP)},
+ {"i8mm", AARCH64_FEATURE (I8MM), AARCH64_FEATURE (SIMD)},
+ {"f32mm", AARCH64_FEATURE (F32MM), AARCH64_FEATURE (SVE)},
+ {"f64mm", AARCH64_FEATURE (F64MM), AARCH64_FEATURE (SVE)},
+ {"ls64", AARCH64_FEATURE (LS64), AARCH64_NO_FEATURES},
+ {"flagm", AARCH64_FEATURE (FLAGM), AARCH64_NO_FEATURES},
+ {"pauth", AARCH64_FEATURE (PAC), AARCH64_NO_FEATURES},
+ {"mops", AARCH64_FEATURE (MOPS), AARCH64_NO_FEATURES},
+ {"hbc", AARCH64_FEATURE (HBC), AARCH64_NO_FEATURES},
+ {"cssc", AARCH64_FEATURE (CSSC), AARCH64_NO_FEATURES},
+ {NULL, AARCH64_NO_FEATURES, AARCH64_NO_FEATURES},
};
struct aarch64_long_option_table
aarch64_feature_disable_set (aarch64_feature_set set)
{
const struct aarch64_option_cpu_value_table *opt;
- aarch64_feature_set prev = 0;
+ aarch64_feature_set prev = AARCH64_NO_FEATURES;
- while (prev != set) {
- prev = set;
- for (opt = aarch64_features; opt->name != NULL; opt++)
- if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
- AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
- }
+ while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
+ {
+ prev = set;
+ for (opt = aarch64_features; opt->name != NULL; opt++)
+ if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
+ AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
+ }
return set;
}
aarch64_feature_enable_set (aarch64_feature_set set)
{
const struct aarch64_option_cpu_value_table *opt;
- aarch64_feature_set prev = 0;
+ aarch64_feature_set prev = AARCH64_NO_FEATURES;
- while (prev != set) {
- prev = set;
- for (opt = aarch64_features; opt->name != NULL; opt++)
- if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
- AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
- }
+ while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
+ {
+ prev = set;
+ for (opt = aarch64_features; opt->name != NULL; opt++)
+ if (AARCH64_CPU_HAS_ALL_FEATURES (set, opt->value))
+ AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
+ }
return set;
}
else
{
set = aarch64_feature_disable_set (opt->value);
- AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
+ AARCH64_CLEAR_FEATURES (*ext_set, *ext_set, set);
}
break;
}