/* aarch64-opc.c -- AArch64 opcode support.
- Copyright (C) 2009-2020 Free Software Foundation, Inc.
+ Copyright (C) 2009-2023 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of the GNU opcodes library.
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
-#include "bfd_stdint.h"
+#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
-int debug_dump = FALSE;
+int debug_dump = false;
#endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
0
};
+/* The enumeration strings associated with each value of a 6-bit RPRFM
+ operation. */
+const char *const aarch64_rprfmop_array[64] = {
+ "pldkeep",
+ "pstkeep",
+ 0,
+ 0,
+ "pldstrm",
+ "pststrm"
+};
+
+/* Vector length multiples for a predicate-as-counter operand. Used in things
+ like AARCH64_OPND_SME_VLxN_10. */
+const char *const aarch64_sme_vlxn_array[2] = {
+ "vlx2",
+ "vlx4"
+};
+
/* Helper functions to determine which operand to be used to encode/decode
the size:Q fields for AdvSIMD instructions. */
-static inline bfd_boolean
+static inline bool
vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
{
- return ((qualifier >= AARCH64_OPND_QLF_V_8B
- && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
- : FALSE);
+ return (qualifier >= AARCH64_OPND_QLF_V_8B
+ && qualifier <= AARCH64_OPND_QLF_V_1Q);
}
-static inline bfd_boolean
+static inline bool
fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
{
- return ((qualifier >= AARCH64_OPND_QLF_S_B
- && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
- : FALSE);
+ return (qualifier >= AARCH64_OPND_QLF_S_B
+ && qualifier <= AARCH64_OPND_QLF_S_Q);
}
enum data_pattern
static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
- if (vector_qualifier_p (qualifiers[0]) == TRUE)
+ if (vector_qualifier_p (qualifiers[0]))
{
/* e.g. v.4s, v.4s, v.4s
or v.4h, v.4h, v.h[3]. */
if (qualifiers[0] == qualifiers[1]
- && vector_qualifier_p (qualifiers[2]) == TRUE
+ && vector_qualifier_p (qualifiers[2])
&& (aarch64_get_qualifier_esize (qualifiers[0])
== aarch64_get_qualifier_esize (qualifiers[1]))
&& (aarch64_get_qualifier_esize (qualifiers[0])
/* e.g. v.8h, v.8b, v.8b.
or v.4s, v.4h, v.h[2].
or v.8h, v.16b. */
- if (vector_qualifier_p (qualifiers[1]) == TRUE
+ if (vector_qualifier_p (qualifiers[1])
&& aarch64_get_qualifier_esize (qualifiers[0]) != 0
&& (aarch64_get_qualifier_esize (qualifiers[0])
== aarch64_get_qualifier_esize (qualifiers[1]) << 1))
return DP_VECTOR_LONG;
/* e.g. v.8h, v.8h, v.8b. */
if (qualifiers[0] == qualifiers[1]
- && vector_qualifier_p (qualifiers[2]) == TRUE
+ && vector_qualifier_p (qualifiers[2])
&& aarch64_get_qualifier_esize (qualifiers[0]) != 0
&& (aarch64_get_qualifier_esize (qualifiers[0])
== aarch64_get_qualifier_esize (qualifiers[2]) << 1)
== aarch64_get_qualifier_esize (qualifiers[1])))
return DP_VECTOR_WIDE;
}
- else if (fp_qualifier_p (qualifiers[0]) == TRUE)
+ else if (fp_qualifier_p (qualifiers[0]))
{
/* e.g. SADDLV <V><d>, <Vn>.<T>. */
- if (vector_qualifier_p (qualifiers[1]) == TRUE
+ if (vector_qualifier_p (qualifiers[1])
&& qualifiers[2] == AARCH64_OPND_QLF_NIL)
return DP_VECTOR_ACROSS_LANES;
}
significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
}
\f
+/* Instruction bit-fields.
+ Keep synced with 'enum aarch64_field_kind'. */
const aarch64_field fields[] =
{
{ 0, 0 }, /* NIL. */
- { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
- { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
- { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
- { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
- { 5, 19 }, /* imm19: e.g. in CBZ. */
- { 5, 19 }, /* immhi: e.g. in ADRP. */
- { 29, 2 }, /* immlo: e.g. in ADRP. */
- { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
- { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
- { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
- { 30, 1 }, /* Q: in most AdvSIMD instructions. */
- { 0, 5 }, /* Rt: in load/store instructions. */
- { 0, 5 }, /* Rd: in many integer instructions. */
- { 5, 5 }, /* Rn: in many integer instructions. */
- { 10, 5 }, /* Rt2: in load/store pair instructions. */
- { 10, 5 }, /* Ra: in fp instructions. */
- { 5, 3 }, /* op2: in the system instructions. */
{ 8, 4 }, /* CRm: in the system instructions. */
+ { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
{ 12, 4 }, /* CRn: in the system instructions. */
- { 16, 3 }, /* op1: in the system instructions. */
- { 19, 2 }, /* op0: in the system instructions. */
- { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
- { 12, 4 }, /* cond: condition flags as a source operand. */
- { 12, 4 }, /* opcode: in advsimd load/store instructions. */
- { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
- { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
- { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
- { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
- { 16, 5 }, /* Rs: in load/store exclusive instructions. */
- { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
- { 12, 1 }, /* S: in load/store reg offset instructions. */
- { 21, 2 }, /* hw: in move wide constant instructions. */
- { 22, 2 }, /* opc: in load/store reg offset instructions. */
- { 23, 1 }, /* opc1: in load/store reg offset instructions. */
- { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
- { 22, 2 }, /* type: floating point type field in fp data inst. */
- { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
- { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
- { 15, 6 }, /* imm6_2: in rmif instructions. */
- { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
- { 0, 4 }, /* imm4_2: in rmif instructions. */
- { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
- { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
- { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
- { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
- { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
- { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
- { 5, 14 }, /* imm14: in test bit and branch instructions. */
- { 5, 16 }, /* imm16: in exception instructions. */
- { 0, 16 }, /* imm16_2: in udf instruction. */
- { 0, 26 }, /* imm26: in unconditional branch instructions. */
- { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
- { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
- { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
- { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
- { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
- { 22, 1 }, /* N: in logical (immediate) instructions. */
- { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
- { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
- { 31, 1 }, /* sf: in integer data processing instructions. */
- { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
+ { 10, 8 }, /* CSSC_imm8. */
{ 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
{ 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
{ 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
- { 31, 1 }, /* b5: in the test bit and branch instructions. */
- { 19, 5 }, /* b40: in the test bit and branch instructions. */
- { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
+ { 22, 1 }, /* N: in logical (immediate) instructions. */
+ { 30, 1 }, /* Q: in most AdvSIMD instructions. */
+ { 10, 5 }, /* Ra: in fp instructions. */
+ { 0, 5 }, /* Rd: in many integer instructions. */
+ { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
+ { 5, 5 }, /* Rn: in many integer instructions. */
+ { 16, 5 }, /* Rs: in load/store exclusive instructions. */
+ { 0, 5 }, /* Rt: in load/store instructions. */
+ { 10, 5 }, /* Rt2: in load/store pair instructions. */
+ { 12, 1 }, /* S: in load/store reg offset instructions. */
+ { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */
+ { 1, 3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1]. */
+ { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */
+ { 0, 3 }, /* SME_PNd3: PN0-PN7, bits [2:0]. */
+ { 5, 3 }, /* SME_PNn3: PN0-PN7, bits [7:5]. */
+ { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
+ { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
+ { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
+ { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
+ { 10, 1 }, /* SME_VL_10: VLx2 or VLx4, bit [10]. */
+ { 13, 1 }, /* SME_VL_13: VLx2 or VLx4, bit [13]. */
+ { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
+ { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
+ { 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */
+ { 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */
+ { 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */
+ { 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */
+ { 18, 3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18]. */
+ { 6, 4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6]. */
+ { 7, 3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7]. */
+ { 4, 1 }, /* SME_ZtT: upper bit of Zt, bit [4]. */
+ { 0, 3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0]. */
+ { 0, 2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0]. */
+ { 23, 1 }, /* SME_i1: immediate field, bit 23. */
+ { 12, 2 }, /* SME_size_12: bits [13:12]. */
+ { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */
+ { 23, 1 }, /* SME_sz_23: bit [23]. */
+ { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
+ { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */
+ { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
{ 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
{ 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
{ 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
{ 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
{ 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
{ 5, 1 }, /* SVE_i1: single-bit immediate. */
+ { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
{ 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
- { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
{ 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
- { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
+ { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
{ 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
{ 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
{ 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
{ 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
{ 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
{ 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
- { 22, 1 }, /* SVE_sz: 1-bit element size select. */
{ 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
+ { 22, 1 }, /* SVE_sz: 1-bit element size select. */
{ 30, 1 }, /* SVE_sz2: 1-bit element size select. */
{ 16, 4 }, /* SVE_tsz: triangular size select. */
{ 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
{ 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
{ 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
{ 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
+ { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */
+ { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
+ { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
+ { 19, 5 }, /* b40: in the test bit and branch instructions. */
+ { 31, 1 }, /* b5: in the test bit and branch instructions. */
+ { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
+ { 12, 4 }, /* cond: condition flags as a source operand. */
+ { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
+ { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
+ { 21, 2 }, /* hw: in move wide constant instructions. */
+ { 0, 1 }, /* imm1_0: general immediate in bits [0]. */
+ { 2, 1 }, /* imm1_2: general immediate in bits [2]. */
+ { 8, 1 }, /* imm1_8: general immediate in bits [8]. */
+ { 10, 1 }, /* imm1_10: general immediate in bits [10]. */
+ { 15, 1 }, /* imm1_15: general immediate in bits [15]. */
+ { 16, 1 }, /* imm1_16: general immediate in bits [16]. */
+ { 0, 2 }, /* imm2_0: general immediate in bits [1:0]. */
+ { 1, 2 }, /* imm2_1: general immediate in bits [2:1]. */
+ { 8, 2 }, /* imm2_8: general immediate in bits [9:8]. */
+ { 10, 2 }, /* imm2_10: 2-bit immediate, bits [11:10] */
+ { 12, 2 }, /* imm2_12: 2-bit immediate, bits [13:12] */
+ { 15, 2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
+ { 16, 2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
+ { 19, 2 }, /* imm2_19: 2-bit immediate, bits [20:19] */
+ { 0, 3 }, /* imm3_0: general immediate in bits [2:0]. */
+ { 5, 3 }, /* imm3_5: general immediate in bits [7:5]. */
+ { 10, 3 }, /* imm3_10: in add/sub extended reg instructions. */
+ { 12, 3 }, /* imm3_12: general immediate in bits [14:12]. */
+ { 14, 3 }, /* imm3_14: general immediate in bits [16:14]. */
+ { 15, 3 }, /* imm3_15: general immediate in bits [17:15]. */
+ { 0, 4 }, /* imm4_0: in rmif instructions. */
+ { 5, 4 }, /* imm4_5: in SME instructions. */
+ { 10, 4 }, /* imm4_10: in adddg/subg instructions. */
+ { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */
+ { 14, 4 }, /* imm4_14: general immediate in bits [17:14]. */
+ { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
+ { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */
+ { 15, 6 }, /* imm6_15: in rmif instructions. */
+ { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
+ { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
+ { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
+ { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
+ { 5, 14 }, /* imm14: in test bit and branch instructions. */
+ { 0, 16 }, /* imm16_0: in udf instruction. */
+ { 5, 16 }, /* imm16_5: in exception instructions. */
+ { 5, 19 }, /* imm19: e.g. in CBZ. */
+ { 0, 26 }, /* imm26: in unconditional branch instructions. */
+ { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
+ { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
+ { 5, 19 }, /* immhi: e.g. in ADRP. */
+ { 29, 2 }, /* immlo: e.g. in ADRP. */
+ { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
+ { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
+ { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
+ { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
+ { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
+ { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
+ { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */
+ { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
+ { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
+ { 19, 2 }, /* op0: in the system instructions. */
+ { 16, 3 }, /* op1: in the system instructions. */
+ { 5, 3 }, /* op2: in the system instructions. */
+ { 22, 2 }, /* opc: in load/store reg offset instructions. */
+ { 23, 1 }, /* opc1: in load/store reg offset instructions. */
+ { 12, 4 }, /* opcode: in advsimd load/store instructions. */
+ { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
{ 11, 2 }, /* rotate1: FCMLA immediate rotate. */
{ 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
{ 12, 1 }, /* rotate3: FCADD immediate rotate. */
- { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
+ { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
+ { 31, 1 }, /* sf: in integer data processing instructions. */
+ { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
+ { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
{ 22, 1 }, /* sz: 1-bit element size select. */
- { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
+ { 22, 2 }, /* type: floating point type field in fp data inst. */
+ { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
};
enum aarch64_operand_class
enum aarch64_modifier_kind
aarch64_get_operand_modifier_from_value (aarch64_insn value,
- bfd_boolean extend_p)
+ bool extend_p)
{
- if (extend_p == TRUE)
+ if (extend_p)
return AARCH64_MOD_UXTB + value;
else
return AARCH64_MOD_LSL - value;
}
-bfd_boolean
+bool
aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
{
- return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
- ? TRUE : FALSE;
+ return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
}
-static inline bfd_boolean
+static inline bool
aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
{
- return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
- ? TRUE : FALSE;
+ return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
}
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{0, 0, 0, "retrieving", 0},
};
-static inline bfd_boolean
+static inline bool
operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
{
- return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
- ? TRUE : FALSE;
+ return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
}
-static inline bfd_boolean
+static inline bool
qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
{
- return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
- ? TRUE : FALSE;
+ return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
}
const char*
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
- assert (operand_variant_qualifier_p (qualifier) == TRUE);
+ assert (operand_variant_qualifier_p (qualifier));
return aarch64_opnd_qualifiers[qualifier].data0;
}
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
- assert (operand_variant_qualifier_p (qualifier) == TRUE);
+ assert (operand_variant_qualifier_p (qualifier));
return aarch64_opnd_qualifiers[qualifier].data1;
}
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
- assert (operand_variant_qualifier_p (qualifier) == TRUE);
+ assert (operand_variant_qualifier_p (qualifier));
return aarch64_opnd_qualifiers[qualifier].data2;
}
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
- assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
+ assert (qualifier_value_in_range_constraint_p (qualifier));
return aarch64_opnd_qualifiers[qualifier].data0;
}
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
- assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
+ assert (qualifier_value_in_range_constraint_p (qualifier));
return aarch64_opnd_qualifiers[qualifier].data1;
}
/* This function checks if the given instruction INSN is a destructive
instruction based on the usage of the registers. It does not recognize
unary destructive instructions. */
-bfd_boolean
+bool
aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
{
int i = 0;
const enum aarch64_opnd *opnds = opcode->operands;
if (opnds[0] == AARCH64_OPND_NIL)
- return FALSE;
+ return false;
while (opnds[++i] != AARCH64_OPND_NIL)
if (opnds[i] == opnds[0])
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
/* TODO improve this, we can have an extra field at the runtime to
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
+ Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
+ This is always 0 if the function succeeds.
+
N.B. on the entry, it is very likely that only some operands in *INST
have had their qualifiers been established.
int
aarch64_find_best_match (const aarch64_inst *inst,
const aarch64_opnd_qualifier_seq_t *qualifiers_list,
- int stop_at, aarch64_opnd_qualifier_t *ret)
+ int stop_at, aarch64_opnd_qualifier_t *ret,
+ int *invalid_count)
{
- int found = 0;
- int i, num_opnds;
+ int i, num_opnds, invalid, min_invalid;
const aarch64_opnd_qualifier_t *qualifiers;
num_opnds = aarch64_num_of_operands (inst->opcode);
if (num_opnds == 0)
{
DEBUG_TRACE ("SUCCEED: no operand");
+ *invalid_count = 0;
return 1;
}
stop_at = num_opnds - 1;
/* For each pattern. */
+ min_invalid = num_opnds;
for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
{
int j;
qualifiers = *qualifiers_list;
/* Start as positive. */
- found = 1;
+ invalid = 0;
DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
dump_match_qualifiers (inst->operands, qualifiers);
#endif
- /* Most opcodes has much fewer patterns in the list.
- First NIL qualifier indicates the end in the list. */
- if (empty_qualifier_sequence_p (qualifiers) == TRUE)
- {
- DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
- if (i)
- found = 0;
- break;
- }
+ /* The first entry should be taken literally, even if it's an empty
+ qualifier sequence. (This matters for strict testing.) In other
+ positions an empty sequence acts as a terminator. */
+ if (i > 0 && empty_qualifier_sequence_p (qualifiers))
+ break;
for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
{
- if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
+ if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
+ && !(inst->opcode->flags & F_STRICT))
{
/* Either the operand does not have qualifier, or the qualifier
for the operand needs to be deduced from the qualifier
if (operand_also_qualified_p (inst->operands + j, *qualifiers))
continue;
else
- {
- found = 0;
- break;
- }
+ invalid += 1;
}
else
continue; /* Equal qualifiers are certainly matched. */
}
+ if (min_invalid > invalid)
+ min_invalid = invalid;
+
/* Qualifiers established. */
- if (found == 1)
+ if (min_invalid == 0)
break;
}
- if (found == 1)
+ *invalid_count = min_invalid;
+ if (min_invalid == 0)
{
/* Fill the result in *RET. */
int j;
Return 1 if the operand qualifier(s) in *INST match one of the qualifier
sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
- if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
+ Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
+ This is always 0 if the function succeeds.
+
+ if UPDATE_P, update the qualifier(s) in *INST after the matching
succeeds. */
static int
-match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
+match_operands_qualifier (aarch64_inst *inst, bool update_p,
+ int *invalid_count)
{
- int i, nops;
+ int i;
aarch64_opnd_qualifier_seq_t qualifiers;
if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
- qualifiers))
+ qualifiers, invalid_count))
{
DEBUG_TRACE ("matching FAIL");
return 0;
}
- if (inst->opcode->flags & F_STRICT)
- {
- /* Require an exact qualifier match, even for NIL qualifiers. */
- nops = aarch64_num_of_operands (inst->opcode);
- for (i = 0; i < nops; ++i)
- if (inst->operands[i].qualifier != qualifiers[i])
- return FALSE;
- }
-
/* Update the qualifiers. */
- if (update_p == TRUE)
+ if (update_p)
for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
{
if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
amount will be returned in *SHIFT_AMOUNT. */
-bfd_boolean
+bool
aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
{
int amount;
permitted. */
if (value >> 32 != 0 && value >> 32 != 0xffffffff)
/* Immediate out of range. */
- return FALSE;
+ return false;
value &= 0xffffffff;
}
if (amount == -1)
{
- DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
- return FALSE;
+ DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
+ return false;
}
if (shift_amount != NULL)
*shift_amount = amount;
- DEBUG_TRACE ("exit TRUE with amount %d", amount);
+ DEBUG_TRACE ("exit true with amount %d", amount);
- return TRUE;
+ return true;
}
/* Build the accepted values for immediate logical SIMD instructions.
If ENCODING is not NULL, on the return of TRUE, the standard encoding for
VALUE will be returned in *ENCODING. */
-bfd_boolean
+bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
simd_imm_encoding imm_enc;
const simd_imm_encoding *imm_encoding;
- static bfd_boolean initialized = FALSE;
+ static bool initialized = false;
uint64_t upper;
int i;
if (!initialized)
{
build_immediate_table ();
- initialized = TRUE;
+ initialized = true;
}
/* Allow all zeros or all ones in top bits, so that
constant expressions like ~1 are permitted. */
upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
if ((value & ~upper) != value && (value | upper) != value)
- return FALSE;
+ return false;
/* Replicate to a full 64-bit value. */
value &= ~upper;
sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
if (imm_encoding == NULL)
{
- DEBUG_TRACE ("exit with FALSE");
- return FALSE;
+ DEBUG_TRACE ("exit with false");
+ return false;
}
if (encoding != NULL)
*encoding = imm_encoding->encoding;
- DEBUG_TRACE ("exit with TRUE");
- return TRUE;
+ DEBUG_TRACE ("exit with true");
+ return true;
}
/* If 64-bit immediate IMM is in the format of
set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
}
+static inline void
+set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
+ const char *prefix, int lower_bound, int upper_bound)
+{
+ if (mismatch_detail == NULL)
+ return;
+ set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
+ mismatch_detail->data[0].s = prefix;
+ mismatch_detail->data[1].i = lower_bound;
+ mismatch_detail->data[2].i = upper_bound;
+}
+
static inline void
set_out_of_range_error (aarch64_operand_error *mismatch_detail,
int idx, int lower_bound, int upper_bound,
if (mismatch_detail == NULL)
return;
set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
- mismatch_detail->data[0] = lower_bound;
- mismatch_detail->data[1] = upper_bound;
+ mismatch_detail->data[0].i = lower_bound;
+ mismatch_detail->data[1].i = upper_bound;
}
static inline void
if (mismatch_detail == NULL)
return;
set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
- mismatch_detail->data[0] = alignment;
+ mismatch_detail->data[0].i = alignment;
}
static inline void
-set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
- int expected_num)
+set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
+ int expected_num)
{
if (mismatch_detail == NULL)
return;
- set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
- mismatch_detail->data[0] = expected_num;
+ set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
+ mismatch_detail->data[0].i = 1 << expected_num;
+}
+
+static inline void
+set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
+ int expected_num)
+{
+ if (mismatch_detail == NULL)
+ return;
+ set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
+ mismatch_detail->data[0].i = 1 << expected_num;
+}
+
+static inline void
+set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
+ int idx, int expected)
+{
+ if (mismatch_detail == NULL)
+ return;
+ set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
+ mismatch_detail->data[0].i = expected;
}
static inline void
set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
}
+/* Check that indexed register operand OPND has a register in the range
+ [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
+ PREFIX is the register prefix, such as "z" for SVE vector registers. */
+
+static bool
+check_reglane (const aarch64_opnd_info *opnd,
+ aarch64_operand_error *mismatch_detail, int idx,
+ const char *prefix, int min_regno, int max_regno,
+ int min_index, int max_index)
+{
+ if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
+ {
+ set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
+ max_regno);
+ return false;
+ }
+ if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
+ {
+ set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
+ max_index);
+ return false;
+ }
+ return true;
+}
+
+/* Check that register list operand OPND has NUM_REGS registers and a
+ register stride of STRIDE. */
+
+static bool
+check_reglist (const aarch64_opnd_info *opnd,
+ aarch64_operand_error *mismatch_detail, int idx,
+ int num_regs, int stride)
+{
+ if (opnd->reglist.num_regs != num_regs)
+ {
+ set_reg_list_length_error (mismatch_detail, idx, num_regs);
+ return false;
+ }
+ if (opnd->reglist.stride != stride)
+ {
+ set_reg_list_stride_error (mismatch_detail, idx, stride);
+ return false;
+ }
+ return true;
+}
+
+/* Check that indexed ZA operand OPND has:
+
+ - a selection register in the range [MIN_WREG, MIN_WREG + 3]
+
+ - RANGE_SIZE consecutive immediate offsets.
+
+ - an initial immediate offset that is a multiple of RANGE_SIZE
+ in the range [0, MAX_VALUE * RANGE_SIZE]
+
+ - a vector group size of GROUP_SIZE. */
+
+static bool
+check_za_access (const aarch64_opnd_info *opnd,
+ aarch64_operand_error *mismatch_detail, int idx,
+ int min_wreg, int max_value, unsigned int range_size,
+ int group_size)
+{
+ if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
+ {
+ if (min_wreg == 12)
+ set_other_error (mismatch_detail, idx,
+ _("expected a selection register in the"
+ " range w12-w15"));
+ else if (min_wreg == 8)
+ set_other_error (mismatch_detail, idx,
+ _("expected a selection register in the"
+ " range w8-w11"));
+ else
+ abort ();
+ return false;
+ }
+
+ int max_index = max_value * range_size;
+ if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
+ {
+ set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
+ return false;
+ }
+
+ if ((opnd->indexed_za.index.imm % range_size) != 0)
+ {
+ assert (range_size == 2 || range_size == 4);
+ set_other_error (mismatch_detail, idx,
+ range_size == 2
+ ? _("starting offset is not a multiple of 2")
+ : _("starting offset is not a multiple of 4"));
+ return false;
+ }
+
+ if (opnd->indexed_za.index.countm1 != range_size - 1)
+ {
+ if (range_size == 1)
+ set_other_error (mismatch_detail, idx,
+ _("expected a single offset rather than"
+ " a range"));
+ else if (range_size == 2)
+ set_other_error (mismatch_detail, idx,
+ _("expected a range of two offsets"));
+ else if (range_size == 4)
+ set_other_error (mismatch_detail, idx,
+ _("expected a range of four offsets"));
+ else
+ abort ();
+ return false;
+ }
+
+ /* The vector group specifier is optional in assembly code. */
+ if (opnd->indexed_za.group_size != 0
+ && opnd->indexed_za.group_size != group_size)
+ {
+ set_invalid_vg_size (mismatch_detail, idx, group_size);
+ return false;
+ }
+
+ return true;
+}
+
/* General constraint checking based on operand code.
Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
uint64_t uvalue, mask;
const aarch64_opnd_info *opnd = opnds + idx;
aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
+ int i;
assert (opcode->operands[idx] == opnd->type && opnd->type == type);
if (!aarch64_stack_pointer_p (opnd))
{
set_other_error (mismatch_detail, idx,
- _("stack pointer register expected"));
+ _("stack pointer register expected"));
return 0;
}
break;
{
case AARCH64_OPND_SVE_Zm3_INDEX:
case AARCH64_OPND_SVE_Zm3_22_INDEX:
+ case AARCH64_OPND_SVE_Zm3_19_INDEX:
case AARCH64_OPND_SVE_Zm3_11_INDEX:
case AARCH64_OPND_SVE_Zm4_11_INDEX:
case AARCH64_OPND_SVE_Zm4_INDEX:
size = get_operand_fields_width (get_operand_from_code (type));
shift = get_operand_specific_data (&aarch64_operands[type]);
- mask = (1 << shift) - 1;
- if (opnd->reg.regno > mask)
- {
- assert (mask == 7 || mask == 15);
- set_other_error (mismatch_detail, idx,
- mask == 15
- ? _("z0-z15 expected")
- : _("z0-z7 expected"));
- return 0;
- }
- mask = (1u << (size - shift)) - 1;
- if (!value_in_range_p (opnd->reglane.index, 0, mask))
+ if (!check_reglane (opnd, mismatch_detail, idx,
+ "z", 0, (1 << shift) - 1,
+ 0, (1u << (size - shift)) - 1))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SVE_Zn_INDEX:
+ size = aarch64_get_qualifier_esize (opnd->qualifier);
+ if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
+ 0, 64 / size - 1))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_PNn3_INDEX1:
+ case AARCH64_OPND_SME_PNn3_INDEX2:
+ size = get_operand_field_width (get_operand_from_code (type), 1);
+ if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
+ 0, (1 << size) - 1))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_Zn_INDEX1_16:
+ case AARCH64_OPND_SME_Zn_INDEX2_15:
+ case AARCH64_OPND_SME_Zn_INDEX2_16:
+ case AARCH64_OPND_SME_Zn_INDEX3_14:
+ case AARCH64_OPND_SME_Zn_INDEX3_15:
+ case AARCH64_OPND_SME_Zn_INDEX4_14:
+ size = get_operand_fields_width (get_operand_from_code (type)) - 5;
+ if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
+ 0, (1 << size) - 1))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_Zm_INDEX1:
+ case AARCH64_OPND_SME_Zm_INDEX2:
+ case AARCH64_OPND_SME_Zm_INDEX3_1:
+ case AARCH64_OPND_SME_Zm_INDEX3_2:
+ case AARCH64_OPND_SME_Zm_INDEX3_10:
+ case AARCH64_OPND_SME_Zm_INDEX4_1:
+ case AARCH64_OPND_SME_Zm_INDEX4_10:
+ size = get_operand_fields_width (get_operand_from_code (type)) - 4;
+ if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 15,
+ 0, (1 << size) - 1))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_Zm:
+ if (opnd->reg.regno > 15)
{
- set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
+ set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
return 0;
}
break;
- case AARCH64_OPND_SVE_Zn_INDEX:
+ case AARCH64_OPND_SME_PnT_Wm_imm:
size = aarch64_get_qualifier_esize (opnd->qualifier);
- if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
+ max_value = 16 / size - 1;
+ if (!check_za_access (opnd, mismatch_detail, idx,
+ 12, max_value, 1, 0))
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case AARCH64_OPND_CLASS_SVE_REGLIST:
+ switch (type)
+ {
+ case AARCH64_OPND_SME_Pdx2:
+ case AARCH64_OPND_SME_Zdnx2:
+ case AARCH64_OPND_SME_Zdnx4:
+ case AARCH64_OPND_SME_Zmx2:
+ case AARCH64_OPND_SME_Zmx4:
+ case AARCH64_OPND_SME_Znx2:
+ case AARCH64_OPND_SME_Znx4:
+ num = get_operand_specific_data (&aarch64_operands[type]);
+ if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
+ return 0;
+ if ((opnd->reglist.first_regno % num) != 0)
{
- set_elem_idx_out_of_range_error (mismatch_detail, idx,
- 0, 64 / size - 1);
+ set_other_error (mismatch_detail, idx,
+ _("start register out of range"));
return 0;
}
break;
- case AARCH64_OPND_SVE_ZnxN:
- case AARCH64_OPND_SVE_ZtxN:
- if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
+ case AARCH64_OPND_SME_Ztx2_STRIDED:
+ case AARCH64_OPND_SME_Ztx4_STRIDED:
+ /* 2-register lists have a stride of 8 and 4-register lists
+ have a stride of 4. */
+ num = get_operand_specific_data (&aarch64_operands[type]);
+ if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
+ return 0;
+ num = 16 | (opnd->reglist.stride - 1);
+ if ((opnd->reglist.first_regno & ~num) != 0)
{
set_other_error (mismatch_detail, idx,
- _("invalid register list"));
+ _("start register out of range"));
return 0;
}
break;
+ case AARCH64_OPND_SME_PdxN:
+ case AARCH64_OPND_SVE_ZnxN:
+ case AARCH64_OPND_SVE_ZtxN:
+ num = get_opcode_dependent_value (opcode);
+ if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
+ return 0;
+ break;
+
default:
+ abort ();
+ }
+ break;
+
+ case AARCH64_OPND_CLASS_ZA_ACCESS:
+ switch (type)
+ {
+ case AARCH64_OPND_SME_ZA_HV_idx_src:
+ case AARCH64_OPND_SME_ZA_HV_idx_dest:
+ case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
+ size = aarch64_get_qualifier_esize (opnd->qualifier);
+ max_value = 16 / size - 1;
+ if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
+ get_opcode_dependent_value (opcode)))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_ZA_array_off4:
+ if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
+ get_opcode_dependent_value (opcode)))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_ZA_array_off3_0:
+ case AARCH64_OPND_SME_ZA_array_off3_5:
+ if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
+ get_opcode_dependent_value (opcode)))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_ZA_array_off1x4:
+ if (!check_za_access (opnd, mismatch_detail, idx, 8, 1, 4,
+ get_opcode_dependent_value (opcode)))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_ZA_array_off2x2:
+ if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 2,
+ get_opcode_dependent_value (opcode)))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_ZA_array_off2x4:
+ if (!check_za_access (opnd, mismatch_detail, idx, 8, 3, 4,
+ get_opcode_dependent_value (opcode)))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_ZA_array_off3x2:
+ if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 2,
+ get_opcode_dependent_value (opcode)))
+ return 0;
+ break;
+
+ case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
+ case AARCH64_OPND_SME_ZA_HV_idx_destxN:
+ size = aarch64_get_qualifier_esize (opnd->qualifier);
+ num = get_opcode_dependent_value (opcode);
+ max_value = 16 / num / size;
+ if (max_value > 0)
+ max_value -= 1;
+ if (!check_za_access (opnd, mismatch_detail, idx,
+ 12, max_value, num, 0))
+ return 0;
break;
+
+ default:
+ abort ();
}
break;
case AARCH64_OPND_CLASS_PRED_REG:
- if (opnd->reg.regno >= 8
- && get_operand_fields_width (get_operand_from_code (type)) == 3)
+ switch (type)
{
- set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
- return 0;
+ case AARCH64_OPND_SME_PNd3:
+ case AARCH64_OPND_SME_PNg3:
+ if (opnd->reg.regno < 8)
+ {
+ set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
+ return 0;
+ }
+ break;
+
+ default:
+ if (opnd->reg.regno >= 8
+ && get_operand_fields_width (get_operand_from_code (type)) == 3)
+ {
+ set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
+ return 0;
+ }
+ break;
}
break;
}
break;
+ case AARCH64_OPND_SME_ADDR_RI_U4xVL:
+ if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
+ {
+ set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
+ return 0;
+ }
+ break;
+
case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
case AARCH64_OPND_SVE_ADDR_RR_LSL1:
case AARCH64_OPND_SVE_ADDR_RR_LSL2:
case AARCH64_OPND_SVE_ADDR_RR_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL4:
case AARCH64_OPND_SVE_ADDR_RX:
case AARCH64_OPND_SVE_ADDR_RX_LSL1:
case AARCH64_OPND_SVE_ADDR_RX_LSL2:
assert (num >= 1 && num <= 4);
/* Unless LD1/ST1, the number of registers should be equal to that
of the structure elements. */
- if (num != 1 && opnd->reglist.num_regs != num)
- {
- set_reg_list_error (mismatch_detail, idx, num);
- return 0;
- }
+ if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
+ return 0;
break;
case AARCH64_OPND_LVt_AL:
case AARCH64_OPND_LEt:
assert (num >= 1 && num <= 4);
/* The number of registers should be equal to that of the structure
elements. */
- if (opnd->reglist.num_regs != num)
- {
- set_reg_list_error (mismatch_detail, idx, num);
- return 0;
- }
+ if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
+ return 0;
break;
default:
break;
}
+ if (opnd->reglist.stride != 1)
+ {
+ set_reg_list_stride_error (mismatch_detail, idx, 1);
+ return 0;
+ }
break;
case AARCH64_OPND_CLASS_IMMEDIATE:
case AARCH64_OPND_SVE_UIMM7:
case AARCH64_OPND_SVE_UIMM8:
case AARCH64_OPND_SVE_UIMM8_53:
+ case AARCH64_OPND_CSSC_UIMM8:
size = get_operand_fields_width (get_operand_from_code (type));
assert (size < 32);
if (!value_fit_unsigned_field_p (opnd->imm.value, size))
case AARCH64_OPND_SVE_SIMM5B:
case AARCH64_OPND_SVE_SIMM6:
case AARCH64_OPND_SVE_SIMM8:
+ case AARCH64_OPND_CSSC_SIMM8:
size = get_operand_fields_width (get_operand_from_code (type));
assert (size < 32);
if (!value_fit_signed_field_p (opnd->imm.value, size))
}
break;
+ case AARCH64_OPND_SME_SHRIMM4:
+ size = 1 << get_operand_fields_width (get_operand_from_code (type));
+ if (!value_in_range_p (opnd->imm.value, 1, size))
+ {
+ set_imm_out_of_range_error (mismatch_detail, idx, 1, size);
+ return 0;
+ }
+ break;
+
+ case AARCH64_OPND_SME_SHRIMM5:
case AARCH64_OPND_SVE_SHRIMM_PRED:
case AARCH64_OPND_SVE_SHRIMM_UNPRED:
case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
}
break;
+ case AARCH64_OPND_SME_ZT0_INDEX:
+ if (!value_in_range_p (opnd->imm.value, 0, 56))
+ {
+ set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
+ return 0;
+ }
+ if (opnd->imm.value % 8 != 0)
+ {
+ set_other_error (mismatch_detail, idx,
+ _("byte index must be a multiple of 8"));
+ return 0;
+ }
+ break;
+
default:
break;
}
switch (type)
{
case AARCH64_OPND_PSTATEFIELD:
+ for (i = 0; aarch64_pstatefields[i].name; ++i)
+ if (aarch64_pstatefields[i].value == opnd->pstatefield)
+ break;
+ assert (aarch64_pstatefields[i].name);
assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
- /* MSR UAO, #uimm4
- MSR PAN, #uimm4
- MSR SSBS,#uimm4
- The immediate must be #0 or #1. */
- if ((opnd->pstatefield == 0x03 /* UAO. */
- || opnd->pstatefield == 0x04 /* PAN. */
- || opnd->pstatefield == 0x19 /* SSBS. */
- || opnd->pstatefield == 0x1a) /* DIT. */
- && opnds[1].imm.value > 1)
+ max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
+ if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
{
- set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
+ set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
return 0;
}
- /* MSR SPSel, #uimm4
- Uses uimm4 as a control value to select the stack pointer: if
- bit 0 is set it selects the current exception level's stack
- pointer, if bit 0 is clear it selects shared EL0 stack pointer.
- Bits 1 to 3 of uimm4 are reserved and should be zero. */
- if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
+ break;
+ case AARCH64_OPND_PRFOP:
+ if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
{
- set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
+ set_other_error (mismatch_detail, idx,
+ _("the register-index form of PRFM does"
+ " not accept opcodes in the range 24-31"));
return 0;
}
break;
DEBUG_TRACE ("enter");
- /* Check for cases where a source register needs to be the same as the
- destination register. Do this before matching qualifiers since if
- an instruction has both invalid tying and invalid qualifiers,
- the error about qualifiers would suggest several alternative
- instructions that also have invalid tying. */
i = inst->opcode->tied_operand;
- if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
+
+ if (i > 0)
{
- if (mismatch_detail)
- {
- mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
- mismatch_detail->index = i;
- mismatch_detail->error = NULL;
- }
- return 0;
+ /* Check for tied_operands with specific opcode iclass. */
+ switch (inst->opcode->iclass)
+ {
+ /* For SME LDR and STR instructions #imm must have the same numerical
+ value for both operands.
+ */
+ case sme_ldr:
+ case sme_str:
+ assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
+ assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
+ if (inst->operands[0].indexed_za.index.imm
+ != inst->operands[1].addr.offset.imm)
+ {
+ if (mismatch_detail)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
+ mismatch_detail->index = i;
+ }
+ return 0;
+ }
+ break;
+
+ default:
+ {
+ /* Check for cases where a source register needs to be the
+ same as the destination register. Do this before
+ matching qualifiers since if an instruction has both
+ invalid tying and invalid qualifiers, the error about
+ qualifiers would suggest several alternative instructions
+ that also have invalid tying. */
+ enum aarch64_operand_class op_class1
+ = aarch64_get_operand_class (inst->operands[0].type);
+ enum aarch64_operand_class op_class2
+ = aarch64_get_operand_class (inst->operands[i].type);
+ assert (op_class1 == op_class2);
+ if (op_class1 == AARCH64_OPND_CLASS_SVE_REGLIST
+ ? ((inst->operands[0].reglist.first_regno
+ != inst->operands[i].reglist.first_regno)
+ || (inst->operands[0].reglist.num_regs
+ != inst->operands[i].reglist.num_regs)
+ || (inst->operands[0].reglist.stride
+ != inst->operands[i].reglist.stride))
+ : (inst->operands[0].reg.regno
+ != inst->operands[i].reg.regno))
+ {
+ if (mismatch_detail)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
+ mismatch_detail->index = i;
+ mismatch_detail->error = NULL;
+ }
+ return 0;
+ }
+ break;
+ }
+ }
}
/* Match operands' qualifier.
constraint checking will carried out by operand_general_constraint_met_p,
which has be to called after this in order to get all of the operands'
qualifiers established. */
- if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
+ int invalid_count;
+ if (match_operands_qualifier (inst, true /* update_p */,
+ &invalid_count) == 0)
{
DEBUG_TRACE ("FAIL on operand qualifier matching");
if (mismatch_detail)
mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
mismatch_detail->index = -1;
mismatch_detail->error = NULL;
+ mismatch_detail->data[0].i = invalid_count;
}
return 0;
}
return imm;
}
+/* Return a string based on FMT with the register style applied.
+   STYLER supplies the apply_style callback that performs the printf-style
+   formatting; the returned string's storage is owned by the styler
+   (presumably valid until the styler is reused -- see apply_style).  */
+
+static const char *
+style_reg (struct aarch64_styler *styler, const char *fmt, ...)
+{
+ const char *txt;
+ va_list ap;
+
+ /* Forward the variadic arguments to the styler's callback, tagged
+    with the register disassembly style.  */
+ va_start (ap, fmt);
+ txt = styler->apply_style (styler, dis_style_register, fmt, ap);
+ va_end (ap);
+
+ return txt;
+}
+
+/* Return a string based on FMT with the immediate style applied.
+   Same contract as style_reg, but tags the text as an immediate.  */
+
+static const char *
+style_imm (struct aarch64_styler *styler, const char *fmt, ...)
+{
+ const char *txt;
+ va_list ap;
+
+ /* Forward the variadic arguments to the styler's callback, tagged
+    with the immediate disassembly style.  */
+ va_start (ap, fmt);
+ txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
+ va_end (ap);
+
+ return txt;
+}
+
+/* Return a string based on FMT with the sub-mnemonic style applied.
+   Same contract as style_reg, but tags the text as a sub-mnemonic
+   (e.g. shift or extend operators).  */
+
+static const char *
+style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
+{
+ const char *txt;
+ va_list ap;
+
+ /* Forward the variadic arguments to the styler's callback, tagged
+    with the sub-mnemonic disassembly style.  */
+ va_start (ap, fmt);
+ txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
+ va_end (ap);
+
+ return txt;
+}
+
+/* Return a string based on FMT with the address style applied.
+   Same contract as style_reg, but tags the text as an address.  */
+
+static const char *
+style_addr (struct aarch64_styler *styler, const char *fmt, ...)
+{
+ const char *txt;
+ va_list ap;
+
+ /* Forward the variadic arguments to the styler's callback, tagged
+    with the address disassembly style.  */
+ va_start (ap, fmt);
+ txt = styler->apply_style (styler, dis_style_address, fmt, ap);
+ va_end (ap);
+
+ return txt;
+}
+
/* Produce the string representation of the register list operand *OPND
in the buffer pointed by BUF of size SIZE. PREFIX is the part of
the register name that comes before the register number, such as "v". */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
- const char *prefix)
+ const char *prefix, struct aarch64_styler *styler)
{
+ const int mask = (prefix[0] == 'p' ? 15 : 31);
const int num_regs = opnd->reglist.num_regs;
+ const int stride = opnd->reglist.stride;
const int first_reg = opnd->reglist.first_regno;
- const int last_reg = (first_reg + num_regs - 1) & 0x1f;
+ const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
- char tb[8]; /* Temporary buffer. */
+ char tb[16]; /* Temporary buffer. */
assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
assert (num_regs >= 1 && num_regs <= 4);
/* Prepare the index if any. */
if (opnd->reglist.has_index)
/* PR 21096: The %100 is to silence a warning about possible truncation. */
- snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
+ snprintf (tb, sizeof (tb), "[%s]",
+ style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
else
tb[0] = '\0';
/* The hyphenated form is preferred for disassembly if there are
more than two registers in the list, and the register numbers
are monotonically increasing in increments of one. */
- if (num_regs > 2 && last_reg > first_reg)
- snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
- prefix, last_reg, qlf_name, tb);
+ if (stride == 1 && num_regs > 1)
+ snprintf (buf, size, "{%s-%s}%s",
+ style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
+ style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
else
{
const int reg0 = first_reg;
- const int reg1 = (first_reg + 1) & 0x1f;
- const int reg2 = (first_reg + 2) & 0x1f;
- const int reg3 = (first_reg + 3) & 0x1f;
+ const int reg1 = (first_reg + stride) & mask;
+ const int reg2 = (first_reg + stride * 2) & mask;
+ const int reg3 = (first_reg + stride * 3) & mask;
switch (num_regs)
{
case 1:
- snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
+ snprintf (buf, size, "{%s}%s",
+ style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
+ tb);
break;
case 2:
- snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
- prefix, reg1, qlf_name, tb);
+ snprintf (buf, size, "{%s, %s}%s",
+ style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
+ style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
+ tb);
break;
case 3:
- snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
- prefix, reg0, qlf_name, prefix, reg1, qlf_name,
- prefix, reg2, qlf_name, tb);
+ snprintf (buf, size, "{%s, %s, %s}%s",
+ style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
+ style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
+ style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
+ tb);
break;
case 4:
- snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
- prefix, reg0, qlf_name, prefix, reg1, qlf_name,
- prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
+ snprintf (buf, size, "{%s, %s, %s, %s}%s",
+ style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
+ style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
+ style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
+ style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
+ tb);
break;
}
}
static void
print_immediate_offset_address (char *buf, size_t size,
const aarch64_opnd_info *opnd,
- const char *base)
+ const char *base,
+ struct aarch64_styler *styler)
{
+ /* Writeback forms: pre-indexed "[base, #imm]!" or
+    post-indexed "[base], #imm".  */
if (opnd->addr.writeback)
{
if (opnd->addr.preind)
{
+ /* ADDR_SIMM10 with a zero offset is printed as plain "[base]!".  */
if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
- snprintf (buf, size, "[%s]!", base);
+ snprintf (buf, size, "[%s]!", style_reg (styler, base));
else
- snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
+ snprintf (buf, size, "[%s, %s]!",
+ style_reg (styler, base),
+ style_imm (styler, "#%d", opnd->addr.offset.imm));
}
else
- snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
+ snprintf (buf, size, "[%s], %s",
+ style_reg (styler, base),
+ style_imm (styler, "#%d", opnd->addr.offset.imm));
}
else
{
+ /* SVE vector-length-scaled form "[base, #imm, mul vl]"; MUL VL is
+    the only shifter expected here (see the assert).  */
if (opnd->shifter.operator_present)
{
assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
- snprintf (buf, size, "[%s, #%d, mul vl]",
- base, opnd->addr.offset.imm);
+ snprintf (buf, size, "[%s, %s, %s]",
+ style_reg (styler, base),
+ style_imm (styler, "#%d", opnd->addr.offset.imm),
+ style_sub_mnem (styler, "mul vl"));
}
+ /* Plain non-zero immediate offset.  */
else if (opnd->addr.offset.imm)
- snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
+ snprintf (buf, size, "[%s, %s]",
+ style_reg (styler, base),
+ style_imm (styler, "#%d", opnd->addr.offset.imm));
+ /* A zero offset is omitted entirely.  */
else
- snprintf (buf, size, "[%s]", base);
+ snprintf (buf, size, "[%s]", style_reg (styler, base));
}
}
static void
print_register_offset_address (char *buf, size_t size,
const aarch64_opnd_info *opnd,
- const char *base, const char *offset)
+ const char *base, const char *offset,
+ struct aarch64_styler *styler)
{
- char tb[16]; /* Temporary buffer. */
- bfd_boolean print_extend_p = TRUE;
- bfd_boolean print_amount_p = TRUE;
+ char tb[32]; /* Temporary buffer. */
+ bool print_extend_p = true;
+ bool print_amount_p = true;
const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
{
/* Not print the shift/extend amount when the amount is zero and
when it is not the special case of 8-bit load/store instruction. */
- print_amount_p = FALSE;
+ print_amount_p = false;
/* Likewise, no need to print the shift operator LSL in such a
situation. */
if (opnd->shifter.kind == AARCH64_MOD_LSL)
- print_extend_p = FALSE;
+ print_extend_p = false;
}
/* Prepare for the extend/shift. */
if (print_extend_p)
{
if (print_amount_p)
- snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
+ snprintf (tb, sizeof (tb), ", %s %s",
+ style_sub_mnem (styler, shift_name),
+ style_imm (styler, "#%" PRIi64,
/* PR 21096: The %100 is to silence a warning about possible truncation. */
- (opnd->shifter.amount % 100));
+ (opnd->shifter.amount % 100)));
else
- snprintf (tb, sizeof (tb), ", %s", shift_name);
+ snprintf (tb, sizeof (tb), ", %s",
+ style_sub_mnem (styler, shift_name));
}
else
tb[0] = '\0';
- snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
+ snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
+ style_reg (styler, offset), tb);
+}
+
+/* Print ZA tiles from imm8 in ZERO instruction.
+
+ The preferred disassembly of this instruction uses the shortest list of tile
+ names that represent the encoded immediate mask.
+
+ For example:
+ * An all-ones immediate is disassembled as {ZA}.
+ * An all-zeros immediate is disassembled as an empty list { }.
+*/
+static void
+print_sme_za_list (char *buf, size_t size, int mask,
+ struct aarch64_styler *styler)
+{
+ /* Tile names and, in ZAN_V, the imm8 mask bits each name covers.
+    Entries are ordered from widest coverage ("za" covers all eight
+    bits) to single .d tiles, so the greedy loop below consumes MASK
+    with the fewest names possible.  */
+ const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
+ "za1.s", "za2.s", "za3.s", "za0.d",
+ "za1.d", "za2.d", "za3.d", "za4.d",
+ "za5.d", "za6.d", "za7.d", " " };
+ const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
+ 0x22, 0x44, 0x88, 0x01,
+ 0x02, 0x04, 0x08, 0x10,
+ 0x20, 0x40, 0x80, 0x00 };
+ int i, k;
+ const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);
+
+ /* K tracks the number of characters written so far; K > 1 means at
+    least one tile name follows the opening brace, so a separator is
+    needed before the next one.  */
+ k = snprintf (buf, size, "{");
+ for (i = 0; i < ZAN_SIZE; i++)
+ {
+ /* Emit ZAN[I] if all of its mask bits are still outstanding, then
+    clear those bits from MASK.  */
+ if ((mask & zan_v[i]) == zan_v[i])
+ {
+ mask &= ~zan_v[i];
+ if (k > 1)
+ k += snprintf (buf + k, size - k, ", ");
+
+ k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
+ }
+ /* Stop as soon as every requested tile has been printed.  */
+ if (mask == 0)
+ break;
+ }
+ snprintf (buf + k, size - k, "}");
+}
/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
const aarch64_opcode *opcode,
const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
bfd_vma *address, char** notes,
- aarch64_feature_set features)
+ char *comment, size_t comment_size,
+ aarch64_feature_set features,
+ struct aarch64_styler *styler)
{
unsigned int i, num_conds;
const char *name = NULL;
enum aarch64_modifier_kind kind;
uint64_t addr, enum_value;
+ if (comment != NULL)
+ {
+ assert (comment_size > 0);
+ comment[0] = '\0';
+ }
+ else
+ assert (comment_size == 0);
+
buf[0] = '\0';
if (pcrel_p)
*pcrel_p = 0;
assert (opnd->qualifier == AARCH64_OPND_QLF_W
|| opnd->qualifier == AARCH64_OPND_QLF_X);
snprintf (buf, size, "%s",
- get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
+ style_reg (styler, get_int_reg_name (opnd->reg.regno,
+ opnd->qualifier, 0)));
break;
case AARCH64_OPND_Rd_SP:
|| opnd->qualifier == AARCH64_OPND_QLF_X
|| opnd->qualifier == AARCH64_OPND_QLF_SP);
snprintf (buf, size, "%s",
- get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
+ style_reg (styler, get_int_reg_name (opnd->reg.regno,
+ opnd->qualifier, 1)));
break;
case AARCH64_OPND_Rm_EXT:
{
/* Shifter omitted. */
snprintf (buf, size, "%s",
- get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
+ style_reg (styler,
+ get_int_reg_name (opnd->reg.regno,
+ opnd->qualifier, 0)));
break;
}
}
if (opnd->shifter.amount)
- snprintf (buf, size, "%s, %s #%" PRIi64,
- get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
- aarch64_operand_modifiers[kind].name,
- opnd->shifter.amount);
+ snprintf (buf, size, "%s, %s %s",
+ style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
+ style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
+ style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
else
snprintf (buf, size, "%s, %s",
- get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
- aarch64_operand_modifiers[kind].name);
+ style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
+ style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
break;
case AARCH64_OPND_Rm_SFT:
|| opnd->qualifier == AARCH64_OPND_QLF_X);
if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
snprintf (buf, size, "%s",
- get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
+ style_reg (styler, get_int_reg_name (opnd->reg.regno,
+ opnd->qualifier, 0)));
else
- snprintf (buf, size, "%s, %s #%" PRIi64,
- get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
- aarch64_operand_modifiers[opnd->shifter.kind].name,
- opnd->shifter.amount);
+ snprintf (buf, size, "%s, %s %s",
+ style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
+ style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
+ style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
break;
case AARCH64_OPND_Fd:
case AARCH64_OPND_SVE_Vd:
case AARCH64_OPND_SVE_Vm:
case AARCH64_OPND_SVE_Vn:
- snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
- opnd->reg.regno);
+ snprintf (buf, size, "%s",
+ style_reg (styler, "%s%d",
+ aarch64_get_qualifier_name (opnd->qualifier),
+ opnd->reg.regno));
break;
case AARCH64_OPND_Va:
case AARCH64_OPND_Vd:
case AARCH64_OPND_Vn:
case AARCH64_OPND_Vm:
- snprintf (buf, size, "v%d.%s", opnd->reg.regno,
- aarch64_get_qualifier_name (opnd->qualifier));
+ snprintf (buf, size, "%s",
+ style_reg (styler, "v%d.%s", opnd->reg.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)));
break;
case AARCH64_OPND_Ed:
case AARCH64_OPND_Em:
case AARCH64_OPND_Em16:
case AARCH64_OPND_SM3_IMM2:
- snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
- aarch64_get_qualifier_name (opnd->qualifier),
- opnd->reglane.index);
+ snprintf (buf, size, "%s[%s]",
+ style_reg (styler, "v%d.%s", opnd->reglane.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)),
+ style_imm (styler, "%" PRIi64, opnd->reglane.index));
break;
case AARCH64_OPND_VdD1:
case AARCH64_OPND_VnD1:
- snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
+ snprintf (buf, size, "%s[%s]",
+ style_reg (styler, "v%d.d", opnd->reg.regno),
+ style_imm (styler, "1"));
break;
case AARCH64_OPND_LVn:
case AARCH64_OPND_LVt:
case AARCH64_OPND_LVt_AL:
case AARCH64_OPND_LEt:
- print_register_list (buf, size, opnd, "v");
+ print_register_list (buf, size, opnd, "v", styler);
break;
case AARCH64_OPND_SVE_Pd:
case AARCH64_OPND_SVE_Pm:
case AARCH64_OPND_SVE_Pn:
case AARCH64_OPND_SVE_Pt:
+ case AARCH64_OPND_SME_Pm:
if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
- snprintf (buf, size, "p%d", opnd->reg.regno);
+ snprintf (buf, size, "%s",
+ style_reg (styler, "p%d", opnd->reg.regno));
else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
|| opnd->qualifier == AARCH64_OPND_QLF_P_M)
- snprintf (buf, size, "p%d/%s", opnd->reg.regno,
- aarch64_get_qualifier_name (opnd->qualifier));
+ snprintf (buf, size, "%s",
+ style_reg (styler, "p%d/%s", opnd->reg.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)));
else
- snprintf (buf, size, "p%d.%s", opnd->reg.regno,
- aarch64_get_qualifier_name (opnd->qualifier));
+ snprintf (buf, size, "%s",
+ style_reg (styler, "p%d.%s", opnd->reg.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)));
+ break;
+
+ case AARCH64_OPND_SVE_PNd:
+ case AARCH64_OPND_SVE_PNg4_10:
+ case AARCH64_OPND_SVE_PNn:
+ case AARCH64_OPND_SVE_PNt:
+ case AARCH64_OPND_SME_PNd3:
+ case AARCH64_OPND_SME_PNg3:
+ case AARCH64_OPND_SME_PNn:
+ if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
+ snprintf (buf, size, "%s",
+ style_reg (styler, "pn%d", opnd->reg.regno));
+ else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
+ || opnd->qualifier == AARCH64_OPND_QLF_P_M)
+ snprintf (buf, size, "%s",
+ style_reg (styler, "pn%d/%s", opnd->reg.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)));
+ else
+ snprintf (buf, size, "%s",
+ style_reg (styler, "pn%d.%s", opnd->reg.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)));
+ break;
+
+ case AARCH64_OPND_SME_Pdx2:
+ case AARCH64_OPND_SME_PdxN:
+ print_register_list (buf, size, opnd, "p", styler);
+ break;
+
+ case AARCH64_OPND_SME_PNn3_INDEX1:
+ case AARCH64_OPND_SME_PNn3_INDEX2:
+ snprintf (buf, size, "%s[%s]",
+ style_reg (styler, "pn%d", opnd->reglane.regno),
+ style_imm (styler, "%" PRIi64, opnd->reglane.index));
break;
case AARCH64_OPND_SVE_Za_5:
case AARCH64_OPND_SVE_Zm_16:
case AARCH64_OPND_SVE_Zn:
case AARCH64_OPND_SVE_Zt:
+ case AARCH64_OPND_SME_Zm:
if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
- snprintf (buf, size, "z%d", opnd->reg.regno);
+ snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
else
- snprintf (buf, size, "z%d.%s", opnd->reg.regno,
- aarch64_get_qualifier_name (opnd->qualifier));
+ snprintf (buf, size, "%s",
+ style_reg (styler, "z%d.%s", opnd->reg.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)));
break;
case AARCH64_OPND_SVE_ZnxN:
case AARCH64_OPND_SVE_ZtxN:
- print_register_list (buf, size, opnd, "z");
+ case AARCH64_OPND_SME_Zdnx2:
+ case AARCH64_OPND_SME_Zdnx4:
+ case AARCH64_OPND_SME_Zmx2:
+ case AARCH64_OPND_SME_Zmx4:
+ case AARCH64_OPND_SME_Znx2:
+ case AARCH64_OPND_SME_Znx4:
+ case AARCH64_OPND_SME_Ztx2_STRIDED:
+ case AARCH64_OPND_SME_Ztx4_STRIDED:
+ print_register_list (buf, size, opnd, "z", styler);
break;
case AARCH64_OPND_SVE_Zm3_INDEX:
case AARCH64_OPND_SVE_Zm3_22_INDEX:
+ case AARCH64_OPND_SVE_Zm3_19_INDEX:
case AARCH64_OPND_SVE_Zm3_11_INDEX:
case AARCH64_OPND_SVE_Zm4_11_INDEX:
case AARCH64_OPND_SVE_Zm4_INDEX:
case AARCH64_OPND_SVE_Zn_INDEX:
- snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
- aarch64_get_qualifier_name (opnd->qualifier),
- opnd->reglane.index);
+ case AARCH64_OPND_SME_Zm_INDEX1:
+ case AARCH64_OPND_SME_Zm_INDEX2:
+ case AARCH64_OPND_SME_Zm_INDEX3_1:
+ case AARCH64_OPND_SME_Zm_INDEX3_2:
+ case AARCH64_OPND_SME_Zm_INDEX3_10:
+ case AARCH64_OPND_SME_Zm_INDEX4_1:
+ case AARCH64_OPND_SME_Zm_INDEX4_10:
+ case AARCH64_OPND_SME_Zn_INDEX1_16:
+ case AARCH64_OPND_SME_Zn_INDEX2_15:
+ case AARCH64_OPND_SME_Zn_INDEX2_16:
+ case AARCH64_OPND_SME_Zn_INDEX3_14:
+ case AARCH64_OPND_SME_Zn_INDEX3_15:
+ case AARCH64_OPND_SME_Zn_INDEX4_14:
+ snprintf (buf, size, "%s[%s]",
+ (opnd->qualifier == AARCH64_OPND_QLF_NIL
+ ? style_reg (styler, "z%d", opnd->reglane.regno)
+ : style_reg (styler, "z%d.%s", opnd->reglane.regno,
+ aarch64_get_qualifier_name (opnd->qualifier))),
+ style_imm (styler, "%" PRIi64, opnd->reglane.index));
+ break;
+
+ case AARCH64_OPND_SME_ZAda_2b:
+ case AARCH64_OPND_SME_ZAda_3b:
+ snprintf (buf, size, "%s",
+ style_reg (styler, "za%d.%s", opnd->reg.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)));
+ break;
+
+ case AARCH64_OPND_SME_ZA_HV_idx_src:
+ case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
+ case AARCH64_OPND_SME_ZA_HV_idx_dest:
+ case AARCH64_OPND_SME_ZA_HV_idx_destxN:
+ case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
+ snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
+ opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
+ style_reg (styler, "za%d%c.%s",
+ opnd->indexed_za.regno,
+ opnd->indexed_za.v == 1 ? 'v' : 'h',
+ aarch64_get_qualifier_name (opnd->qualifier)),
+ style_reg (styler, "w%d", opnd->indexed_za.index.regno),
+ style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
+ opnd->indexed_za.index.countm1 ? ":" : "",
+ (opnd->indexed_za.index.countm1
+ ? style_imm (styler, "%d",
+ opnd->indexed_za.index.imm
+ + opnd->indexed_za.index.countm1)
+ : ""),
+ opnd->indexed_za.group_size ? ", " : "",
+ opnd->indexed_za.group_size == 2
+ ? style_sub_mnem (styler, "vgx2")
+ : opnd->indexed_za.group_size == 4
+ ? style_sub_mnem (styler, "vgx4") : "",
+ opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
+ break;
+
+ case AARCH64_OPND_SME_list_of_64bit_tiles:
+ print_sme_za_list (buf, size, opnd->reg.regno, styler);
+ break;
+
+ case AARCH64_OPND_SME_ZA_array_off1x4:
+ case AARCH64_OPND_SME_ZA_array_off2x2:
+ case AARCH64_OPND_SME_ZA_array_off2x4:
+ case AARCH64_OPND_SME_ZA_array_off3_0:
+ case AARCH64_OPND_SME_ZA_array_off3_5:
+ case AARCH64_OPND_SME_ZA_array_off3x2:
+ case AARCH64_OPND_SME_ZA_array_off4:
+ snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
+ style_reg (styler, "za%s%s",
+ opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
+ (opnd->qualifier == AARCH64_OPND_QLF_NIL
+ ? ""
+ : aarch64_get_qualifier_name (opnd->qualifier))),
+ style_reg (styler, "w%d", opnd->indexed_za.index.regno),
+ style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
+ opnd->indexed_za.index.countm1 ? ":" : "",
+ (opnd->indexed_za.index.countm1
+ ? style_imm (styler, "%d",
+ opnd->indexed_za.index.imm
+ + opnd->indexed_za.index.countm1)
+ : ""),
+ opnd->indexed_za.group_size ? ", " : "",
+ opnd->indexed_za.group_size == 2
+ ? style_sub_mnem (styler, "vgx2")
+ : opnd->indexed_za.group_size == 4
+ ? style_sub_mnem (styler, "vgx4") : "");
+ break;
+
+ case AARCH64_OPND_SME_SM_ZA:
+ snprintf (buf, size, "%s",
+ style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
+ break;
+
+ case AARCH64_OPND_SME_PnT_Wm_imm:
+ snprintf (buf, size, "%s[%s, %s]",
+ style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
+ aarch64_get_qualifier_name (opnd->qualifier)),
+ style_reg (styler, "w%d", opnd->indexed_za.index.regno),
+ style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
+ break;
+
+ case AARCH64_OPND_SME_VLxN_10:
+ case AARCH64_OPND_SME_VLxN_13:
+ enum_value = opnd->imm.value;
+ assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
+ snprintf (buf, size, "%s",
+ style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
break;
case AARCH64_OPND_CRn:
case AARCH64_OPND_CRm:
- snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
+ snprintf (buf, size, "%s",
+ style_reg (styler, "C%" PRIi64, opnd->imm.value));
break;
case AARCH64_OPND_IDX:
case AARCH64_OPND_FBITS:
case AARCH64_OPND_TME_UIMM16:
case AARCH64_OPND_SIMM5:
+ case AARCH64_OPND_SME_SHRIMM4:
+ case AARCH64_OPND_SME_SHRIMM5:
case AARCH64_OPND_SVE_SHLIMM_PRED:
case AARCH64_OPND_SVE_SHLIMM_UNPRED:
case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
case AARCH64_OPND_SVE_IMM_ROT1:
case AARCH64_OPND_SVE_IMM_ROT2:
case AARCH64_OPND_SVE_IMM_ROT3:
- snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+ case AARCH64_OPND_CSSC_SIMM8:
+ case AARCH64_OPND_CSSC_UIMM8:
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#%" PRIi64, opnd->imm.value));
break;
case AARCH64_OPND_SVE_I1_HALF_ONE:
{
single_conv_t c;
c.i = opnd->imm.value;
- snprintf (buf, size, "#%.1f", c.f);
+ snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
break;
}
enum_value = opnd->imm.value;
assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
if (aarch64_sve_pattern_array[enum_value])
- snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
+ snprintf (buf, size, "%s",
+ style_reg (styler, aarch64_sve_pattern_array[enum_value]));
else
- snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#%" PRIi64, opnd->imm.value));
break;
case AARCH64_OPND_SVE_PATTERN_SCALED:
enum_value = opnd->imm.value;
assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
if (aarch64_sve_pattern_array[opnd->imm.value])
- snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
+ snprintf (buf, size, "%s",
+ style_reg (styler,
+ aarch64_sve_pattern_array[opnd->imm.value]));
else
- snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#%" PRIi64, opnd->imm.value));
if (opnd->shifter.operator_present)
{
size_t len = strlen (buf);
- snprintf (buf + len, size - len, ", %s #%" PRIi64,
- aarch64_operand_modifiers[opnd->shifter.kind].name,
- opnd->shifter.amount);
+ const char *shift_name
+ = aarch64_operand_modifiers[opnd->shifter.kind].name;
+ snprintf (buf + len, size - len, ", %s %s",
+ style_sub_mnem (styler, shift_name),
+ style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
}
break;
enum_value = opnd->imm.value;
assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
if (aarch64_sve_prfop_array[enum_value])
- snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
+ snprintf (buf, size, "%s",
+ style_reg (styler, aarch64_sve_prfop_array[enum_value]));
else
- snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#%" PRIi64, opnd->imm.value));
break;
case AARCH64_OPND_IMM_MOV:
case 4: /* e.g. MOV Wd, #<imm32>. */
{
int imm32 = opnd->imm.value;
- snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#0x%-20x", imm32));
+ snprintf (comment, comment_size, "#%d", imm32);
}
break;
case 8: /* e.g. MOV Xd, #<imm64>. */
- snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
- opnd->imm.value, opnd->imm.value);
+ snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
+ opnd->imm.value));
+ snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
+ break;
+ default:
+ snprintf (buf, size, "<invalid>");
break;
- default: assert (0);
}
break;
case AARCH64_OPND_FPIMM0:
- snprintf (buf, size, "#0.0");
+ snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
break;
case AARCH64_OPND_LIMM:
case AARCH64_OPND_SVE_LIMM:
case AARCH64_OPND_SVE_LIMM_MOV:
if (opnd->shifter.amount)
- snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
- opnd->shifter.amount);
+ snprintf (buf, size, "%s, %s %s",
+ style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
+ style_sub_mnem (styler, "lsl"),
+ style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
else
- snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
break;
case AARCH64_OPND_SIMD_IMM:
case AARCH64_OPND_SIMD_IMM_SFT:
if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
|| opnd->shifter.kind == AARCH64_MOD_NONE)
- snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
else
- snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
- aarch64_operand_modifiers[opnd->shifter.kind].name,
- opnd->shifter.amount);
+ snprintf (buf, size, "%s, %s %s",
+ style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
+ style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
+ style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
break;
case AARCH64_OPND_SVE_AIMM:
case AARCH64_OPND_SVE_ASIMM:
if (opnd->shifter.amount)
- snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
- opnd->shifter.amount);
+ snprintf (buf, size, "%s, %s %s",
+ style_imm (styler, "#%" PRIi64, opnd->imm.value),
+ style_sub_mnem (styler, "lsl"),
+ style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
else
- snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#%" PRIi64, opnd->imm.value));
break;
case AARCH64_OPND_FPIMM:
{
half_conv_t c;
c.i = expand_fp_imm (2, opnd->imm.value);
- snprintf (buf, size, "#%.18e", c.f);
+ snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
}
break;
case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
{
single_conv_t c;
c.i = expand_fp_imm (4, opnd->imm.value);
- snprintf (buf, size, "#%.18e", c.f);
+ snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
}
break;
case 8: /* e.g. FMOV <Sd>, #<imm>. */
{
double_conv_t c;
c.i = expand_fp_imm (8, opnd->imm.value);
- snprintf (buf, size, "#%.18e", c.d);
+ snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
}
break;
- default: assert (0);
+ default:
+ snprintf (buf, size, "<invalid>");
+ break;
}
break;
case AARCH64_OPND_UIMM4_ADDG:
case AARCH64_OPND_UIMM7:
case AARCH64_OPND_UIMM10:
- if (optional_operand_p (opcode, idx) == TRUE
+ if (optional_operand_p (opcode, idx)
&& (opnd->imm.value ==
(int64_t) get_optional_operand_default_value (opcode)))
/* Omit the operand, e.g. DCPS1. */
break;
- snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
break;
case AARCH64_OPND_COND:
case AARCH64_OPND_COND1:
- snprintf (buf, size, "%s", opnd->cond->names[0]);
+ snprintf (buf, size, "%s",
+ style_sub_mnem (styler, opnd->cond->names[0]));
num_conds = ARRAY_SIZE (opnd->cond->names);
for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
{
- size_t len = strlen (buf);
+ size_t len = comment != NULL ? strlen (comment) : 0;
if (i == 1)
- snprintf (buf + len, size - len, " // %s = %s",
+ snprintf (comment + len, comment_size - len, "%s = %s",
opnd->cond->names[0], opnd->cond->names[i]);
else
- snprintf (buf + len, size - len, ", %s",
+ snprintf (comment + len, comment_size - len, ", %s",
opnd->cond->names[i]);
}
break;
in the disassemble_info will take care of the printing. But some
other callers may be still interested in getting the string in *STR,
so here we do snprintf regardless. */
- snprintf (buf, size, "#0x%" PRIx64, addr);
+ snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
break;
case AARCH64_OPND_ADDR_PCREL14:
in the disassemble_info will take care of the printing. But some
other callers may be still interested in getting the string in *STR,
so here we do snprintf regardless. */
- snprintf (buf, size, "#0x%" PRIx64, addr);
+ snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
break;
case AARCH64_OPND_ADDR_SIMPLE:
if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
{
if (opnd->addr.offset.is_reg)
- snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
+ snprintf (buf, size, "[%s], %s",
+ style_reg (styler, name),
+ style_reg (styler, "x%d", opnd->addr.offset.regno));
else
- snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
+ snprintf (buf, size, "[%s], %s",
+ style_reg (styler, name),
+ style_imm (styler, "#%d", opnd->addr.offset.imm));
}
else
- snprintf (buf, size, "[%s]", name);
+ snprintf (buf, size, "[%s]", style_reg (styler, name));
break;
case AARCH64_OPND_ADDR_REGOFF:
case AARCH64_OPND_SVE_ADDR_RR_LSL1:
case AARCH64_OPND_SVE_ADDR_RR_LSL2:
case AARCH64_OPND_SVE_ADDR_RR_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL4:
case AARCH64_OPND_SVE_ADDR_RX:
case AARCH64_OPND_SVE_ADDR_RX_LSL1:
case AARCH64_OPND_SVE_ADDR_RX_LSL2:
case AARCH64_OPND_SVE_ADDR_RX_LSL3:
print_register_offset_address
(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
- get_offset_int_reg_name (opnd));
+ get_offset_int_reg_name (opnd), styler);
break;
case AARCH64_OPND_SVE_ADDR_ZX:
print_register_offset_address
(buf, size, opnd,
get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
- get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
+ get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
break;
case AARCH64_OPND_SVE_ADDR_RZ:
case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
print_register_offset_address
(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
- get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
+ get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
+ styler);
break;
case AARCH64_OPND_ADDR_SIMM7:
case AARCH64_OPND_ADDR_SIMM11:
case AARCH64_OPND_ADDR_SIMM13:
case AARCH64_OPND_ADDR_OFFSET:
+ case AARCH64_OPND_SME_ADDR_RI_U4xVL:
case AARCH64_OPND_SVE_ADDR_RI_S4x16:
case AARCH64_OPND_SVE_ADDR_RI_S4x32:
case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
case AARCH64_OPND_SVE_ADDR_RI_U6x4:
case AARCH64_OPND_SVE_ADDR_RI_U6x8:
print_immediate_offset_address
- (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
+ (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
+ styler);
break;
case AARCH64_OPND_SVE_ADDR_ZI_U5:
case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
print_immediate_offset_address
(buf, size, opnd,
- get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
+ get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
+ styler);
break;
case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
print_register_offset_address
(buf, size, opnd,
get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
- get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
+ get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
+ styler);
break;
case AARCH64_OPND_ADDR_UIMM12:
name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
if (opnd->addr.offset.imm)
- snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
+ snprintf (buf, size, "[%s, %s]",
+ style_reg (styler, name),
+ style_imm (styler, "#%d", opnd->addr.offset.imm));
else
- snprintf (buf, size, "[%s]", name);
+ snprintf (buf, size, "[%s]", style_reg (styler, name));
break;
case AARCH64_OPND_SYSREG:
{
const aarch64_sys_reg *sr = aarch64_sys_regs + i;
- bfd_boolean exact_match
+ bool exact_match
= (!(sr->flags & (F_REG_READ | F_REG_WRITE))
|| (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
&& AARCH64_CPU_HAS_FEATURE (features, sr->features);
}
if (name)
- snprintf (buf, size, "%s", name);
+ snprintf (buf, size, "%s", style_reg (styler, name));
else
{
/* Implementation defined system register. */
unsigned int value = opnd->sysreg.value;
- snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
- (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
- value & 0x7);
+ snprintf (buf, size, "%s",
+ style_reg (styler, "s%u_%u_c%u_c%u_%u",
+ (value >> 14) & 0x3, (value >> 11) & 0x7,
+ (value >> 7) & 0xf, (value >> 3) & 0xf,
+ value & 0x7));
}
break;
case AARCH64_OPND_PSTATEFIELD:
for (i = 0; aarch64_pstatefields[i].name; ++i)
- if (aarch64_pstatefields[i].value == opnd->pstatefield)
- break;
+ if (aarch64_pstatefields[i].value == opnd->pstatefield)
+ {
+ /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
+ SVCRZA and SVCRSMZA. */
+ uint32_t flags = aarch64_pstatefields[i].flags;
+ if (flags & F_REG_IN_CRM
+ && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
+ != PSTATE_DECODE_CRM (flags)))
+ continue;
+ break;
+ }
assert (aarch64_pstatefields[i].name);
- snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
+ snprintf (buf, size, "%s",
+ style_reg (styler, aarch64_pstatefields[i].name));
break;
case AARCH64_OPND_SYSREG_AT:
case AARCH64_OPND_SYSREG_IC:
case AARCH64_OPND_SYSREG_TLBI:
case AARCH64_OPND_SYSREG_SR:
- snprintf (buf, size, "%s", opnd->sysins_op->name);
+ snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
break;
case AARCH64_OPND_BARRIER:
case AARCH64_OPND_BARRIER_DSB_NXS:
- snprintf (buf, size, "%s", opnd->barrier->name);
+ {
+ if (opnd->barrier->name[0] == '#')
+ snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
+ else
+ snprintf (buf, size, "%s",
+ style_sub_mnem (styler, opnd->barrier->name));
+ }
break;
case AARCH64_OPND_BARRIER_ISB:
if (! optional_operand_p (opcode, idx)
|| (opnd->barrier->value
!= get_optional_operand_default_value (opcode)))
- snprintf (buf, size, "#0x%x", opnd->barrier->value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#0x%x", opnd->barrier->value));
break;
case AARCH64_OPND_PRFOP:
if (opnd->prfop->name != NULL)
- snprintf (buf, size, "%s", opnd->prfop->name);
+ snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
+ else
+ snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
+ opnd->prfop->value));
+ break;
+
+ case AARCH64_OPND_RPRFMOP:
+ enum_value = opnd->imm.value;
+ if (enum_value < ARRAY_SIZE (aarch64_rprfmop_array)
+ && aarch64_rprfmop_array[enum_value])
+ snprintf (buf, size, "%s",
+ style_reg (styler, aarch64_rprfmop_array[enum_value]));
else
- snprintf (buf, size, "#0x%02x", opnd->prfop->value);
+ snprintf (buf, size, "%s",
+ style_imm (styler, "#%" PRIi64, opnd->imm.value));
break;
case AARCH64_OPND_BARRIER_PSB:
- snprintf (buf, size, "csync");
+ snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
+ break;
+
+ case AARCH64_OPND_SME_ZT0:
+ snprintf (buf, size, "%s", style_reg (styler, "zt0"));
+ break;
+
+ case AARCH64_OPND_SME_ZT0_INDEX:
+ snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
+ style_imm (styler, "%d", (int) opnd->imm.value));
+ break;
+
+ case AARCH64_OPND_SME_ZT0_LIST:
+ snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
break;
case AARCH64_OPND_BTI_TARGET:
if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
- snprintf (buf, size, "%s", opnd->hint_option->name);
+ snprintf (buf, size, "%s",
+ style_sub_mnem (styler, opnd->hint_option->name));
+ break;
+
+ case AARCH64_OPND_MOPS_ADDR_Rd:
+ case AARCH64_OPND_MOPS_ADDR_Rs:
+ snprintf (buf, size, "[%s]!",
+ style_reg (styler,
+ get_int_reg_name (opnd->reg.regno,
+ AARCH64_OPND_QLF_X, 0)));
break;
- case AARCH64_OPND_CSRE_CSR:
- snprintf (buf, size, "pdec");
+ case AARCH64_OPND_MOPS_WB_Rn:
+ snprintf (buf, size, "%s!",
+ style_reg (styler, get_int_reg_name (opnd->reg.regno,
+ AARCH64_OPND_QLF_X, 0)));
break;
default:
- assert (0);
+ snprintf (buf, size, "<invalid>");
+ break;
}
}
\f
SYSREG ((n), (e), (f) | F_ARCHEXT, \
AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)
-#define SR_RNG(n,e,f) SR_FEAT2(n,e,f,RNG,V8_5)
#define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
#define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
#define SR_V8_2(n,e,f) SR_FEAT (n,e,f,V8_2)
#define SR_V8_3(n,e,f) SR_FEAT (n,e,f,V8_3)
#define SR_V8_4(n,e,f) SR_FEAT (n,e,f,V8_4)
-#define SR_V8_4(n,e,f) SR_FEAT (n,e,f,V8_4)
+#define SR_V8_6(n,e,f) SR_FEAT (n,e,f,V8_6)
+#define SR_V8_7(n,e,f) SR_FEAT (n,e,f,V8_7)
+#define SR_V8_8(n,e,f) SR_FEAT (n,e,f,V8_8)
+/* Has no separate libopcodes feature flag, but separated out for clarity. */
+#define SR_GIC(n,e,f) SR_CORE (n,e,f)
+/* Has no separate libopcodes feature flag, but separated out for clarity. */
+#define SR_AMU(n,e,f) SR_FEAT (n,e,f,V8_4)
+#define SR_LOR(n,e,f) SR_FEAT (n,e,f,LOR)
#define SR_PAN(n,e,f) SR_FEAT (n,e,f,PAN)
#define SR_RAS(n,e,f) SR_FEAT (n,e,f,RAS)
+#define SR_RNG(n,e,f) SR_FEAT (n,e,f,RNG)
+#define SR_SME(n,e,f) SR_FEAT (n,e,f,SME)
#define SR_SSBS(n,e,f) SR_FEAT (n,e,f,SSBS)
#define SR_SVE(n,e,f) SR_FEAT (n,e,f,SVE)
#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
+ SR_CORE ("id_dfr1_el1", CPENC (3,0,C0,C3,5), F_REG_READ),
SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
+ SR_CORE ("id_mmfr5_el1", CPENC (3,0,C0,C3,6), F_REG_READ),
SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
+ SR_CORE ("id_isar6_el1", CPENC (3,0,C0,C2,7), F_REG_READ),
SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
+ SR_V8_3 ("ccsidr2_el1", CPENC (3,1,C0,C0,2), F_REG_READ),
SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
+ SR_CORE ("id_aa64isar2_el1", CPENC (3,0,C0,C6,2), F_REG_READ),
SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
- SR_V8_2 ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
+ SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
- SR_SVE ("zidr_el1", CPENC (3,0,C0,C0,7), 0),
SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
- SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), 0),
+ SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), F_REG_READ),
SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
+ SR_V8_4 ("trfcr_el1", CPENC (3,0,C1,C2,1), 0),
+ SR_V8_4 ("pmmir_el1", CPENC (3,0,C9,C14,6), F_REG_READ),
+ SR_V8_4 ("trfcr_el2", CPENC (3,4,C1,C2,1), 0),
SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
+ SR_V8_4 ("trfcr_el12", CPENC (3,5,C1,C2,1), 0),
SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
- SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
- SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
- SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
- SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
- SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
-
SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
- SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
+ SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
+ SR_LOR ("lorid_el1", CPENC (3,0,C10,C4,7), F_REG_READ),
+ SR_LOR ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
+ SR_LOR ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
+ SR_LOR ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
+ SR_LOR ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
+
+ SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
+ SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
+ SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
+ SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
+ SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
+
SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
+ SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), 0),
+ SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
+ SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
+
+ SR_CORE ("mecidr_el2", CPENC (3,4,C10,C8,7), F_REG_READ),
+ SR_CORE ("mecid_p0_el2", CPENC (3,4,C10,C8,0), 0),
+ SR_CORE ("mecid_a0_el2", CPENC (3,4,C10,C8,1), 0),
+ SR_CORE ("mecid_p1_el2", CPENC (3,4,C10,C8,2), 0),
+ SR_CORE ("mecid_a1_el2", CPENC (3,4,C10,C8,3), 0),
+ SR_CORE ("vmecid_p_el2", CPENC (3,4,C10,C9,0), 0),
+ SR_CORE ("vmecid_a_el2", CPENC (3,4,C10,C9,1), 0),
+ SR_CORE ("mecid_rl_a_el3",CPENC (3,6,C10,C10,1), 0),
+
+ SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
+ SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
+ SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
+ SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
+ SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
+ SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
+ SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
+ SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
+ SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
+ SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
+ SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
+
+ SR_AMU ("amcr_el0", CPENC (3,3,C13,C2,0), 0),
+ SR_AMU ("amcfgr_el0", CPENC (3,3,C13,C2,1), F_REG_READ),
+ SR_AMU ("amcgcr_el0", CPENC (3,3,C13,C2,2), F_REG_READ),
+ SR_AMU ("amuserenr_el0", CPENC (3,3,C13,C2,3), 0),
+ SR_AMU ("amcntenclr0_el0", CPENC (3,3,C13,C2,4), 0),
+ SR_AMU ("amcntenset0_el0", CPENC (3,3,C13,C2,5), 0),
+ SR_AMU ("amcntenclr1_el0", CPENC (3,3,C13,C3,0), 0),
+ SR_AMU ("amcntenset1_el0", CPENC (3,3,C13,C3,1), 0),
+ SR_AMU ("amevcntr00_el0", CPENC (3,3,C13,C4,0), 0),
+ SR_AMU ("amevcntr01_el0", CPENC (3,3,C13,C4,1), 0),
+ SR_AMU ("amevcntr02_el0", CPENC (3,3,C13,C4,2), 0),
+ SR_AMU ("amevcntr03_el0", CPENC (3,3,C13,C4,3), 0),
+ SR_AMU ("amevtyper00_el0", CPENC (3,3,C13,C6,0), F_REG_READ),
+ SR_AMU ("amevtyper01_el0", CPENC (3,3,C13,C6,1), F_REG_READ),
+ SR_AMU ("amevtyper02_el0", CPENC (3,3,C13,C6,2), F_REG_READ),
+ SR_AMU ("amevtyper03_el0", CPENC (3,3,C13,C6,3), F_REG_READ),
+ SR_AMU ("amevcntr10_el0", CPENC (3,3,C13,C12,0), 0),
+ SR_AMU ("amevcntr11_el0", CPENC (3,3,C13,C12,1), 0),
+ SR_AMU ("amevcntr12_el0", CPENC (3,3,C13,C12,2), 0),
+ SR_AMU ("amevcntr13_el0", CPENC (3,3,C13,C12,3), 0),
+ SR_AMU ("amevcntr14_el0", CPENC (3,3,C13,C12,4), 0),
+ SR_AMU ("amevcntr15_el0", CPENC (3,3,C13,C12,5), 0),
+ SR_AMU ("amevcntr16_el0", CPENC (3,3,C13,C12,6), 0),
+ SR_AMU ("amevcntr17_el0", CPENC (3,3,C13,C12,7), 0),
+ SR_AMU ("amevcntr18_el0", CPENC (3,3,C13,C13,0), 0),
+ SR_AMU ("amevcntr19_el0", CPENC (3,3,C13,C13,1), 0),
+ SR_AMU ("amevcntr110_el0", CPENC (3,3,C13,C13,2), 0),
+ SR_AMU ("amevcntr111_el0", CPENC (3,3,C13,C13,3), 0),
+ SR_AMU ("amevcntr112_el0", CPENC (3,3,C13,C13,4), 0),
+ SR_AMU ("amevcntr113_el0", CPENC (3,3,C13,C13,5), 0),
+ SR_AMU ("amevcntr114_el0", CPENC (3,3,C13,C13,6), 0),
+ SR_AMU ("amevcntr115_el0", CPENC (3,3,C13,C13,7), 0),
+ SR_AMU ("amevtyper10_el0", CPENC (3,3,C13,C14,0), 0),
+ SR_AMU ("amevtyper11_el0", CPENC (3,3,C13,C14,1), 0),
+ SR_AMU ("amevtyper12_el0", CPENC (3,3,C13,C14,2), 0),
+ SR_AMU ("amevtyper13_el0", CPENC (3,3,C13,C14,3), 0),
+ SR_AMU ("amevtyper14_el0", CPENC (3,3,C13,C14,4), 0),
+ SR_AMU ("amevtyper15_el0", CPENC (3,3,C13,C14,5), 0),
+ SR_AMU ("amevtyper16_el0", CPENC (3,3,C13,C14,6), 0),
+ SR_AMU ("amevtyper17_el0", CPENC (3,3,C13,C14,7), 0),
+ SR_AMU ("amevtyper18_el0", CPENC (3,3,C13,C15,0), 0),
+ SR_AMU ("amevtyper19_el0", CPENC (3,3,C13,C15,1), 0),
+ SR_AMU ("amevtyper110_el0", CPENC (3,3,C13,C15,2), 0),
+ SR_AMU ("amevtyper111_el0", CPENC (3,3,C13,C15,3), 0),
+ SR_AMU ("amevtyper112_el0", CPENC (3,3,C13,C15,4), 0),
+ SR_AMU ("amevtyper113_el0", CPENC (3,3,C13,C15,5), 0),
+ SR_AMU ("amevtyper114_el0", CPENC (3,3,C13,C15,6), 0),
+ SR_AMU ("amevtyper115_el0", CPENC (3,3,C13,C15,7), 0),
+
+ SR_GIC ("icc_pmr_el1", CPENC (3,0,C4,C6,0), 0),
+ SR_GIC ("icc_iar0_el1", CPENC (3,0,C12,C8,0), F_REG_READ),
+ SR_GIC ("icc_eoir0_el1", CPENC (3,0,C12,C8,1), F_REG_WRITE),
+ SR_GIC ("icc_hppir0_el1", CPENC (3,0,C12,C8,2), F_REG_READ),
+ SR_GIC ("icc_bpr0_el1", CPENC (3,0,C12,C8,3), 0),
+ SR_GIC ("icc_ap0r0_el1", CPENC (3,0,C12,C8,4), 0),
+ SR_GIC ("icc_ap0r1_el1", CPENC (3,0,C12,C8,5), 0),
+ SR_GIC ("icc_ap0r2_el1", CPENC (3,0,C12,C8,6), 0),
+ SR_GIC ("icc_ap0r3_el1", CPENC (3,0,C12,C8,7), 0),
+ SR_GIC ("icc_ap1r0_el1", CPENC (3,0,C12,C9,0), 0),
+ SR_GIC ("icc_ap1r1_el1", CPENC (3,0,C12,C9,1), 0),
+ SR_GIC ("icc_ap1r2_el1", CPENC (3,0,C12,C9,2), 0),
+ SR_GIC ("icc_ap1r3_el1", CPENC (3,0,C12,C9,3), 0),
+ SR_GIC ("icc_dir_el1", CPENC (3,0,C12,C11,1), F_REG_WRITE),
+ SR_GIC ("icc_rpr_el1", CPENC (3,0,C12,C11,3), F_REG_READ),
+ SR_GIC ("icc_sgi1r_el1", CPENC (3,0,C12,C11,5), F_REG_WRITE),
+ SR_GIC ("icc_asgi1r_el1", CPENC (3,0,C12,C11,6), F_REG_WRITE),
+ SR_GIC ("icc_sgi0r_el1", CPENC (3,0,C12,C11,7), F_REG_WRITE),
+ SR_GIC ("icc_iar1_el1", CPENC (3,0,C12,C12,0), F_REG_READ),
+ SR_GIC ("icc_eoir1_el1", CPENC (3,0,C12,C12,1), F_REG_WRITE),
+ SR_GIC ("icc_hppir1_el1", CPENC (3,0,C12,C12,2), F_REG_READ),
+ SR_GIC ("icc_bpr1_el1", CPENC (3,0,C12,C12,3), 0),
+ SR_GIC ("icc_ctlr_el1", CPENC (3,0,C12,C12,4), 0),
+ SR_GIC ("icc_igrpen0_el1", CPENC (3,0,C12,C12,6), 0),
+ SR_GIC ("icc_igrpen1_el1", CPENC (3,0,C12,C12,7), 0),
+ SR_GIC ("ich_ap0r0_el2", CPENC (3,4,C12,C8,0), 0),
+ SR_GIC ("ich_ap0r1_el2", CPENC (3,4,C12,C8,1), 0),
+ SR_GIC ("ich_ap0r2_el2", CPENC (3,4,C12,C8,2), 0),
+ SR_GIC ("ich_ap0r3_el2", CPENC (3,4,C12,C8,3), 0),
+ SR_GIC ("ich_ap1r0_el2", CPENC (3,4,C12,C9,0), 0),
+ SR_GIC ("ich_ap1r1_el2", CPENC (3,4,C12,C9,1), 0),
+ SR_GIC ("ich_ap1r2_el2", CPENC (3,4,C12,C9,2), 0),
+ SR_GIC ("ich_ap1r3_el2", CPENC (3,4,C12,C9,3), 0),
+ SR_GIC ("ich_hcr_el2", CPENC (3,4,C12,C11,0), 0),
+ SR_GIC ("ich_misr_el2", CPENC (3,4,C12,C11,2), F_REG_READ),
+ SR_GIC ("ich_eisr_el2", CPENC (3,4,C12,C11,3), F_REG_READ),
+ SR_GIC ("ich_elrsr_el2", CPENC (3,4,C12,C11,5), F_REG_READ),
+ SR_GIC ("ich_vmcr_el2", CPENC (3,4,C12,C11,7), 0),
+ SR_GIC ("ich_lr0_el2", CPENC (3,4,C12,C12,0), 0),
+ SR_GIC ("ich_lr1_el2", CPENC (3,4,C12,C12,1), 0),
+ SR_GIC ("ich_lr2_el2", CPENC (3,4,C12,C12,2), 0),
+ SR_GIC ("ich_lr3_el2", CPENC (3,4,C12,C12,3), 0),
+ SR_GIC ("ich_lr4_el2", CPENC (3,4,C12,C12,4), 0),
+ SR_GIC ("ich_lr5_el2", CPENC (3,4,C12,C12,5), 0),
+ SR_GIC ("ich_lr6_el2", CPENC (3,4,C12,C12,6), 0),
+ SR_GIC ("ich_lr7_el2", CPENC (3,4,C12,C12,7), 0),
+ SR_GIC ("ich_lr8_el2", CPENC (3,4,C12,C13,0), 0),
+ SR_GIC ("ich_lr9_el2", CPENC (3,4,C12,C13,1), 0),
+ SR_GIC ("ich_lr10_el2", CPENC (3,4,C12,C13,2), 0),
+ SR_GIC ("ich_lr11_el2", CPENC (3,4,C12,C13,3), 0),
+ SR_GIC ("ich_lr12_el2", CPENC (3,4,C12,C13,4), 0),
+ SR_GIC ("ich_lr13_el2", CPENC (3,4,C12,C13,5), 0),
+ SR_GIC ("ich_lr14_el2", CPENC (3,4,C12,C13,6), 0),
+ SR_GIC ("ich_lr15_el2", CPENC (3,4,C12,C13,7), 0),
+ SR_GIC ("icc_igrpen1_el3", CPENC (3,6,C12,C12,7), 0),
+
+ SR_V8_6 ("amcg1idr_el0", CPENC (3,3,C13,C2,6), F_REG_READ),
+ SR_V8_6 ("cntpctss_el0", CPENC (3,3,C14,C0,5), F_REG_READ),
+ SR_V8_6 ("cntvctss_el0", CPENC (3,3,C14,C0,6), F_REG_READ),
+ SR_V8_6 ("hfgrtr_el2", CPENC (3,4,C1,C1,4), 0),
+ SR_V8_6 ("hfgwtr_el2", CPENC (3,4,C1,C1,5), 0),
+ SR_V8_6 ("hfgitr_el2", CPENC (3,4,C1,C1,6), 0),
+ SR_V8_6 ("hdfgrtr_el2", CPENC (3,4,C3,C1,4), 0),
+ SR_V8_6 ("hdfgwtr_el2", CPENC (3,4,C3,C1,5), 0),
+ SR_V8_6 ("hafgrtr_el2", CPENC (3,4,C3,C1,6), 0),
+ SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0), 0),
+ SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1), 0),
+ SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2), 0),
+ SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3), 0),
+ SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4), 0),
+ SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5), 0),
+ SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6), 0),
+ SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7), 0),
+ SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0), 0),
+ SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1), 0),
+ SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2), 0),
+ SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3), 0),
+ SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4), 0),
+ SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5), 0),
+ SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6), 0),
+ SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7), 0),
+ SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0), 0),
+ SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1), 0),
+ SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2), 0),
+ SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3), 0),
+ SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4), 0),
+ SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5), 0),
+ SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6), 0),
+ SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7), 0),
+ SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0), 0),
+ SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1), 0),
+ SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
+ SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
+ SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
+ SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
+ SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
+ SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
+ SR_V8_6 ("cntpoff_el2", CPENC (3,4,C14,C0,6), 0),
+
+ SR_V8_7 ("pmsnevfr_el1", CPENC (3,0,C9,C9,1), 0),
+ SR_V8_7 ("hcrx_el2", CPENC (3,4,C1,C2,2), 0),
+
+ SR_V8_8 ("allint", CPENC (3,0,C4,C3,0), 0),
+ SR_V8_8 ("icc_nmiar1_el1", CPENC (3,0,C12,C9,5), F_REG_READ),
+
{ 0, CPENC (0,0,0,0,0), 0, 0 }
};
-bfd_boolean
+bool
aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
{
return (reg_flags & F_DEPRECATED) != 0;
0b011010 (0x1a). */
const aarch64_sys_reg aarch64_pstatefields [] =
{
- SR_CORE ("spsel", 0x05, 0),
- SR_CORE ("daifset", 0x1e, 0),
- SR_CORE ("daifclr", 0x1f, 0),
- SR_PAN ("pan", 0x04, 0),
- SR_V8_2 ("uao", 0x03, 0),
- SR_SSBS ("ssbs", 0x19, 0),
- SR_V8_4 ("dit", 0x1a, 0),
- SR_MEMTAG ("tco", 0x1c, 0),
+ SR_CORE ("spsel", 0x05, F_REG_MAX_VALUE (1)),
+ SR_CORE ("daifset", 0x1e, F_REG_MAX_VALUE (15)),
+ SR_CORE ("daifclr", 0x1f, F_REG_MAX_VALUE (15)),
+ SR_PAN ("pan", 0x04, F_REG_MAX_VALUE (1)),
+ SR_V8_2 ("uao", 0x03, F_REG_MAX_VALUE (1)),
+ SR_SSBS ("ssbs", 0x19, F_REG_MAX_VALUE (1)),
+ SR_V8_4 ("dit", 0x1a, F_REG_MAX_VALUE (1)),
+ SR_MEMTAG ("tco", 0x1c, F_REG_MAX_VALUE (1)),
+ SR_SME ("svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
+ | F_REG_MAX_VALUE (1)),
+ SR_SME ("svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
+ | F_REG_MAX_VALUE (1)),
+ SR_SME ("svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
+ | F_REG_MAX_VALUE (1)),
+ SR_V8_8 ("allint", 0x08, F_REG_MAX_VALUE (1)),
{ 0, CPENC (0,0,0,0,0), 0, 0 },
};
-bfd_boolean
+bool
aarch64_pstatefield_supported_p (const aarch64_feature_set features,
const aarch64_sys_reg *reg)
{
if (!(reg->flags & F_ARCHEXT))
- return TRUE;
+ return true;
return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
}
{ "cisw", CPENS (0, C7, C14, 2), F_HASXT },
{ "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
{ "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
+ { "cipapa", CPENS (6, C7, C14, 1), F_HASXT },
+ { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT },
{ 0, CPENS(0,0,0,0), 0 }
};
{ "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
{ "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
+ { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
+ { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
+ { "paallos", CPENS (6, C8, C1, 4), 0},
+ { "paall", CPENS (6, C8, C7, 4), 0},
+
{ 0, CPENS(0,0,0,0), 0 }
};
{ 0, CPENS(0,0,0,0), 0 }
};
-bfd_boolean
+bool
aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
{
return (sys_ins_reg->flags & F_HASXT) != 0;
}
-extern bfd_boolean
+extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
const char *reg_name,
aarch64_insn reg_value,
{
const char *suffix = strrchr (reg_name, '_');
if (suffix && !strcmp (suffix, "_el3"))
- return FALSE;
+ return false;
}
if (!(reg_flags & F_ARCHEXT))
- return TRUE;
+ return true;
if (reg_features
&& AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
- return TRUE;
+ return true;
/* ARMv8.4 TLB instructions. */
if ((reg_value == CPENS (0, C8, C1, 0)
|| reg_value == CPENS (6, C8, C5, 1)
|| reg_value == CPENS (6, C8, C5, 5))
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
- return TRUE;
+ return true;
/* DC CVAP. Values are from aarch64_sys_regs_dc. */
if (reg_value == CPENS (3, C7, C12, 1)
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
- return TRUE;
+ return true;
/* DC CVADP. Values are from aarch64_sys_regs_dc. */
if (reg_value == CPENS (3, C7, C13, 1)
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
- return TRUE;
+ return true;
/* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
if ((reg_value == CPENS (0, C7, C6, 3)
|| reg_value == CPENS (3, C7, C14, 5)
|| reg_value == CPENS (3, C7, C4, 4))
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
- return TRUE;
+ return true;
/* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
if ((reg_value == CPENS (0, C7, C9, 0)
|| reg_value == CPENS (0, C7, C9, 1))
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
- return TRUE;
+ return true;
/* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
if (reg_value == CPENS (3, C7, C3, 0)
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
#undef C0
static enum err_type
verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
- bfd_boolean encoding ATTRIBUTE_UNUSED,
+ bool encoding ATTRIBUTE_UNUSED,
aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
static enum err_type
verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
- bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
+ bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
return ERR_OK;
}
+/* Check an instruction that takes three register operands and that
+ requires the register numbers to be distinct from one another. */
+
+static enum err_type
+verify_three_different_regs (const struct aarch64_inst *inst,
+ const aarch64_insn insn ATTRIBUTE_UNUSED,
+ bfd_vma pc ATTRIBUTE_UNUSED,
+ bool encoding ATTRIBUTE_UNUSED,
+ aarch64_operand_error *mismatch_detail
+ ATTRIBUTE_UNUSED,
+ aarch64_instr_sequence *insn_sequence
+ ATTRIBUTE_UNUSED)
+{
+ int rd, rs, rn;
+
+ rd = inst->operands[0].reg.regno;
+ rs = inst->operands[1].reg.regno;
+ rn = inst->operands[2].reg.regno;
+ if (rd == rs || rd == rn || rs == rn)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error
+ = _("the three register operands must be distinct from one another");
+ mismatch_detail->index = -1;
+ return ERR_UND;
+ }
+
+ return ERR_OK;
+}
+
+/* Add INST to the end of INSN_SEQUENCE. */
+
+static void
+add_insn_to_sequence (const struct aarch64_inst *inst,
+ aarch64_instr_sequence *insn_sequence)
+{
+ insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
+}
+
/* Initialize an instruction sequence insn_sequence with the instruction INST.
If INST is NULL the given insn_sequence is cleared and the sequence is left
uninitialized. */
aarch64_instr_sequence *insn_sequence)
{
int num_req_entries = 0;
- insn_sequence->next_insn = 0;
- insn_sequence->num_insns = num_req_entries;
- if (insn_sequence->instr)
- XDELETE (insn_sequence->instr);
- insn_sequence->instr = NULL;
- if (inst)
+ if (insn_sequence->instr)
{
- insn_sequence->instr = XNEW (aarch64_inst);
- memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
+ XDELETE (insn_sequence->instr);
+ insn_sequence->instr = NULL;
}
/* Handle all the cases here. May need to think of something smarter than
best. */
if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
num_req_entries = 1;
+ if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
+ num_req_entries = 2;
- if (insn_sequence->current_insns)
- XDELETEVEC (insn_sequence->current_insns);
- insn_sequence->current_insns = NULL;
+ insn_sequence->num_added_insns = 0;
+ insn_sequence->num_allocated_insns = num_req_entries;
if (num_req_entries != 0)
{
- size_t size = num_req_entries * sizeof (aarch64_inst);
- insn_sequence->current_insns
- = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
- memset (insn_sequence->current_insns, 0, size);
+ insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
+ add_insn_to_sequence (inst, insn_sequence);
}
}
+/* Subroutine of verify_constraints. Check whether the instruction
+ is part of a MOPS P/M/E sequence and, if so, whether sequencing
+ expectations are met. Return true if the check passes, otherwise
+ describe the problem in MISMATCH_DETAIL.
+
+ IS_NEW_SECTION is true if INST is assumed to start a new section.
+ The other arguments are as for verify_constraints. */
+
+static bool
+verify_mops_pme_sequence (const struct aarch64_inst *inst,
+ bool is_new_section,
+ aarch64_operand_error *mismatch_detail,
+ aarch64_instr_sequence *insn_sequence)
+{
+ const struct aarch64_opcode *opcode;
+ const struct aarch64_inst *prev_insn;
+ int i;
+
+ opcode = inst->opcode;
+ if (insn_sequence->instr)
+ prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
+ else
+ prev_insn = NULL;
+
+ if (prev_insn
+ && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
+ && prev_insn->opcode != opcode - 1)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
+ mismatch_detail->error = NULL;
+ mismatch_detail->index = -1;
+ mismatch_detail->data[0].s = prev_insn->opcode[1].name;
+ mismatch_detail->data[1].s = prev_insn->opcode->name;
+ mismatch_detail->non_fatal = true;
+ return false;
+ }
+
+ if (opcode->constraints & C_SCAN_MOPS_PME)
+ {
+ if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
+ mismatch_detail->error = NULL;
+ mismatch_detail->index = -1;
+ mismatch_detail->data[0].s = opcode->name;
+ mismatch_detail->data[1].s = opcode[-1].name;
+ mismatch_detail->non_fatal = true;
+ return false;
+ }
+
+ for (i = 0; i < 3; ++i)
+ /* There's no specific requirement for the data register to be
+ the same between consecutive SET* instructions. */
+ if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
+ || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
+ || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
+ && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
+ mismatch_detail->error = _("destination register differs from "
+ "preceding instruction");
+ else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
+ mismatch_detail->error = _("source register differs from "
+ "preceding instruction");
+ else
+ mismatch_detail->error = _("size register differs from "
+ "preceding instruction");
+ mismatch_detail->index = i;
+ mismatch_detail->non_fatal = true;
+ return false;
+ }
+ }
+
+ return true;
+}
/* This function verifies that the instruction INST adheres to its specified
constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
verify_constraints (const struct aarch64_inst *inst,
const aarch64_insn insn ATTRIBUTE_UNUSED,
bfd_vma pc,
- bfd_boolean encoding,
+ bool encoding,
aarch64_operand_error *mismatch_detail,
aarch64_instr_sequence *insn_sequence)
{
mismatch_detail->error = _("instruction opens new dependency "
"sequence without ending previous one");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
}
return res;
}
+ bool is_new_section = (!encoding && pc == 0);
+ if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
+ insn_sequence))
+ {
+ res = ERR_VFI;
+ if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
+ init_insn_sequence (NULL, insn_sequence);
+ }
+
/* Verify constraints on an existing sequence. */
if (insn_sequence->instr)
{
const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
/* If we're decoding and we hit PC=0 with an open sequence then we haven't
closed a previous one that we should have. */
- if (!encoding && pc == 0)
+ if (is_new_section && res == ERR_OK)
{
mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
mismatch_detail->error = _("previous `movprfx' sequence not closed");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
/* Reset the sequence. */
init_insn_sequence (NULL, insn_sequence);
mismatch_detail->error = _("SVE instruction expected after "
"`movprfx'");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
mismatch_detail->error = _("SVE `movprfx' compatible instruction "
"expected");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
aarch64_opnd_info blk_pred, inst_pred;
memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
- bfd_boolean predicated = FALSE;
+ bool predicated = false;
assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
/* Determine if the movprfx instruction used is predicated or not. */
if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
{
- predicated = TRUE;
+ predicated = true;
blk_pred = insn_sequence->instr->operands[1];
}
case AARCH64_OPND_SVE_Pm:
case AARCH64_OPND_SVE_Pn:
case AARCH64_OPND_SVE_Pt:
+ case AARCH64_OPND_SME_Pm:
inst_pred = inst_op;
inst_pred_idx = i;
break;
mismatch_detail->error = _("predicated instruction expected "
"after `movprfx'");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
mismatch_detail->error = _("merging predicate expected due "
"to preceding `movprfx'");
mismatch_detail->index = inst_pred_idx;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
"from that in preceding "
"`movprfx'");
mismatch_detail->index = inst_pred_idx;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
"`movprfx' not used in current "
"instruction");
mismatch_detail->index = 0;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
mismatch_detail->error = _("output register of preceding "
"`movprfx' expected as output");
mismatch_detail->index = 0;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
mismatch_detail->error = _("output register of preceding "
"`movprfx' used as input");
mismatch_detail->index = last_op_usage;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
mismatch_detail->error = _("register size not compatible with "
"previous `movprfx'");
mismatch_detail->index = 0;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
}
done:
- /* Add the new instruction to the sequence. */
- memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
- inst, sizeof (aarch64_inst));
-
- /* Check if sequence is now full. */
- if (insn_sequence->next_insn >= insn_sequence->num_insns)
- {
- /* Sequence is full, but we don't have anything special to do for now,
- so clear and reset it. */
- init_insn_sequence (NULL, insn_sequence);
- }
+ if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
+ /* We've checked the last instruction in the sequence and so
+ don't need the sequence any more. */
+ init_insn_sequence (NULL, insn_sequence);
+ else
+ add_insn_to_sequence (inst, insn_sequence);
}
return res;
(with any element size, not just ESIZE) and if using DUPM would
therefore be OK. ESIZE is the number of bytes in the immediate. */
-bfd_boolean
+bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
int64_t svalue = uvalue;
uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
- return FALSE;
+ return false;
if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
{
svalue = (int32_t) uvalue;
{
svalue = (int16_t) uvalue;
if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
- return FALSE;
+ return false;
}
}
if ((svalue & 0xff) == 0)
return svalue < -128 || svalue >= 128;
}
+/* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
+ supports the instruction described by INST. */
+
+bool
+aarch64_cpu_supports_inst_p (uint64_t cpu_variant, aarch64_inst *inst)
+{
+ if (!inst->opcode->avariant
+ || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
+ return false;
+
+ if (inst->opcode->iclass == sme_fp_sd
+ && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
+ && !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant,
+ AARCH64_FEATURE_SME_F64F64))
+ return false;
+
+ if (inst->opcode->iclass == sme_int_sd
+ && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
+ && !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant,
+ AARCH64_FEATURE_SME_I16I64))
+ return false;
+
+ return true;
+}
+
/* Include the opcode description table as well as the operand description
table. */
#define VERIFIER(x) verify_##x