+2016-09-21 Richard Sandiford <richard.sandiford@arm.com>
+
+ * opcode/aarch64.h (sve_cpy, sve_index, sve_limm, sve_misc)
+ (sve_movprfx, sve_pred_zm, sve_shift_pred, sve_shift_unpred)
+ (sve_size_bhs, sve_size_bhsd, sve_size_hsd, sve_size_sd): New
+ aarch64_insn_classes.
+
2016-09-21 Richard Sandiford <richard.sandiford@arm.com>
* opcode/aarch64.h (AARCH64_OPND_SVE_Rm): New aarch64_opnd.
movewide,
pcreladdr,
ic_system,
+ sve_cpy,
+ sve_index,
+ sve_limm,
+ sve_misc,
+ sve_movprfx,
+ sve_pred_zm,
+ sve_shift_pred,
+ sve_shift_unpred,
+ sve_size_bhs,
+ sve_size_bhsd,
+ sve_size_hsd,
+ sve_size_sd,
testbranch,
};
+2016-09-21 Richard Sandiford <richard.sandiford@arm.com>
+
+ * aarch64-opc.h (FLD_SVE_M_4, FLD_SVE_M_14, FLD_SVE_M_16)
+ (FLD_SVE_sz, FLD_SVE_tsz, FLD_SVE_tszl_8, FLD_SVE_tszl_19): New
+ aarch64_field_kinds.
+ * aarch64-opc.c (fields): Add corresponding entries.
+ * aarch64-asm.c (aarch64_get_variant): New function.
+ (aarch64_encode_variant_using_iclass): Likewise.
+ (aarch64_opcode_encode): Call it.
+ * aarch64-dis.c (aarch64_decode_variant_using_iclass): New function.
+ (aarch64_opcode_decode): Call it.
+
2016-09-21 Richard Sandiford <richard.sandiford@arm.com>
* aarch64-tbl.h (AARCH64_OPERANDS): Add entries for the new SVE core
return;
}
+/* Return the index in qualifiers_list that INST is using. Should only
+   be called once the qualifiers are known to be valid. */
+
+static int
+aarch64_get_variant (struct aarch64_inst *inst)
+{
+  int i, nops, variant;
+
+  nops = aarch64_num_of_operands (inst->opcode);
+  /* Try each candidate qualifier sequence in turn; a sequence matches
+     when every operand's qualifier agrees with it.  */
+  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
+    {
+      for (i = 0; i < nops; ++i)
+        if (inst->opcode->qualifiers_list[variant][i]
+            != inst->operands[i].qualifier)
+          break;
+      /* The inner loop ran to completion, so every operand matched.  */
+      if (i == nops)
+        return variant;
+    }
+  /* Per the contract above, the qualifiers are already known to be
+     valid, so failing to find a match indicates an internal bug.  */
+  abort ();
+}
+
/* Do miscellaneous encodings that are not common enough to be driven by
flags. */
DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
+/* Some instructions (including all SVE ones) use the instruction class
+   to describe how a qualifiers_list index is represented in the instruction
+   encoding. If INST is such an instruction, encode the chosen qualifier
+   variant. */
+
+static void
+aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
+{
+  switch (inst->opcode->iclass)
+    {
+    case sve_cpy:
+      /* The variant index is spread across the M bit (bit 14) and the
+         two-bit size field.  */
+      insert_fields (&inst->value, aarch64_get_variant (inst),
+                     0, 2, FLD_SVE_M_14, FLD_size);
+      break;
+
+    case sve_index:
+    case sve_shift_pred:
+    case sve_shift_unpred:
+      /* For indices and shift amounts, the variant is encoded as
+         part of the immediate. */
+      break;
+
+    case sve_limm:
+      /* For sve_limm, the .B, .H, and .S forms are just a convenience
+         and depend on the immediate. They don't have a separate
+         encoding. */
+      break;
+
+    case sve_misc:
+      /* sve_misc instructions have only a single variant. */
+      break;
+
+    case sve_movprfx:
+      /* Like sve_cpy, but the M bit lives at bit 16 instead.  */
+      insert_fields (&inst->value, aarch64_get_variant (inst),
+                     0, 2, FLD_SVE_M_16, FLD_size);
+      break;
+
+    case sve_pred_zm:
+      /* Only a single merge/zero bit (bit 4) distinguishes the variants.  */
+      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
+      break;
+
+    case sve_size_bhs:
+    case sve_size_bhsd:
+      /* The variant index maps directly onto the size field.  */
+      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
+      break;
+
+    case sve_size_hsd:
+      /* H/S/D forms start at size encoding 1; 0 (the B size) is not a
+         valid variant here, hence the +1 (mirrored by the -1 in the
+         decoder).  */
+      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
+      break;
+
+    case sve_size_sd:
+      /* A single sz bit selects between the S and D forms.  */
+      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
+      break;
+
+    default:
+      /* Instruction classes with no iclass-based variant encoding.  */
+      break;
+    }
+}
+
/* Converters converting an alias opcode instruction to its real form. */
/* ROR <Wd>, <Ws>, #<shift>
if (opcode_has_special_coder (opcode))
do_special_encoding (inst);
+ /* Possibly use the instruction class to encode the chosen qualifier
+ variant. */
+ aarch64_encode_variant_using_iclass (inst);
+
encoding_exit:
DEBUG_TRACE ("exit with %s", opcode->name);
}
}
+/* Some instructions (including all SVE ones) use the instruction class
+   to describe how a qualifiers_list index is represented in the instruction
+   encoding. If INST is such an instruction, decode the appropriate fields
+   and fill in the operand qualifiers accordingly. Return true if no
+   problems are found. */
+
+static bfd_boolean
+aarch64_decode_variant_using_iclass (aarch64_inst *inst)
+{
+  int i, variant;
+
+  variant = 0;
+  switch (inst->opcode->iclass)
+    {
+    case sve_cpy:
+      /* Variant is split across the size field and the M bit (bit 14);
+         inverse of the sve_cpy case in the encoder.  */
+      variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
+      break;
+
+    case sve_index:
+      /* The variant is the number of trailing zero bits in tsz; a tsz
+         of zero is not a valid encoding.  */
+      i = extract_field (FLD_SVE_tsz, inst->value, 0);
+      if (i == 0)
+        return FALSE;
+      while ((i & 1) == 0)
+        {
+          i >>= 1;
+          variant += 1;
+        }
+      break;
+
+    case sve_limm:
+      /* Pick the smallest applicable element size. */
+      /* NOTE(review): these masks test bits of the logical-immediate
+         encoding (imm13) — presumably distinguishing which element
+         sizes can represent the immediate; verify against the SVE
+         bitmask-immediate encoding rules.  */
+      if ((inst->value & 0x20600) == 0x600)
+        variant = 0;
+      else if ((inst->value & 0x20400) == 0x400)
+        variant = 1;
+      else if ((inst->value & 0x20000) == 0)
+        variant = 2;
+      else
+        variant = 3;
+      break;
+
+    case sve_misc:
+      /* sve_misc instructions have only a single variant. */
+      break;
+
+    case sve_movprfx:
+      /* Like sve_cpy, but the M bit lives at bit 16.  */
+      variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
+      break;
+
+    case sve_pred_zm:
+      /* A single merge/zero bit (bit 4) selects the variant.  */
+      variant = extract_field (FLD_SVE_M_4, inst->value, 0);
+      break;
+
+    case sve_shift_pred:
+      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
+    sve_shift:
+      /* Shared tail for both shift classes: the variant is the bit
+         position of the (single) leading set bit in the combined tsz
+         field; zero is invalid.  */
+      if (i == 0)
+        return FALSE;
+      while (i != 1)
+        {
+          i >>= 1;
+          variant += 1;
+        }
+      break;
+
+    case sve_shift_unpred:
+      /* Same as sve_shift_pred but the low tsz bits sit at [20,19].  */
+      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
+      goto sve_shift;
+
+    case sve_size_bhs:
+      variant = extract_field (FLD_size, inst->value, 0);
+      /* Size 3 (D) is not a valid form for this class.  */
+      if (variant >= 3)
+        return FALSE;
+      break;
+
+    case sve_size_bhsd:
+      /* All four size encodings are valid and map directly.  */
+      variant = extract_field (FLD_size, inst->value, 0);
+      break;
+
+    case sve_size_hsd:
+      /* Size 0 (B) is invalid; H/S/D map to variants 0..2 (mirrors the
+         +1 in the encoder).  */
+      i = extract_field (FLD_size, inst->value, 0);
+      if (i < 1)
+        return FALSE;
+      variant = i - 1;
+      break;
+
+    case sve_size_sd:
+      /* A single sz bit selects between the S and D forms.  */
+      variant = extract_field (FLD_SVE_sz, inst->value, 0);
+      break;
+
+    default:
+      /* No mapping between instruction class and qualifiers. */
+      return TRUE;
+    }
+
+  /* Copy the selected qualifier sequence into the operands.  */
+  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
+    inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
+  return TRUE;
+}
/* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
fails, which means that CODE is not an instruction of OPCODE; otherwise
return 1.
goto decode_fail;
}
+ /* Possibly use the instruction class to determine the correct
+ qualifier. */
+ if (!aarch64_decode_variant_using_iclass (inst))
+ {
+ DEBUG_TRACE ("iclass-based decoder FAIL");
+ goto decode_fail;
+ }
+
/* Call operand decoders. */
for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
{
{ 31, 1 }, /* b5: in the test bit and branch instructions. */
{ 19, 5 }, /* b40: in the test bit and branch instructions. */
{ 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
+ { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
+ { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
+ { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
{ 17, 1 }, /* SVE_N: SVE equivalent of N. */
{ 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
{ 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
{ 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
{ 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
{ 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
+ { 22, 1 }, /* SVE_sz: 1-bit element size select. */
+ { 16, 4 }, /* SVE_tsz: triangular size select. */
{ 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
+ { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
+ { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
{ 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
{ 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
};
FLD_b5,
FLD_b40,
FLD_scale,
+ FLD_SVE_M_4,
+ FLD_SVE_M_14,
+ FLD_SVE_M_16,
FLD_SVE_N,
FLD_SVE_Pd,
FLD_SVE_Pg3,
FLD_SVE_msz,
FLD_SVE_pattern,
FLD_SVE_prfop,
+ FLD_SVE_sz,
+ FLD_SVE_tsz,
FLD_SVE_tszh,
+ FLD_SVE_tszl_8,
+ FLD_SVE_tszl_19,
FLD_SVE_xs_14,
FLD_SVE_xs_22,
};