Similar to the previous scalar_int_mode patch: interfaces in the AArch64
back end whose mode argument is always a scalar element mode now take a
scalar_mode rather than a machine_mode.
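As with that patch, a caller that only has a machine_mode in hand can
narrow it with the machmode.h wrapper-class helpers before using the
updated interfaces.  A minimal sketch, not part of this patch
(handle_scalar is a hypothetical consumer of the narrowed mode):

  /* Given some rtx x, narrow its mode to scalar_mode if possible.  */
  machine_mode mode = GET_MODE (x);
  scalar_mode smode;
  if (is_a <scalar_mode> (mode, &smode))
    handle_scalar (smode);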
2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>
gcc/
	* config/aarch64/aarch64-protos.h (aarch64_gen_adjusted_ldpstp):
	Take a scalar_mode rather than a machine_mode.
	(aarch64_operands_adjust_ok_for_ldpstp): Likewise.
	* config/aarch64/aarch64.c (aarch64_simd_container_mode): Likewise.
	(aarch64_operands_adjust_ok_for_ldpstp): Likewise.
	(aarch64_gen_adjusted_ldpstp): Likewise.
	(aarch64_expand_vector_init): Use scalar_mode instead of machine_mode.
Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>
From-SVN: r251736
+2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
+	    Alan Hayward  <alan.hayward@arm.com>
+	    David Sherwood  <david.sherwood@arm.com>
+
+	* config/aarch64/aarch64-protos.h (aarch64_gen_adjusted_ldpstp):
+	Take a scalar_mode rather than a machine_mode.
+	(aarch64_operands_adjust_ok_for_ldpstp): Likewise.
+	* config/aarch64/aarch64.c (aarch64_simd_container_mode): Likewise.
+	(aarch64_operands_adjust_ok_for_ldpstp): Likewise.
+	(aarch64_gen_adjusted_ldpstp): Likewise.
+	(aarch64_expand_vector_init): Use scalar_mode instead of machine_mode.
+
2017-09-05  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>
void aarch64_gen_atomic_ldop (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
-bool aarch64_gen_adjusted_ldpstp (rtx *, bool, machine_mode, RTX_CODE);
+bool aarch64_gen_adjusted_ldpstp (rtx *, bool, scalar_mode, RTX_CODE);
#endif /* RTX_CODE */
void aarch64_init_builtins (void);
bool extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset);
bool aarch64_operands_ok_for_ldpstp (rtx *, bool, machine_mode);
-bool aarch64_operands_adjust_ok_for_ldpstp (rtx *, bool, machine_mode);
+bool aarch64_operands_adjust_ok_for_ldpstp (rtx *, bool, scalar_mode);
extern void aarch64_asm_output_pool_epilogue (FILE *, const char *,
tree, HOST_WIDE_INT);
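Tightening these prototypes means that passing a vector mode where a
scalar element mode is required becomes a compile-time error instead of
a latent runtime bug.  A sketch of the effect (not code from this
patch):

  /* OK: scalar_int_mode constants such as DImode convert implicitly
     to scalar_mode.  */
  aarch64_gen_adjusted_ldpstp (operands, true, DImode, UNKNOWN);

  /* Error: V4SImode is a plain machine_mode, which does not convert
     to the scalar_mode parameter.  */
  aarch64_gen_adjusted_ldpstp (operands, true, V4SImode, UNKNOWN);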
int misalignment,
bool is_packed);
static machine_mode
-aarch64_simd_container_mode (machine_mode mode, unsigned width);
+aarch64_simd_container_mode (scalar_mode mode, unsigned width);
/* Major revision number of the ARM Architecture implemented by the target. */
unsigned aarch64_architecture_version;
/* Return appropriate SIMD container
for MODE within a vector of WIDTH bits. */
static machine_mode
-aarch64_simd_container_mode (machine_mode mode, unsigned width)
+aarch64_simd_container_mode (scalar_mode mode, unsigned width)
{
gcc_assert (width == 64 || width == 128);
if (TARGET_SIMD)
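The mapping itself is unchanged by this patch; with TARGET_SIMD, for
example, a 32-bit element in a 128-bit container selects a four-lane
vector.  An illustrative call (the function is static to aarch64.c):

  machine_mode vmode = aarch64_simd_container_mode (SImode, 128);
  /* vmode == V4SImode when TARGET_SIMD.  */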
aarch64_expand_vector_init (rtx target, rtx vals)
{
machine_mode mode = GET_MODE (target);
- machine_mode inner_mode = GET_MODE_INNER (mode);
+ scalar_mode inner_mode = GET_MODE_INNER (mode);
/* The number of vector elements. */
int n_elts = GET_MODE_NUNITS (mode);
/* The number of vector elements which are not constant. */
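This assignment is what motivates the new variable type: after the
earlier machmode.h changes, GET_MODE_INNER returns a scalar_mode (the
element mode of a vector mode, or the mode itself for a scalar mode),
so storing the result in a scalar_mode keeps that static guarantee
instead of discarding it.  For example:

  scalar_mode elt = GET_MODE_INNER (V4SImode);  /* SImode */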
bool
aarch64_operands_adjust_ok_for_ldpstp (rtx *operands, bool load,
- machine_mode mode)
+ scalar_mode mode)
{
enum reg_class rclass_1, rclass_2, rclass_3, rclass_4;
HOST_WIDE_INT offval_1, offval_2, offval_3, offval_4, msize;
bool
aarch64_gen_adjusted_ldpstp (rtx *operands, bool load,
- machine_mode mode, RTX_CODE code)
+ scalar_mode mode, RTX_CODE code)
{
rtx base, offset, t1, t2;
rtx mem_1, mem_2, mem_3, mem_4;
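The ldp/stp peepholes use these two routines as a pair: the first as
the peephole condition, the second to emit the adjusted sequence.
Roughly, in the style of the aarch64-ldpstp.md bodies (a sketch of the
usage pattern, now requiring a scalar mode constant):

  if (aarch64_operands_adjust_ok_for_ldpstp (operands, true, DImode)
      && aarch64_gen_adjusted_ldpstp (operands, true, DImode, UNKNOWN))
    DONE;
  FAIL;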