+2014-11-20  Charles Baylis  <charles.baylis@linaro.org>
+
+ PR target/63870
+ * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_args): Pass
+ expression to aarch64_simd_lane_bounds.
+ * config/aarch64/aarch64-protos.h (aarch64_simd_lane_bounds): Update
+ prototype.
+ * config/aarch64/aarch64-simd.md (aarch64_combinez<mode>): Update
+ call to aarch64_simd_lane_bounds.
+ (aarch64_get_lanedi): Likewise.
+ (aarch64_ld2_lane<mode>): Likewise.
+ (aarch64_ld3_lane<mode>): Likewise.
+ (aarch64_ld4_lane<mode>): Likewise.
+ (aarch64_im_lane_boundsi): Likewise.
+ * config/aarch64/aarch64.c (aarch64_simd_lane_bounds): Add exp
+ parameter. Report calling function in error message if exp is non-NULL.
+
2014-11-20  Segher Boessenkool  <segher@kernel.crashing.org>

 PR target/60111
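For motivation (editorial note, not part of the patch): when a lane index is out of range, the error is raised at builtin-expansion time, where the current location need not point at the user's intrinsic call. Threading the call expression through to aarch64_simd_lane_bounds lets the %K diagnostic attach the location of that call. A minimal sketch of the kind of misuse involved, using the standard ACLE vld2q_lane_s32 intrinsic:

/* Editorial sketch: vld2q_lane_s32 accepts lanes 0-3, so lane 4 is
   rejected when the builtin is expanded; with EXP available, the
   diagnostic can identify this call rather than only the expander.  */
#include <arm_neon.h>

int32x4x2_t
bad_ld2_lane (const int32_t *p, int32x4x2_t v)
{
  return vld2q_lane_s32 (p, v, 4);   /* lane 4 out of range 0 - 3 */
}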
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
{
enum machine_mode vmode = mode[argc - 1];
aarch64_simd_lane_bounds (op[argc],
- 0, GET_MODE_NUNITS (vmode));
+ 0, GET_MODE_NUNITS (vmode), exp);
/* Keep to GCC-vector-extension lane indices in the RTL. */
op[argc] = GEN_INT (ENDIAN_LANE_N (vmode, INTVAL (op[argc])));
}
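The comment in this hunk points at the subtlety: RTL keeps GCC-vector-extension (little-endian-style) lane numbering, and ENDIAN_LANE_N reflects the index on big-endian targets. A standalone sketch of that mapping (editorial, mirroring the aarch64.h macro):

/* Editorial sketch of ENDIAN_LANE_N: on big-endian targets lane 0
   sits at the opposite end of the register, so an N-lane vector maps
   lane L to N - 1 - L; little-endian leaves the index unchanged.  */
static inline int
endian_lane_n (int nunits, int lane, int big_endian)
{
  return big_endian ? nunits - 1 - lane : lane;
}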
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
/* Expand builtins for SIMD intrinsics.  */
rtx aarch64_simd_expand_builtin (int, tree, rtx);
-void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);
void aarch64_split_128bit_move (rtx, rtx);
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
(match_operand:SI 2 "immediate_operand")]
"TARGET_SIMD"
{
- aarch64_simd_lane_bounds (operands[2], 0, 1);
+ aarch64_simd_lane_bounds (operands[2], 0, 1, NULL);
emit_move_insn (operands[0], operands[1]);
DONE;
})
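The [0, 1) bounds above reflect that a DImode value is treated as a one-lane vector, so lane 0 is the only valid index; this presumably backs the 64x1 vget_lane intrinsics. An editorial sketch of well-formed use:

/* Editorial sketch: a one-element DI vector has exactly one lane.  */
#include <arm_neon.h>

int64_t
get_only_lane (int64x1_t v)
{
  return vget_lane_s64 (v, 0);   /* only lane 0 is in range */
}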
machine_mode mode = <V_TWO_ELEM>mode;
rtx mem = gen_rtx_MEM (mode, operands[1]);
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode),
+ NULL);
emit_insn (gen_aarch64_vec_load_lanesoi_lane<mode> (operands[0],
mem,
operands[2],
machine_mode mode = <V_THREE_ELEM>mode;
rtx mem = gen_rtx_MEM (mode, operands[1]);
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode),
+ NULL);
emit_insn (gen_aarch64_vec_load_lanesci_lane<mode> (operands[0],
mem,
operands[2],
machine_mode mode = <V_FOUR_ELEM>mode;
rtx mem = gen_rtx_MEM (mode, operands[1]);
- aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode),
+ NULL);
emit_insn (gen_aarch64_vec_load_lanesxi_lane<mode> (operands[0],
mem,
operands[2],
(match_operand:SI 1 "immediate_operand" "i")]
"TARGET_SIMD"
{
- aarch64_simd_lane_bounds (operands[0], 0, INTVAL (operands[1]));
+ aarch64_simd_lane_bounds (operands[0], 0, INTVAL (operands[1]), NULL);
DONE;
}
)
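This expander generates no code; it exists purely to run the bounds check, so lane validation can be folded into intrinsics implemented with GCC vector extensions. A hypothetical sketch, assuming the pattern is exposed as __builtin_aarch64_im_lane_boundsi with the operand order above (lane first, then the lane count):

/* Hypothetical use; the builtin name and (lane, nlanes) order are
   assumed from the expander above.  It emits nothing on success and
   raises the out-of-range diagnostic otherwise.  */
#include <arm_neon.h>

static inline int32_t
my_get_lane (int32x4_t v, const int lane)
{
  __builtin_aarch64_im_lane_boundsi (lane, 4);   /* error unless 0 <= lane < 4 */
  return v[lane];                                /* vector-extension subscript */
}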
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
-/* Bounds-check lanes.  Ensure OPERAND lies between LOW (inclusive) and
-   HIGH (exclusive).  */
+/* Bounds-check lanes.  Ensure OPERAND lies between LOW (inclusive) and
+   HIGH (exclusive).  If EXP is non-NULL, use it to report the calling
+   function in any out-of-range error.  */
void
-aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high,
+ const_tree exp)
{
HOST_WIDE_INT lane;
gcc_assert (CONST_INT_P (operand));
lane = INTVAL (operand);
if (lane < low || lane >= high)
- error ("lane %ld out of range %ld - %ld", lane, low, high - 1);
+ {
+ if (exp)
+ error ("%Klane %wd out of range %wd - %wd", exp, lane, low, high - 1);
+ else
+ error ("lane %wd out of range %wd - %wd", lane, low, high - 1);
+ }
}
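A standalone analogue of the check (editorial), making the half-open convention explicit: LOW is inclusive and HIGH exclusive, so a four-lane vector is checked with low = 0, high = 4.

/* Editorial analogue of aarch64_simd_lane_bounds's range test.  */
static int
lane_in_bounds (long lane, long low, long high)
{
  return lane >= low && lane < high;   /* [low, high) */
}

/* lane_in_bounds (3, 0, 4) == 1; lane_in_bounds (4, 0, 4) == 0.  */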
/* Emit code to place an AdvSIMD pair result in memory locations (with equal