+2017-12-20 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * emit-rtl.h (gen_int_shift_amount): Declare.
+ * emit-rtl.c (gen_int_shift_amount): New function.
+ * asan.c (asan_emit_stack_protection): Use gen_int_shift_amount
+ instead of GEN_INT.
+ * calls.c (shift_return_value): Likewise.
+ * cse.c (fold_rtx): Likewise.
+ * dse.c (find_shift_sequence): Likewise.
+ * expmed.c (init_expmed_one_mode, store_bit_field_1, expand_shift_1)
+ (expand_shift, expand_smod_pow2): Likewise.
+ * lower-subreg.c (shift_cost): Likewise.
+ * optabs.c (expand_superword_shift, expand_doubleword_mult)
+ (expand_unop, expand_binop, shift_amt_for_vec_perm_mask)
+ (expand_vec_perm_var): Likewise.
+ * simplify-rtx.c (simplify_unary_operation_1): Likewise.
+ (simplify_binary_operation_1): Likewise.
+ * combine.c (try_combine, find_split_point, force_int_to_mode)
+ (simplify_shift_const_1, simplify_shift_const): Likewise.
+ (change_zero_ext): Likewise. Use simplify_gen_binary.
+
2017-12-20 Richard Sandiford <richard.sandiford@linaro.org>
* poly-int.h (multiple_p): Fix handling of two non-poly_ints.
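[Editor's note] The hunks below all make the same mechanical substitution described in the ChangeLog above: wherever a shift, rotate or similar count was previously built with a bare GEN_INT, it is now built with the new gen_int_shift_amount helper, which also receives the mode of the value being shifted.  A minimal sketch of the new calling pattern, assuming the usual GCC expander interfaces (rtl.h, explow.h, optabs.h); the wrapper function below is illustrative only and not part of the patch:

/* Editor's sketch, not part of the patch: logically shift OP0 (of mode
   MODE) right by BITS, building the shift amount with the new helper
   rather than with GEN_INT.  */
static rtx
sketch_lshiftrt (machine_mode mode, rtx op0, int bits, rtx target)
{
  rtx amount = gen_int_shift_amount (mode, bits);	/* was: GEN_INT (bits) */
  return expand_simple_binop (mode, LSHIFTRT, op0, amount,
			      target, 1, OPTAB_LIB_WIDEN);
}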
TREE_ASM_WRITTEN (id) = 1;
emit_move_insn (mem, expand_normal (build_fold_addr_expr (decl)));
shadow_base = expand_binop (Pmode, lshr_optab, base,
- GEN_INT (ASAN_SHADOW_SHIFT),
+ gen_int_shift_amount (Pmode, ASAN_SHADOW_SHIFT),
NULL_RTX, 1, OPTAB_DIRECT);
shadow_base
= plus_constant (Pmode, shadow_base,
HOST_WIDE_INT shift;
gcc_assert (REG_P (value) && HARD_REGISTER_P (value));
- shift = GET_MODE_BITSIZE (GET_MODE (value)) - GET_MODE_BITSIZE (mode);
+ machine_mode value_mode = GET_MODE (value);
+ shift = GET_MODE_BITSIZE (value_mode) - GET_MODE_BITSIZE (mode);
if (shift == 0)
return false;
/* Use ashr rather than lshr for right shifts. This is for the benefit
of the MIPS port, which requires SImode values to be sign-extended
when stored in 64-bit registers. */
- if (!force_expand_binop (GET_MODE (value), left_p ? ashl_optab : ashr_optab,
- value, GEN_INT (shift), value, 1, OPTAB_WIDEN))
+ if (!force_expand_binop (value_mode, left_p ? ashl_optab : ashr_optab,
+ value, gen_int_shift_amount (value_mode, shift),
+ value, 1, OPTAB_WIDEN))
gcc_unreachable ();
return true;
}
&& INTVAL (XEXP (*split, 1)) > 0
&& (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
{
+ rtx i_rtx = gen_int_shift_amount (split_mode, i);
SUBST (*split, gen_rtx_ASHIFT (split_mode,
- XEXP (*split, 0), GEN_INT (i)));
+ XEXP (*split, 0), i_rtx));
/* Update split_code because we may not have a multiply
anymore. */
split_code = GET_CODE (*split);
&& (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
{
rtx nsplit = XEXP (*split, 0);
+ rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
- XEXP (nsplit, 0), GEN_INT (i)));
+ XEXP (nsplit, 0),
+ i_rtx));
/* Update split_code because we may not have a multiply
anymore. */
split_code = GET_CODE (*split);
0))))) >= 1))
{
machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
-
+ rtx pos_rtx = gen_int_shift_amount (mode, pos);
SUBST (SET_SRC (x),
gen_rtx_NEG (mode,
gen_rtx_LSHIFTRT (mode,
XEXP (SET_SRC (x), 0),
- GEN_INT (pos))));
+ pos_rtx)));
split = find_split_point (&SET_SRC (x), insn, true);
if (split && split != &SET_SRC (x))
{
unsigned HOST_WIDE_INT mask
= (HOST_WIDE_INT_1U << len) - 1;
+ rtx pos_rtx = gen_int_shift_amount (mode, pos);
SUBST (SET_SRC (x),
gen_rtx_AND (mode,
gen_rtx_LSHIFTRT
- (mode, gen_lowpart (mode, inner),
- GEN_INT (pos)),
+ (mode, gen_lowpart (mode, inner), pos_rtx),
gen_int_mode (mask, mode)));
split = find_split_point (&SET_SRC (x), insn, true);
}
else
{
+ int left_bits = GET_MODE_PRECISION (mode) - len - pos;
+ int right_bits = GET_MODE_PRECISION (mode) - len;
SUBST (SET_SRC (x),
gen_rtx_fmt_ee
(unsignedp ? LSHIFTRT : ASHIFTRT, mode,
gen_rtx_ASHIFT (mode,
gen_lowpart (mode, inner),
- GEN_INT (GET_MODE_PRECISION (mode)
- - len - pos)),
- GEN_INT (GET_MODE_PRECISION (mode) - len)));
+ gen_int_shift_amount (mode, left_bits)),
+ gen_int_shift_amount (mode, right_bits)));
split = find_split_point (&SET_SRC (x), insn, true);
if (split && split != &SET_SRC (x))
/* Must be more sign bit copies than the mask needs. */
&& ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
>= exact_log2 (mask + 1)))
- x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
- GEN_INT (GET_MODE_PRECISION (xmode)
- - exact_log2 (mask + 1)));
-
+ {
+ int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
+ x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
+ gen_int_shift_amount (xmode, nbits));
+ }
goto shiftrt;
case ASHIFTRT:
{
enum rtx_code orig_code = code;
rtx orig_varop = varop;
- int count;
+ int count, log2;
machine_mode mode = result_mode;
machine_mode shift_mode;
scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
is cheaper. But it is still better on those machines to
merge two shifts into one. */
if (CONST_INT_P (XEXP (varop, 1))
- && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
+ && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
{
- varop
- = simplify_gen_binary (ASHIFT, GET_MODE (varop),
- XEXP (varop, 0),
- GEN_INT (exact_log2 (
- UINTVAL (XEXP (varop, 1)))));
+ rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
+ varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
+ XEXP (varop, 0), log2_rtx);
continue;
}
break;
case UDIV:
/* Similar, for when divides are cheaper. */
if (CONST_INT_P (XEXP (varop, 1))
- && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
+ && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
{
- varop
- = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
- XEXP (varop, 0),
- GEN_INT (exact_log2 (
- UINTVAL (XEXP (varop, 1)))));
+ rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
+ varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
+ XEXP (varop, 0), log2_rtx);
continue;
}
break;
mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
int_result_mode);
-
+ rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
mask_rtx
= simplify_const_binary_operation (code, int_result_mode,
- mask_rtx, GEN_INT (count));
+ mask_rtx, count_rtx);
/* Give up if we can't compute an outer operation to use. */
if (mask_rtx == 0
if (code == ASHIFTRT && int_mode != int_result_mode)
break;
+ rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
rtx new_rtx = simplify_const_binary_operation (code, int_mode,
XEXP (varop, 0),
- GEN_INT (count));
+ count_rtx);
varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
count = 0;
continue;
&& (new_rtx = simplify_const_binary_operation
(code, int_result_mode,
gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
- GEN_INT (count))) != 0
+ gen_int_shift_amount (int_result_mode, count))) != 0
&& CONST_INT_P (new_rtx)
&& merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
INTVAL (new_rtx), int_result_mode,
&& (new_rtx = simplify_const_binary_operation
(ASHIFT, int_result_mode,
gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
- GEN_INT (count))) != 0
+ gen_int_shift_amount (int_result_mode, count))) != 0
&& CONST_INT_P (new_rtx)
&& merge_outer_ops (&outer_op, &outer_const, PLUS,
INTVAL (new_rtx), int_result_mode,
&& (new_rtx = simplify_const_binary_operation
(code, int_result_mode,
gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
- GEN_INT (count))) != 0
+ gen_int_shift_amount (int_result_mode, count))) != 0
&& CONST_INT_P (new_rtx)
&& merge_outer_ops (&outer_op, &outer_const, XOR,
INTVAL (new_rtx), int_result_mode,
- GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
{
rtx varop_inner = XEXP (varop, 0);
-
- varop_inner
- = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
- XEXP (varop_inner, 0),
- GEN_INT
- (count + INTVAL (XEXP (varop_inner, 1))));
+ int new_count = count + INTVAL (XEXP (varop_inner, 1));
+ rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
+ new_count);
+ varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
+ XEXP (varop_inner, 0),
+ new_count_rtx);
varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
count = 0;
continue;
x = NULL_RTX;
if (x == NULL_RTX)
- x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
+ x = simplify_gen_binary (code, shift_mode, varop,
+ gen_int_shift_amount (shift_mode, count));
/* If we were doing an LSHIFTRT in a wider mode than it was originally,
turn off all the bits that the shift would have turned off. */
return tem;
if (!x)
- x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
+ x = simplify_gen_binary (code, GET_MODE (varop), varop,
+ gen_int_shift_amount (GET_MODE (varop), count));
if (GET_MODE (x) != result_mode)
x = gen_lowpart (result_mode, x);
return x;
if (BITS_BIG_ENDIAN)
start = GET_MODE_PRECISION (inner_mode) - size - start;
- if (start)
- x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
+ if (start != 0)
+ x = simplify_gen_binary (LSHIFTRT, inner_mode, XEXP (x, 0),
+ gen_int_shift_amount (inner_mode, start));
else
x = XEXP (x, 0);
if (mode != inner_mode)
|| INTVAL (const_arg1) < 0))
{
if (SHIFT_COUNT_TRUNCATED)
- canon_const_arg1 = GEN_INT (INTVAL (const_arg1)
- & (GET_MODE_UNIT_BITSIZE (mode)
- - 1));
+ canon_const_arg1 = gen_int_shift_amount
+ (mode, (INTVAL (const_arg1)
+ & (GET_MODE_UNIT_BITSIZE (mode) - 1)));
else
break;
}
|| INTVAL (inner_const) < 0))
{
if (SHIFT_COUNT_TRUNCATED)
- inner_const = GEN_INT (INTVAL (inner_const)
- & (GET_MODE_UNIT_BITSIZE (mode)
- - 1));
+ inner_const = gen_int_shift_amount
+ (mode, (INTVAL (inner_const)
+ & (GET_MODE_UNIT_BITSIZE (mode) - 1)));
else
break;
}
/* As an exception, we can turn an ASHIFTRT of this
form into a shift of the number of bits - 1. */
if (code == ASHIFTRT)
- new_const = GEN_INT (GET_MODE_UNIT_BITSIZE (mode) - 1);
+ new_const = gen_int_shift_amount
+ (mode, GET_MODE_UNIT_BITSIZE (mode) - 1);
else if (!side_effects_p (XEXP (y, 0)))
return CONST0_RTX (mode);
else
store_mode, byte);
if (ret && CONSTANT_P (ret))
{
+ rtx shift_rtx = gen_int_shift_amount (new_mode, shift);
ret = simplify_const_binary_operation (LSHIFTRT, new_mode,
- ret, GEN_INT (shift));
+ ret, shift_rtx);
if (ret && CONSTANT_P (ret))
{
byte = subreg_lowpart_offset (read_mode, new_mode);
of one dsp where the cost of these two was not the same. But
this really is a rare case anyway. */
target = expand_binop (new_mode, lshr_optab, new_reg,
- GEN_INT (shift), new_reg, 1, OPTAB_DIRECT);
+ gen_int_shift_amount (new_mode, shift),
+ new_reg, 1, OPTAB_DIRECT);
shift_seq = get_insns ();
end_sequence ();
}
}
+/* Return a constant shift amount for shifting a value of mode MODE
+ by VALUE bits. */
+
+rtx
+gen_int_shift_amount (machine_mode, HOST_WIDE_INT value)
+{
+ /* Use a 64-bit mode, to avoid any truncation.
+
+ ??? Perhaps this should be automatically derived from the .md files
+ instead, or perhaps have a target hook. */
+ scalar_int_mode shift_mode = (BITS_PER_UNIT == 8
+ ? DImode
+ : int_mode_for_size (64, 0).require ());
+ return gen_int_mode (value, shift_mode);
+}
+
/* Initialize fields of rtl_data related to stack alignment. */
void
extern void set_reg_attrs_for_decl_rtl (tree t, rtx x);
extern void adjust_reg_mode (rtx, machine_mode);
extern int mem_expr_equal_p (const_tree, const_tree);
+extern rtx gen_int_shift_amount (machine_mode, HOST_WIDE_INT);
extern bool need_atomic_barrier_p (enum memmodel, bool);
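[Editor's note] As the definition above shows, gen_int_shift_amount currently ignores its mode argument and simply builds a CONST_INT via gen_int_mode in a 64-bit integer mode (DImode when BITS_PER_UNIT is 8), so that no shift amount is truncated; the point of the helper is that the choice of mode is made in one place, with the shifted value's mode available should a target ever need something different (see the ??? comment in the function).  A small illustration of what the helper returns; this snippet is an editor's sketch, not part of the patch:

/* Editor's sketch, not part of the patch.  Because CONST_INTs are shared,
   both calls below return the same (const_int 24) object on a target with
   BITS_PER_UNIT == 8.  */
static void
sketch_shift_amount_equivalence (void)
{
  rtx a = gen_int_shift_amount (SImode, 24);
  rtx b = gen_int_mode (24, DImode);
  gcc_assert (a == b && CONST_INT_P (a) && INTVAL (a) == 24);
}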
PUT_MODE (all->zext, wider_mode);
PUT_MODE (all->wide_mult, wider_mode);
PUT_MODE (all->wide_lshr, wider_mode);
- XEXP (all->wide_lshr, 1) = GEN_INT (mode_bitsize);
+ XEXP (all->wide_lshr, 1)
+ = gen_int_shift_amount (wider_mode, mode_bitsize);
set_mul_widen_cost (speed, wider_mode,
set_src_cost (all->wide_mult, wider_mode, speed));
to make sure that for big-endian machines the higher order
bits are used. */
if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
- value_word = simplify_expand_binop (word_mode, lshr_optab,
- value_word,
- GEN_INT (BITS_PER_WORD
- - new_bitsize),
- NULL_RTX, true,
- OPTAB_LIB_WIDEN);
+ {
+ int shift = BITS_PER_WORD - new_bitsize;
+ rtx shift_rtx = gen_int_shift_amount (word_mode, shift);
+ value_word = simplify_expand_binop (word_mode, lshr_optab,
+ value_word, shift_rtx,
+ NULL_RTX, true,
+ OPTAB_LIB_WIDEN);
+ }
if (!store_bit_field_1 (op0, new_bitsize,
bitnum + bit_offset,
if (CONST_INT_P (op1)
&& ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
(unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
- op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
- % GET_MODE_BITSIZE (scalar_mode));
+ op1 = gen_int_shift_amount (mode,
+ (unsigned HOST_WIDE_INT) INTVAL (op1)
+ % GET_MODE_BITSIZE (scalar_mode));
else if (GET_CODE (op1) == SUBREG
&& subreg_lowpart_p (op1)
&& SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
&& IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
GET_MODE_BITSIZE (scalar_mode) - 1))
{
- op1 = GEN_INT (GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
+ op1 = gen_int_shift_amount (mode, (GET_MODE_BITSIZE (scalar_mode)
+ - INTVAL (op1)));
left = !left;
code = left ? LROTATE_EXPR : RROTATE_EXPR;
}
if (op1 == const0_rtx)
return shifted;
else if (CONST_INT_P (op1))
- other_amount = GEN_INT (GET_MODE_BITSIZE (scalar_mode)
- - INTVAL (op1));
+ other_amount = gen_int_shift_amount
+ (mode, GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
else
{
other_amount
expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
int amount, rtx target, int unsignedp)
{
- return expand_shift_1 (code, mode,
- shifted, GEN_INT (amount), target, unsignedp);
+ return expand_shift_1 (code, mode, shifted,
+ gen_int_shift_amount (mode, amount),
+ target, unsignedp);
}
/* Likewise, but return 0 if that cannot be done. */
{
HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
signmask = force_reg (mode, signmask);
- shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
+ shift = gen_int_shift_amount (mode, GET_MODE_BITSIZE (mode) - logd);
/* Use the rtx_cost of a LSHIFTRT instruction to determine
which instruction sequence to use. If logical right shifts
PUT_CODE (rtxes->shift, code);
PUT_MODE (rtxes->shift, mode);
PUT_MODE (rtxes->source, mode);
- XEXP (rtxes->shift, 1) = GEN_INT (op1);
+ XEXP (rtxes->shift, 1) = gen_int_shift_amount (mode, op1);
return set_src_cost (rtxes->shift, mode, speed_p);
}
if (binoptab != ashr_optab)
emit_move_insn (outof_target, CONST0_RTX (word_mode));
else
- if (!force_expand_binop (word_mode, binoptab,
- outof_input, GEN_INT (BITS_PER_WORD - 1),
+ if (!force_expand_binop (word_mode, binoptab, outof_input,
+ gen_int_shift_amount (word_mode,
+ BITS_PER_WORD - 1),
outof_target, unsignedp, methods))
return false;
}
{
int low = (WORDS_BIG_ENDIAN ? 1 : 0);
int high = (WORDS_BIG_ENDIAN ? 0 : 1);
- rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
+ rtx wordm1 = (umulp ? NULL_RTX
+ : gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
rtx product, adjust, product_high, temp;
rtx op0_high = operand_subword_force (op0, high, mode);
unsigned int bits = GET_MODE_PRECISION (int_mode);
if (CONST_INT_P (op1))
- newop1 = GEN_INT (bits - INTVAL (op1));
+ newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
newop1 = negate_rtx (GET_MODE (op1), op1);
else
/* Apply the truncation to constant shifts. */
if (double_shift_mask > 0 && CONST_INT_P (op1))
- op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
+ op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);
if (op1 == CONST0_RTX (op1_mode))
return op0;
else
{
rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
- rtx first_shift_count, second_shift_count;
+ HOST_WIDE_INT first_shift_count, second_shift_count;
optab reverse_unsigned_shift, unsigned_shift;
reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
if (shift_count > BITS_PER_WORD)
{
- first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
- second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
+ first_shift_count = shift_count - BITS_PER_WORD;
+ second_shift_count = 2 * BITS_PER_WORD - shift_count;
}
else
{
- first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
- second_shift_count = GEN_INT (shift_count);
+ first_shift_count = BITS_PER_WORD - shift_count;
+ second_shift_count = shift_count;
}
+ rtx first_shift_count_rtx
+ = gen_int_shift_amount (word_mode, first_shift_count);
+ rtx second_shift_count_rtx
+ = gen_int_shift_amount (word_mode, second_shift_count);
into_temp1 = expand_binop (word_mode, unsigned_shift,
- outof_input, first_shift_count,
+ outof_input, first_shift_count_rtx,
NULL_RTX, unsignedp, next_methods);
into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
- into_input, second_shift_count,
+ into_input, second_shift_count_rtx,
NULL_RTX, unsignedp, next_methods);
if (into_temp1 != 0 && into_temp2 != 0)
emit_move_insn (into_target, inter);
outof_temp1 = expand_binop (word_mode, unsigned_shift,
- into_input, first_shift_count,
+ into_input, first_shift_count_rtx,
NULL_RTX, unsignedp, next_methods);
outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
- outof_input, second_shift_count,
+ outof_input, second_shift_count_rtx,
NULL_RTX, unsignedp, next_methods);
if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
{
- temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
- unsignedp, OPTAB_DIRECT);
+ temp = expand_binop (mode, rotl_optab, op0,
+ gen_int_shift_amount (mode, 8),
+ target, unsignedp, OPTAB_DIRECT);
if (temp)
return temp;
}
if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
{
- temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
- unsignedp, OPTAB_DIRECT);
+ temp = expand_binop (mode, rotr_optab, op0,
+ gen_int_shift_amount (mode, 8),
+ target, unsignedp, OPTAB_DIRECT);
if (temp)
return temp;
}
last = get_last_insn ();
- temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
+ temp1 = expand_binop (mode, ashl_optab, op0,
+ gen_int_shift_amount (mode, 8), NULL_RTX,
unsignedp, OPTAB_WIDEN);
- temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
+ temp2 = expand_binop (mode, lshr_optab, op0,
+ gen_int_shift_amount (mode, 8), NULL_RTX,
unsignedp, OPTAB_WIDEN);
if (temp1 && temp2)
{
return NULL_RTX;
}
- return GEN_INT (first * bitsize);
+ return gen_int_shift_amount (GET_MODE (sel), first * bitsize);
}
/* A subroutine of expand_vec_perm for expanding one vec_perm insn. */
NULL, 0, OPTAB_DIRECT);
else
sel = expand_simple_binop (selmode, ASHIFT, sel,
- GEN_INT (exact_log2 (u)),
+ gen_int_shift_amount (selmode,
+ exact_log2 (u)),
NULL, 0, OPTAB_DIRECT);
gcc_assert (sel != NULL);
if (STORE_FLAG_VALUE == 1)
{
temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
- GEN_INT (isize - 1));
+ gen_int_shift_amount (inner,
+ isize - 1));
if (int_mode == inner)
return temp;
if (GET_MODE_PRECISION (int_mode) > isize)
else if (STORE_FLAG_VALUE == -1)
{
temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
- GEN_INT (isize - 1));
+ gen_int_shift_amount (inner,
+ isize - 1));
if (int_mode == inner)
return temp;
if (GET_MODE_PRECISION (int_mode) > isize)
{
val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
if (val >= 0)
- return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
+ return simplify_gen_binary (ASHIFT, mode, op0,
+ gen_int_shift_amount (mode, val));
}
/* x*2 is x+x and x*(-1) is -x */
/* Convert divide by power of two into shift. */
if (CONST_INT_P (trueop1)
&& (val = exact_log2 (UINTVAL (trueop1))) > 0)
- return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
+ return simplify_gen_binary (LSHIFTRT, mode, op0,
+ gen_int_shift_amount (mode, val));
break;
case DIV:
&& IN_RANGE (INTVAL (trueop1),
GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
GET_MODE_UNIT_PRECISION (mode) - 1))
- return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
- mode, op0,
- GEN_INT (GET_MODE_UNIT_PRECISION (mode)
- - INTVAL (trueop1)));
+ {
+ int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
+ rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
+ return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
+ mode, op0, new_amount_rtx);
+ }
#endif
/* FALLTHRU */
case ASHIFTRT:
== GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
&& subreg_lowpart_p (op0))
{
- rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
- + INTVAL (op1));
+ rtx tmp = gen_int_shift_amount
+ (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
tmp = simplify_gen_binary (code, inner_mode,
XEXP (SUBREG_REG (op0), 0),
tmp);
{
val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
if (val != INTVAL (op1))
- return simplify_gen_binary (code, mode, op0, GEN_INT (val));
+ return simplify_gen_binary (code, mode, op0,
+ gen_int_shift_amount (mode, val));
}
break;