NULL_RTX, 1, OPTAB_LIB_WIDEN);
if (bitpos > 0)
value = expand_shift (LSHIFT_EXPR, mode, value,
- build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
+ bitpos, NULL_RTX, 1);
}
/* Now clear the chosen bits in OP0,
/* Signed bit field: sign-extend with two arithmetic shifts. */
target = expand_shift (LSHIFT_EXPR, mode, target,
- build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (mode) - bitsize),
- NULL_RTX, 0);
+ GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
return expand_shift (RSHIFT_EXPR, mode, target,
- build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (mode) - bitsize),
- NULL_RTX, 0);
+ GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
}
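
(Editorial aside, not part of the patch.) The pair of shifts above is the standard way to sign-extend a BITSIZE-wide field sitting in the low bits of a register: shift left until the field's sign bit becomes the register's sign bit, then shift back arithmetically. In plain C the same idea looks roughly like the hypothetical helper below; it relies on GCC's behavior for shifts of signed values (ISO C leaves them implementation-defined or undefined), which is exactly why the compiler emits explicit RTL shifts here instead.

  #include <limits.h>

  /* Sign-extend the low BITSIZE bits of X to the full width of int.  */
  static inline int
  sign_extend_field (int x, int bitsize)
  {
    int drop = (int) sizeof (int) * CHAR_BIT - bitsize;
    return (x << drop) >> drop;
  }
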
/* From here on we know the desired field is smaller than a word. */
{
/* If the field does not already start at the lsb,
shift it so it does. */
- tree amount = build_int_cst (NULL_TREE, bitpos);
/* Maybe propagate the target for the shift. */
/* But not if we will return it--could confuse integrate.c. */
rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
if (tmode != mode) subtarget = 0;
- op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
+ op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitpos, subtarget, 1);
}
/* Convert the value to the desired mode. */
if (mode != tmode)
if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
{
- tree amount
- = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
+ int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitpos);
/* Maybe propagate the target for the shift. */
rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
}
return expand_shift (RSHIFT_EXPR, mode, op0,
- build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (mode) - bitsize),
- target, 0);
+ GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
\f
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
{
if (bitsize != bitsdone)
part = expand_shift (LSHIFT_EXPR, word_mode, part,
- build_int_cst (NULL_TREE, bitsize - bitsdone),
- 0, 1);
+ bitsize - bitsdone, 0, 1);
}
else
{
if (bitsdone != thissize)
part = expand_shift (LSHIFT_EXPR, word_mode, part,
- build_int_cst (NULL_TREE,
- bitsdone - thissize), 0, 1);
+ bitsdone - thissize, 0, 1);
}
if (first)
return result;
/* Signed bit field: sign-extend with two arithmetic shifts. */
result = expand_shift (LSHIFT_EXPR, word_mode, result,
- build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
- NULL_RTX, 0);
+ BITS_PER_WORD - bitsize, NULL_RTX, 0);
return expand_shift (RSHIFT_EXPR, word_mode, result,
- build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
- NULL_RTX, 0);
+ BITS_PER_WORD - bitsize, NULL_RTX, 0);
}
\f
/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
Return the rtx for where the value is. */
rtx
-expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
- tree amount, rtx target, int unsignedp)
+expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
+ tree amount, rtx target, int unsignedp)
{
rtx op1, temp = 0;
int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
shifted = force_reg (mode, shifted);
- temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
- mode, shifted, new_amount, 0, 1);
- temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
- mode, shifted, other_amount, subtarget, 1);
+ temp = expand_variable_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
+ mode, shifted, new_amount, 0, 1);
+ temp1 = expand_variable_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
+ mode, shifted, other_amount,
+ subtarget, 1);
return expand_binop (mode, ior_optab, temp, temp1, target,
unsignedp, methods);
}
gcc_assert (temp);
return temp;
}
+
+/* Output a shift instruction for expression code CODE,
+ with SHIFTED being the rtx for the value to shift,
+ and AMOUNT the amount to shift by.
+ Store the result in the rtx TARGET, if that is convenient.
+ If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
+ Return the rtx for where the value is. */
+
+rtx
+expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
+ int amount, rtx target, int unsignedp)
+{
+ /* ??? If expand_variable_shift were rewritten to take an rtx shift amount,
+ we could avoid building a tree here and pass GEN_INT (amount) directly. */
+ return expand_variable_shift (code, mode, shifted,
+ build_int_cst (integer_type_node, amount),
+ target, unsignedp);
+}
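
With the int-taking wrapper in place, constant-shift call sites shrink from building a tree node to passing a plain integer; for example (x, mode and bits are placeholder names, not from the patch):

  /* Before the patch:  */
  x = expand_shift (LSHIFT_EXPR, mode, x,
                    build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - bits),
                    NULL_RTX, 0);

  /* After the patch:  */
  x = expand_shift (LSHIFT_EXPR, mode, x,
                    GET_MODE_BITSIZE (mode) - bits, NULL_RTX, 0);

Call sites that still compute the shift amount as a tree keep working by calling expand_variable_shift directly.
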
\f
/* Indicates the type of fixup needed after a constant multiplication.
BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
switch (alg->op[opno])
{
case alg_shift:
- tem = expand_shift (LSHIFT_EXPR, mode, accum,
- build_int_cst (NULL_TREE, log),
- NULL_RTX, 0);
+ tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
/* REG_EQUAL note will be attached to the following insn. */
emit_move_insn (accum, tem);
val_so_far <<= log;
break;
case alg_add_t_m2:
- tem = expand_shift (LSHIFT_EXPR, mode, op0,
- build_int_cst (NULL_TREE, log),
- NULL_RTX, 0);
+ tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
add_target ? add_target : accum_target);
val_so_far += (HOST_WIDE_INT) 1 << log;
break;
case alg_sub_t_m2:
- tem = expand_shift (LSHIFT_EXPR, mode, op0,
- build_int_cst (NULL_TREE, log),
- NULL_RTX, 0);
+ tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
add_target ? add_target : accum_target);
val_so_far -= (HOST_WIDE_INT) 1 << log;
case alg_add_t2_m:
accum = expand_shift (LSHIFT_EXPR, mode, accum,
- build_int_cst (NULL_TREE, log),
- shift_subtarget,
- 0);
+ log, shift_subtarget, 0);
accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
add_target ? add_target : accum_target);
val_so_far = (val_so_far << log) + 1;
case alg_sub_t2_m:
accum = expand_shift (LSHIFT_EXPR, mode, accum,
- build_int_cst (NULL_TREE, log),
- shift_subtarget, 0);
+ log, shift_subtarget, 0);
accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
add_target ? add_target : accum_target);
val_so_far = (val_so_far << log) - 1;
break;
case alg_add_factor:
- tem = expand_shift (LSHIFT_EXPR, mode, accum,
- build_int_cst (NULL_TREE, log),
- NULL_RTX, 0);
+ tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
add_target ? add_target : accum_target);
val_so_far += val_so_far << log;
break;
case alg_sub_factor:
- tem = expand_shift (LSHIFT_EXPR, mode, accum,
- build_int_cst (NULL_TREE, log),
- NULL_RTX, 0);
+ tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
(add_target
? add_target : (optimize ? 0 : tem)));
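
(Background note, not part of the patch.) Each alg_* case rebuilds one step of a constant multiplication out of shifts and adds, while val_so_far tracks the constant synthesized so far so the REG_EQUAL notes stay honest. A multiplication by 10, for instance, can be emitted as one alg_shift step followed by one alg_add_t_m2 step; the equivalent C is:

  /* x * 10 decomposed into shift-and-add steps:
       accum = x << 3;        alg_shift,    val_so_far = 8
       accum += x << 1;       alg_add_t_m2, val_so_far = 10  */
  static inline unsigned int
  times_ten (unsigned int x)
  {
    return (x << 3) + (x << 1);
  }

Whether synth_mult actually picks this decomposition depends on the target's rtx costs; the sketch only illustrates the bookkeeping.
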
int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
+ HOST_BITS_PER_WIDE_INT;
return expand_shift (LSHIFT_EXPR, mode, op0,
- build_int_cst (NULL_TREE, shift),
- target, unsignedp);
+ shift, target, unsignedp);
}
}
/* Special case powers of two. */
if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
return expand_shift (LSHIFT_EXPR, mode, op0,
- build_int_cst (NULL_TREE, floor_log2 (coeff)),
- target, unsignedp);
+ floor_log2 (coeff), target, unsignedp);
/* Exclude cost of op0 from max_cost to match the cost
calculation of the synth_mult. */
{
op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
return expand_shift (LSHIFT_EXPR, mode, op0,
- build_int_cst (NULL_TREE, floor_log2 (coeff)),
- target, unsignedp);
+ floor_log2 (coeff), target, unsignedp);
}
/* Exclude cost of op0 from max_cost to match the cost
enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
tem = expand_shift (RSHIFT_EXPR, mode, op0,
- build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
- NULL_RTX, 0);
+ GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
tem = expand_and (mode, tem, op1, NULL_RTX);
adj_operand
= force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
adj_operand);
tem = expand_shift (RSHIFT_EXPR, mode, op1,
- build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
- NULL_RTX, 0);
+ GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
tem = expand_and (mode, tem, op0, NULL_RTX);
target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
target);
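
(Background note, not part of the patch.) The two masked adjustments above implement the usual identity connecting signed and unsigned high-part multiplication: modulo the word size, smulh(a, b) = umulh(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0), with adj_code flipping the sign when converting in the other direction. The RTL materializes each conditional operand branch-free as an arithmetic shift by size - 1 followed by an AND; written with explicit conditionals for a 32-bit word, the adjustment is roughly:

  #include <stdint.h>

  /* Derive the signed high part from the unsigned high part UH of A * B.  */
  static inline uint32_t
  smulh_from_umulh (uint32_t uh, int32_t a, int32_t b)
  {
    uint32_t adj_a = a < 0 ? (uint32_t) b : 0;  /* (a >> 31) & b in the RTL */
    uint32_t adj_b = b < 0 ? (uint32_t) a : 0;  /* (b >> 31) & a in the RTL */
    return uh - adj_a - adj_b;                  /* result is mod 2^32 */
  }
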
wider_mode = GET_MODE_WIDER_MODE (mode);
op = expand_shift (RSHIFT_EXPR, wider_mode, op,
- build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode)), 0, 1);
+ GET_MODE_BITSIZE (mode), 0, 1);
return convert_modes (mode, wider_mode, op, 0);
}
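
When a widening multiply is available, the high part of a product is just the widened product shifted right by the narrow mode's width, which is what the shift-and-convert above does. The same operation in C, assuming 32-bit operands widened to 64 bits (an illustration, not part of the patch):

  #include <stdint.h>

  /* Unsigned high-part multiply: upper 32 bits of the 64-bit product.  */
  static inline uint32_t
  umulh32 (uint32_t a, uint32_t b)
  {
    return (uint32_t) (((uint64_t) a * b) >> 32);
  }
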
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
rtx temp, label;
- tree shift;
int logd;
logd = floor_log2 (d);
- shift = build_int_cst (NULL_TREE, logd);
if (d == 2
&& BRANCH_COST (optimize_insn_for_speed_p (),
temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
0, OPTAB_LIB_WIDEN);
- return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
+ return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
#ifdef HAVE_conditional_move
rtx seq = get_insns ();
end_sequence ();
emit_insn (seq);
- return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
+ return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
}
end_sequence ();
}
NULL_RTX, 0, OPTAB_LIB_WIDEN);
else
temp = expand_shift (RSHIFT_EXPR, mode, temp,
- build_int_cst (NULL_TREE, ushift),
- NULL_RTX, 1);
+ ushift, NULL_RTX, 1);
temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
0, OPTAB_LIB_WIDEN);
- return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
+ return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
label = gen_label_rtx ();
do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
expand_inc (temp, GEN_INT (d - 1));
emit_label (label);
- return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
+ return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
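
(Editorial aside, not part of the patch.) Every branch of expand_sdiv_pow2 computes the same thing by different means: truncating signed division by d = 1 << logd must round toward zero, so a negative dividend needs d - 1 added before the arithmetic shift. A compact C rendering of the semantics, assuming GCC's arithmetic right shift of signed values:

  /* Truncating signed division of X by 1 << LOGD.  */
  static inline long
  sdiv_pow2 (long x, int logd)
  {
    long d = 1L << logd;
    return (x < 0 ? x + (d - 1) : x) >> logd;
  }

The emit_store_flag, conditional-move and branch paths above just pick the cheapest way for the target to add that d - 1 conditionally.
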
\f
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
return gen_lowpart (mode, remainder);
}
quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE,
- pre_shift),
- tquotient, 1);
+ pre_shift, tquotient, 1);
}
else if (size <= HOST_BITS_PER_WIDE_INT)
{
t2 = force_operand (gen_rtx_MINUS (compute_mode,
op0, t1),
NULL_RTX);
- t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
- integer_one_node, NULL_RTX, 1);
+ t3 = expand_shift (RSHIFT_EXPR, compute_mode,
+ t2, 1, NULL_RTX, 1);
t4 = force_operand (gen_rtx_PLUS (compute_mode,
t1, t3),
NULL_RTX);
quotient = expand_shift
(RSHIFT_EXPR, compute_mode, t4,
- build_int_cst (NULL_TREE, post_shift - 1),
- tquotient, 1);
+ post_shift - 1, tquotient, 1);
}
else
{
t1 = expand_shift
(RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE, pre_shift),
- NULL_RTX, 1);
+ pre_shift, NULL_RTX, 1);
extra_cost
= (shift_cost[speed][compute_mode][pre_shift]
+ shift_cost[speed][compute_mode][post_shift]);
goto fail1;
quotient = expand_shift
(RSHIFT_EXPR, compute_mode, t2,
- build_int_cst (NULL_TREE, post_shift),
- tquotient, 1);
+ post_shift, tquotient, 1);
}
}
}
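
(Background note on the technique, not something this patch changes.) The t1..t4 sequence above is the classic add-back form of unsigned division by a constant, used when the magic multiplier needs one more bit than the word provides: with t1 = umulh(n, m), the quotient is (t1 + ((n - t1) >> 1)) >> (post_shift - 1). A 32-bit sketch, where m and post_shift are hypothetical values that would come from choose_multiplier:

  #include <stdint.h>

  /* N / D via precomputed magic multiplier M and POST_SHIFT (add-back form).  */
  static inline uint32_t
  udiv_magic_addback (uint32_t n, uint32_t m, int post_shift)
  {
    uint32_t t1 = (uint32_t) (((uint64_t) n * m) >> 32);  /* umulh */
    return (t1 + ((n - t1) >> 1)) >> (post_shift - 1);
  }
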
goto fail1;
t2 = expand_shift
(RSHIFT_EXPR, compute_mode, t1,
- build_int_cst (NULL_TREE, post_shift),
- NULL_RTX, 0);
+ post_shift, NULL_RTX, 0);
t3 = expand_shift
(RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE, size - 1),
- NULL_RTX, 0);
+ size - 1, NULL_RTX, 0);
if (d < 0)
quotient
= force_operand (gen_rtx_MINUS (compute_mode,
NULL_RTX);
t3 = expand_shift
(RSHIFT_EXPR, compute_mode, t2,
- build_int_cst (NULL_TREE, post_shift),
- NULL_RTX, 0);
+ post_shift, NULL_RTX, 0);
t4 = expand_shift
(RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE, size - 1),
- NULL_RTX, 0);
+ size - 1, NULL_RTX, 0);
if (d < 0)
quotient
= force_operand (gen_rtx_MINUS (compute_mode,
}
quotient = expand_shift
(RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE, pre_shift),
- tquotient, 0);
+ pre_shift, tquotient, 0);
}
else
{
{
t1 = expand_shift
(RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE, size - 1),
- NULL_RTX, 0);
+ size - 1, NULL_RTX, 0);
t2 = expand_binop (compute_mode, xor_optab, op0, t1,
NULL_RTX, 0, OPTAB_WIDEN);
extra_cost = (shift_cost[speed][compute_mode][post_shift]
{
t4 = expand_shift
(RSHIFT_EXPR, compute_mode, t3,
- build_int_cst (NULL_TREE, post_shift),
- NULL_RTX, 1);
+ post_shift, NULL_RTX, 1);
quotient = expand_binop (compute_mode, xor_optab,
t4, t1, tquotient, 0,
OPTAB_WIDEN);
0, OPTAB_WIDEN);
nsign = expand_shift
(RSHIFT_EXPR, compute_mode, t2,
- build_int_cst (NULL_TREE, size - 1),
- NULL_RTX, 0);
+ size - 1, NULL_RTX, 0);
t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
NULL_RTX);
t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
rtx t1, t2, t3;
unsigned HOST_WIDE_INT d = INTVAL (op1);
t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE, floor_log2 (d)),
- tquotient, 1);
+ floor_log2 (d), tquotient, 1);
t2 = expand_binop (compute_mode, and_optab, op0,
GEN_INT (d - 1),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
rtx t1, t2, t3;
unsigned HOST_WIDE_INT d = INTVAL (op1);
t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE, floor_log2 (d)),
- tquotient, 0);
+ floor_log2 (d), tquotient, 0);
t2 = expand_binop (compute_mode, and_optab, op0,
GEN_INT (d - 1),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
pre_shift = floor_log2 (d & -d);
ml = invert_mod2n (d >> pre_shift, size);
t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
- build_int_cst (NULL_TREE, pre_shift),
- NULL_RTX, unsignedp);
+ pre_shift, NULL_RTX, unsignedp);
quotient = expand_mult (compute_mode, t1,
gen_int_mode (ml, compute_mode),
NULL_RTX, 1);
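
(Editorial aside, not part of the patch.) This path appears to be the exact-division case: when op0 is known to be a multiple of d = odd * 2^pre_shift, dividing reduces to shifting out the trailing zero bits and multiplying by the modular inverse of the odd part, which is what invert_mod2n supplies. A 32-bit sketch, with the inverse computed here by Newton iteration purely for illustration:

  #include <stdint.h>

  /* Exact division of N by D_ODD << PRE_SHIFT; only valid when that
     value really divides N.  */
  static inline uint32_t
  exact_div (uint32_t n, uint32_t d_odd, int pre_shift)
  {
    uint32_t inv = d_odd;            /* correct to 3 bits: d*d == 1 mod 8 */
    for (int i = 0; i < 4; i++)
      inv *= 2 - d_odd * inv;        /* each step doubles the correct bits */
    return (n >> pre_shift) * inv;   /* == n / (d_odd << pre_shift) mod 2^32 */
  }
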
remainder, 1, OPTAB_LIB_WIDEN);
}
tem = plus_constant (op1, -1);
- tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
- integer_one_node, NULL_RTX, 1);
+ tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
expand_inc (quotient, const1_rtx);
expand_dec (remainder, op1);
abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
- integer_one_node, NULL_RTX, 1);
+ 1, NULL_RTX, 1);
do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
tem = expand_binop (compute_mode, xor_optab, op0, op1,
NULL_RTX, 0, OPTAB_WIDEN);
mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
- build_int_cst (NULL_TREE, size - 1),
- NULL_RTX, 0);
+ size - 1, NULL_RTX, 0);
tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
NULL_RTX, 0, OPTAB_WIDEN);
tem = expand_binop (compute_mode, sub_optab, tem, mask,
&& (STORE_FLAG_VALUE
& ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
- size_int (GET_MODE_BITSIZE (result_mode) - 1), subtarget,
+ GET_MODE_BITSIZE (result_mode) - 1, subtarget,
normalizep == 1);
else
{
a logical shift from the sign bit to the low-order bit; for
a -1/0 value, we do an arithmetic shift. */
op0 = expand_shift (RSHIFT_EXPR, mode, op0,
- size_int (GET_MODE_BITSIZE (mode) - 1),
+ GET_MODE_BITSIZE (mode) - 1,
subtarget, normalizep != -1);
if (mode != target_mode)
subtarget = 0;
tem = expand_shift (RSHIFT_EXPR, mode, op0,
- size_int (GET_MODE_BITSIZE (mode) - 1),
+ GET_MODE_BITSIZE (mode) - 1,
subtarget, 0);
tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
OPTAB_WIDEN);
if (tem && normalizep)
tem = expand_shift (RSHIFT_EXPR, mode, tem,
- size_int (GET_MODE_BITSIZE (mode) - 1),
+ GET_MODE_BITSIZE (mode) - 1,
subtarget, normalizep == 1);
if (tem)