+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * machmode.h (is_int_mode): New function.
+ * combine.c (find_split_point): Use it.
+ (combine_simplify_rtx): Likewise.
+ (simplify_if_then_else): Likewise.
+ (simplify_set): Likewise.
+ (simplify_shift_const_1): Likewise.
+ (simplify_comparison): Likewise.
+ * config/aarch64/aarch64.c (aarch64_rtx_costs): Likewise.
+ * cse.c (notreg_cost): Likewise.
+ (cse_insn): Likewise.
+ * cselib.c (cselib_lookup_1): Likewise.
+ * dojump.c (do_jump_1): Likewise.
+ (do_compare_rtx_and_jump): Likewise.
+ * dse.c (get_call_args): Likewise.
+ * dwarf2out.c (rtl_for_decl_init): Likewise.
+ (native_encode_initializer): Likewise.
+ * expmed.c (emit_store_flag_1): Likewise.
+ (emit_store_flag): Likewise.
+ * expr.c (convert_modes): Likewise.
+ (store_field): Likewise.
+ (expand_expr_real_1): Likewise.
+ * fold-const.c (fold_read_from_constant_string): Likewise.
+ * gimple-ssa-sprintf.c (get_format_string): Likewise.
+ * optabs-libfuncs.c (gen_int_libfunc): Likewise.
+ * optabs.c (expand_binop): Likewise.
+ (expand_unop): Likewise.
+ (expand_abs_nojump): Likewise.
+ (expand_one_cmpl_abs_nojump): Likewise.
+ * simplify-rtx.c (mode_signbit_p): Likewise.
+ (val_signbit_p): Likewise.
+ (val_signbit_known_set_p): Likewise.
+ (val_signbit_known_clear_p): Likewise.
+ (simplify_relational_operation_1): Likewise.
+ * tree.c (vector_type_mode): Likewise.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
HOST_WIDE_INT pos = 0;
int unsignedp = 0;
rtx inner = NULL_RTX;
+ scalar_int_mode inner_mode;
/* First special-case some codes. */
switch (code)
/* We can't optimize if either mode is a partial integer
mode as we don't know how many bits are significant
in those modes. */
- if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
+ if (!is_int_mode (GET_MODE (inner), &inner_mode)
|| GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
break;
pos = 0;
- len = GET_MODE_PRECISION (GET_MODE (inner));
+ len = GET_MODE_PRECISION (inner_mode);
unsignedp = 0;
break;
{
enum rtx_code code = GET_CODE (x);
machine_mode mode = GET_MODE (x);
+ scalar_int_mode int_mode;
rtx temp;
int i;
;
else if (STORE_FLAG_VALUE == 1
- && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
- && op1 == const0_rtx
- && mode == GET_MODE (op0)
- && nonzero_bits (op0, mode) == 1)
- return gen_lowpart (mode,
+ && new_code == NE
+ && is_int_mode (mode, &int_mode)
+ && op1 == const0_rtx
+ && int_mode == GET_MODE (op0)
+ && nonzero_bits (op0, int_mode) == 1)
+ return gen_lowpart (int_mode,
expand_compound_operation (op0));
else if (STORE_FLAG_VALUE == 1
- && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && new_code == NE
+ && is_int_mode (mode, &int_mode)
&& op1 == const0_rtx
- && mode == GET_MODE (op0)
- && (num_sign_bit_copies (op0, mode)
- == GET_MODE_PRECISION (mode)))
+ && int_mode == GET_MODE (op0)
+ && (num_sign_bit_copies (op0, int_mode)
+ == GET_MODE_PRECISION (int_mode)))
{
op0 = expand_compound_operation (op0);
- return simplify_gen_unary (NEG, mode,
- gen_lowpart (mode, op0),
- mode);
+ return simplify_gen_unary (NEG, int_mode,
+ gen_lowpart (int_mode, op0),
+ int_mode);
}
else if (STORE_FLAG_VALUE == 1
- && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && new_code == EQ
+ && is_int_mode (mode, &int_mode)
&& op1 == const0_rtx
- && mode == GET_MODE (op0)
- && nonzero_bits (op0, mode) == 1)
+ && int_mode == GET_MODE (op0)
+ && nonzero_bits (op0, int_mode) == 1)
{
op0 = expand_compound_operation (op0);
- return simplify_gen_binary (XOR, mode,
- gen_lowpart (mode, op0),
+ return simplify_gen_binary (XOR, int_mode,
+ gen_lowpart (int_mode, op0),
const1_rtx);
}
else if (STORE_FLAG_VALUE == 1
- && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && new_code == EQ
+ && is_int_mode (mode, &int_mode)
&& op1 == const0_rtx
- && mode == GET_MODE (op0)
- && (num_sign_bit_copies (op0, mode)
- == GET_MODE_PRECISION (mode)))
+ && int_mode == GET_MODE (op0)
+ && (num_sign_bit_copies (op0, int_mode)
+ == GET_MODE_PRECISION (int_mode)))
{
op0 = expand_compound_operation (op0);
- return plus_constant (mode, gen_lowpart (mode, op0), 1);
+ return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
}
/* If STORE_FLAG_VALUE is -1, we have cases similar to
;
else if (STORE_FLAG_VALUE == -1
- && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && new_code == NE
+ && is_int_mode (mode, &int_mode)
&& op1 == const0_rtx
- && mode == GET_MODE (op0)
- && (num_sign_bit_copies (op0, mode)
- == GET_MODE_PRECISION (mode)))
- return gen_lowpart (mode,
- expand_compound_operation (op0));
+ && int_mode == GET_MODE (op0)
+ && (num_sign_bit_copies (op0, int_mode)
+ == GET_MODE_PRECISION (int_mode)))
+ return gen_lowpart (int_mode, expand_compound_operation (op0));
else if (STORE_FLAG_VALUE == -1
- && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
+ && new_code == NE
+ && is_int_mode (mode, &int_mode)
&& op1 == const0_rtx
- && mode == GET_MODE (op0)
- && nonzero_bits (op0, mode) == 1)
+ && int_mode == GET_MODE (op0)
+ && nonzero_bits (op0, int_mode) == 1)
{
op0 = expand_compound_operation (op0);
- return simplify_gen_unary (NEG, mode,
- gen_lowpart (mode, op0),
- mode);
+ return simplify_gen_unary (NEG, int_mode,
+ gen_lowpart (int_mode, op0),
+ int_mode);
}
else if (STORE_FLAG_VALUE == -1
- && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && new_code == EQ
+ && is_int_mode (mode, &int_mode)
&& op1 == const0_rtx
- && mode == GET_MODE (op0)
- && (num_sign_bit_copies (op0, mode)
- == GET_MODE_PRECISION (mode)))
+ && int_mode == GET_MODE (op0)
+ && (num_sign_bit_copies (op0, int_mode)
+ == GET_MODE_PRECISION (int_mode)))
{
op0 = expand_compound_operation (op0);
- return simplify_gen_unary (NOT, mode,
- gen_lowpart (mode, op0),
- mode);
+ return simplify_gen_unary (NOT, int_mode,
+ gen_lowpart (int_mode, op0),
+ int_mode);
}
/* If X is 0/1, (eq X 0) is X-1. */
else if (STORE_FLAG_VALUE == -1
- && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
+ && new_code == EQ
+ && is_int_mode (mode, &int_mode)
&& op1 == const0_rtx
- && mode == GET_MODE (op0)
- && nonzero_bits (op0, mode) == 1)
+ && int_mode == GET_MODE (op0)
+ && nonzero_bits (op0, int_mode) == 1)
{
op0 = expand_compound_operation (op0);
- return plus_constant (mode, gen_lowpart (mode, op0), -1);
+ return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
}
/* If STORE_FLAG_VALUE says to just test the sign bit and X has just
(ashift x c) where C puts the bit in the sign bit. Remove any
AND with STORE_FLAG_VALUE when we are done, since we are only
going to test the sign bit. */
- if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
- && HWI_COMPUTABLE_MODE_P (mode)
- && val_signbit_p (mode, STORE_FLAG_VALUE)
+ if (new_code == NE
+ && is_int_mode (mode, &int_mode)
+ && HWI_COMPUTABLE_MODE_P (int_mode)
+ && val_signbit_p (int_mode, STORE_FLAG_VALUE)
&& op1 == const0_rtx
- && mode == GET_MODE (op0)
- && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
+ && int_mode == GET_MODE (op0)
+ && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
{
- x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
+ x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
expand_compound_operation (op0),
- GET_MODE_PRECISION (mode) - 1 - i);
+ GET_MODE_PRECISION (int_mode) - 1 - i);
if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
return XEXP (x, 0);
else
int i;
enum rtx_code false_code;
rtx reversed;
+ scalar_int_mode int_mode;
/* Simplify storing of the truth value. */
if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& comparison_p
- && GET_MODE_CLASS (mode) == MODE_INT
+ && is_int_mode (mode, &int_mode)
&& ! side_effects_p (x))
{
rtx t = make_compound_operation (true_rtx, SET);
rtx cond_op0 = XEXP (cond, 0);
rtx cond_op1 = XEXP (cond, 1);
enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
- machine_mode m = mode;
+ machine_mode m = int_mode;
rtx z = 0, c1 = NULL_RTX;
if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
&& (num_sign_bit_copies (f, GET_MODE (f))
> (unsigned int)
- (GET_MODE_PRECISION (mode)
+ (GET_MODE_PRECISION (int_mode)
- GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
{
c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
&& (num_sign_bit_copies (f, GET_MODE (f))
> (unsigned int)
- (GET_MODE_PRECISION (mode)
+ (GET_MODE_PRECISION (int_mode)
- GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
{
c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
|| GET_CODE (XEXP (t, 0)) == LSHIFTRT
|| GET_CODE (XEXP (t, 0)) == ASHIFTRT)
&& GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
- && HWI_COMPUTABLE_MODE_P (mode)
+ && HWI_COMPUTABLE_MODE_P (int_mode)
&& subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
&& ((nonzero_bits (f, GET_MODE (f))
|| GET_CODE (XEXP (t, 0)) == IOR
|| GET_CODE (XEXP (t, 0)) == XOR)
&& GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
- && HWI_COMPUTABLE_MODE_P (mode)
+ && HWI_COMPUTABLE_MODE_P (int_mode)
&& subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
&& ((nonzero_bits (f, GET_MODE (f))
temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
if (extend_op != UNKNOWN)
- temp = simplify_gen_unary (extend_op, mode, temp, m);
+ temp = simplify_gen_unary (extend_op, int_mode, temp, m);
return temp;
}
= GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
rtx_insn *other_insn;
rtx *cc_use;
+ scalar_int_mode int_mode;
/* (set (pc) (return)) gets written as (return). */
if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
if (GET_CODE (dest) != PC
&& GET_CODE (src) == IF_THEN_ELSE
- && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
+ && is_int_mode (GET_MODE (src), &int_mode)
&& (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
&& XEXP (XEXP (src, 0), 1) == const0_rtx
- && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
+ && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
&& (!HAVE_conditional_move
- || ! can_conditionally_move_p (GET_MODE (src)))
- && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
- GET_MODE (XEXP (XEXP (src, 0), 0)))
- == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
+ || ! can_conditionally_move_p (int_mode))
+ && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
+ == GET_MODE_PRECISION (int_mode))
&& ! side_effects_p (src))
{
rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
&& rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
- term2 = simplify_gen_binary (AND, GET_MODE (src),
+ term2 = simplify_gen_binary (AND, int_mode,
XEXP (XEXP (src, 0), 0), true_rtx);
- term3 = simplify_gen_binary (AND, GET_MODE (src),
- simplify_gen_unary (NOT, GET_MODE (src),
+ term3 = simplify_gen_binary (AND, int_mode,
+ simplify_gen_unary (NOT, int_mode,
XEXP (XEXP (src, 0), 0),
- GET_MODE (src)),
+ int_mode),
false_rtx);
SUBST (SET_SRC (x),
- simplify_gen_binary (IOR, GET_MODE (src),
- simplify_gen_binary (IOR, GET_MODE (src),
+ simplify_gen_binary (IOR, int_mode,
+ simplify_gen_binary (IOR, int_mode,
term1, term2),
term3));
rtx orig_varop = varop;
int count;
machine_mode mode = result_mode;
- machine_mode shift_mode, tmode;
+ machine_mode shift_mode;
+ scalar_int_mode tmode, inner_mode;
unsigned int mode_words
= (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
/* We form (outer_op (code varop count) (outer_const)). */
the same number of words as what we've seen so far. Then store
the widest mode in MODE. */
if (subreg_lowpart_p (varop)
- && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
- > GET_MODE_SIZE (GET_MODE (varop)))
- && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
+ && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
+ && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (GET_MODE (varop))
+ && (unsigned int) ((GET_MODE_SIZE (inner_mode)
+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
== mode_words
- && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
- && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
+ && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT)
{
varop = SUBREG_REG (varop);
- if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
- mode = GET_MODE (varop);
+ if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (mode))
+ mode = inner_mode;
continue;
}
break;
rtx op1 = *pop1;
rtx tem, tem1;
int i;
- machine_mode mode, tmode;
+ scalar_int_mode mode, inner_mode;
+ machine_mode tmode;
/* Try a few ways of applying the same transformation to both operands. */
while (1)
;
else if (subreg_lowpart_p (op0)
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
- && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
+ && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
&& (code == NE || code == EQ)
- && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
- <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
&& !paradoxical_subreg_p (op0)
- && (nonzero_bits (SUBREG_REG (op0),
- GET_MODE (SUBREG_REG (op0)))
+ && (nonzero_bits (SUBREG_REG (op0), inner_mode)
& ~GET_MODE_MASK (GET_MODE (op0))) == 0)
{
/* Remove outer subregs that don't do anything. */
- tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
+ tem = gen_lowpart (inner_mode, op1);
- if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
+ if ((nonzero_bits (tem, inner_mode)
& ~GET_MODE_MASK (GET_MODE (op0))) == 0)
{
op0 = SUBREG_REG (op0);
op1 = make_compound_operation (op1, SET);
if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
- && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
+ && is_int_mode (GET_MODE (op0), &mode)
+ && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
&& (code == NE || code == EQ))
{
if (paradoxical_subreg_p (op0))
if (REG_P (SUBREG_REG (op0)))
{
op0 = SUBREG_REG (op0);
- op1 = gen_lowpart (GET_MODE (op0), op1);
+ op1 = gen_lowpart (inner_mode, op1);
}
}
- else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
- <= HOST_BITS_PER_WIDE_INT)
- && (nonzero_bits (SUBREG_REG (op0),
- GET_MODE (SUBREG_REG (op0)))
- & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
+ else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (SUBREG_REG (op0), inner_mode)
+ & ~GET_MODE_MASK (mode)) == 0)
{
- tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
+ tem = gen_lowpart (inner_mode, op1);
- if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
- & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
+ if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
op0 = SUBREG_REG (op0), op1 = tem;
}
}
mode for which we can do the compare. There are a number of cases in
which we can use the wider mode. */
- mode = GET_MODE (op0);
- if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
+ if (is_int_mode (GET_MODE (op0), &mode)
&& GET_MODE_SIZE (mode) < UNITS_PER_WORD
&& ! have_insn_for (COMPARE, mode))
FOR_EACH_WIDER_MODE (tmode, mode)
const struct cpu_cost_table *extra_cost
= aarch64_tune_params.insn_extra_cost;
int code = GET_CODE (x);
+ scalar_int_mode int_mode;
/* By default, assume that everything has equivalent cost to the
cheapest instruction. Any additional costs are applied as a delta
return true;
}
- if (GET_MODE_CLASS (mode) == MODE_INT)
+ if (is_int_mode (mode, &int_mode))
{
if (CONST_INT_P (op1))
{
/* We have a mask + shift version of a UBFIZ
i.e. the *andim_ashift<mode>_bfiz pattern. */
if (GET_CODE (op0) == ASHIFT
- && aarch64_mask_and_shift_for_ubfiz_p (mode, op1,
- XEXP (op0, 1)))
+ && aarch64_mask_and_shift_for_ubfiz_p (int_mode, op1,
+ XEXP (op0, 1)))
{
- *cost += rtx_cost (XEXP (op0, 0), mode,
+ *cost += rtx_cost (XEXP (op0, 0), int_mode,
(enum rtx_code) code, 0, speed);
if (speed)
*cost += extra_cost->alu.bfx;
return true;
}
- else if (aarch64_bitmask_imm (INTVAL (op1), mode))
+ else if (aarch64_bitmask_imm (INTVAL (op1), int_mode))
{
/* We possibly get the immediate for free, this is not
modelled. */
- *cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed);
+ *cost += rtx_cost (op0, int_mode,
+ (enum rtx_code) code, 0, speed);
if (speed)
*cost += extra_cost->alu.logical;
}
/* In both cases we want to cost both operands. */
- *cost += rtx_cost (new_op0, mode, (enum rtx_code) code, 0, speed);
- *cost += rtx_cost (op1, mode, (enum rtx_code) code, 1, speed);
+ *cost += rtx_cost (new_op0, int_mode, (enum rtx_code) code,
+ 0, speed);
+ *cost += rtx_cost (op1, int_mode, (enum rtx_code) code,
+ 1, speed);
return true;
}
static int
notreg_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno)
{
+ scalar_int_mode int_mode, inner_mode;
return ((GET_CODE (x) == SUBREG
&& REG_P (SUBREG_REG (x))
- && GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
- && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))
+ && is_int_mode (mode, &int_mode)
+ && is_int_mode (GET_MODE (SUBREG_REG (x)), &inner_mode)
+ && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
&& subreg_lowpart_p (x)
- && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (SUBREG_REG (x))))
+ && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, inner_mode))
? 0
: rtx_cost (x, mode, outer, opno, optimize_this_for_speed_p) * 2);
}
/* Set nonzero if we need to call force_const_mem on with the
contents of src_folded before using it. */
int src_folded_force_flag = 0;
+ scalar_int_mode int_mode;
dest = SET_DEST (sets[i].rtl);
src = SET_SRC (sets[i].rtl);
wider mode. */
if (src_const && src_related == 0 && CONST_INT_P (src_const)
- && GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_PRECISION (mode) < BITS_PER_WORD)
+ && is_int_mode (mode, &int_mode)
+ && GET_MODE_PRECISION (int_mode) < BITS_PER_WORD)
{
- machine_mode wider_mode;
-
- FOR_EACH_WIDER_MODE (wider_mode, mode)
+ opt_scalar_int_mode wider_mode_iter;
+ FOR_EACH_WIDER_MODE (wider_mode_iter, int_mode)
{
+ scalar_int_mode wider_mode = wider_mode_iter.require ();
if (GET_MODE_PRECISION (wider_mode) > BITS_PER_WORD)
break;
const_elt; const_elt = const_elt->next_same_value)
if (REG_P (const_elt->exp))
{
- src_related = gen_lowpart (mode, const_elt->exp);
+ src_related = gen_lowpart (int_mode, const_elt->exp);
break;
}
e = new_cselib_val (next_uid, GET_MODE (x), x);
new_elt_loc_list (e, x);
+
+ scalar_int_mode int_mode;
if (REG_VALUES (i) == 0)
{
/* Maintain the invariant that the first entry of
REG_VALUES (i) = new_elt_list (REG_VALUES (i), NULL);
}
else if (cselib_preserve_constants
- && GET_MODE_CLASS (mode) == MODE_INT)
+ && is_int_mode (mode, &int_mode))
{
/* During var-tracking, try harder to find equivalences
for SUBREGs. If a setter sets say a DImode register
and user uses that register only in SImode, add a lowpart
subreg location. */
struct elt_list *lwider = NULL;
+ scalar_int_mode lmode;
l = REG_VALUES (i);
if (l && l->elt == NULL)
l = l->next;
for (; l; l = l->next)
- if (GET_MODE_CLASS (GET_MODE (l->elt->val_rtx)) == MODE_INT
- && GET_MODE_SIZE (GET_MODE (l->elt->val_rtx))
- > GET_MODE_SIZE (mode)
+ if (is_int_mode (GET_MODE (l->elt->val_rtx), &lmode)
+ && GET_MODE_SIZE (lmode) > GET_MODE_SIZE (int_mode)
&& (lwider == NULL
- || GET_MODE_SIZE (GET_MODE (l->elt->val_rtx))
+ || GET_MODE_SIZE (lmode)
< GET_MODE_SIZE (GET_MODE (lwider->elt->val_rtx))))
{
struct elt_loc_list *el;
if (i < FIRST_PSEUDO_REGISTER
- && hard_regno_nregs[i][GET_MODE (l->elt->val_rtx)] != 1)
+ && hard_regno_nregs[i][lmode] != 1)
continue;
for (el = l->elt->locs; el; el = el->next)
if (!REG_P (el->loc))
}
if (lwider)
{
- rtx sub = lowpart_subreg (mode, lwider->elt->val_rtx,
+ rtx sub = lowpart_subreg (int_mode, lwider->elt->val_rtx,
GET_MODE (lwider->elt->val_rtx));
if (sub)
new_elt_loc_list (e, sub);
{
machine_mode mode;
rtx_code_label *drop_through_label = 0;
+ scalar_int_mode int_mode;
switch (code)
{
if (integer_zerop (op1))
do_jump (op0, if_true_label, if_false_label,
prob.invert ());
- else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
- && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump))
+ else if (is_int_mode (TYPE_MODE (inner_type), &int_mode)
+ && !can_compare_p (EQ, int_mode, ccp_jump))
do_jump_by_parts_equality (op0, op1, if_false_label, if_true_label,
prob);
else
if (integer_zerop (op1))
do_jump (op0, if_false_label, if_true_label, prob);
- else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT
- && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump))
+ else if (is_int_mode (TYPE_MODE (inner_type), &int_mode)
+ && !can_compare_p (NE, int_mode, ccp_jump))
do_jump_by_parts_equality (op0, op1, if_true_label, if_false_label,
prob.invert ());
else
case LT_EXPR:
mode = TYPE_MODE (TREE_TYPE (op0));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (LT, mode, ccp_jump))
- do_jump_by_parts_greater (op0, op1, 1, if_false_label, if_true_label,
- prob);
+ if (is_int_mode (mode, &int_mode)
+ && ! can_compare_p (LT, int_mode, ccp_jump))
+ do_jump_by_parts_greater (op0, op1, 1, if_false_label,
+ if_true_label, prob);
else
do_compare_and_jump (op0, op1, LT, LTU, if_false_label, if_true_label,
prob);
case LE_EXPR:
mode = TYPE_MODE (TREE_TYPE (op0));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (LE, mode, ccp_jump))
+ if (is_int_mode (mode, &int_mode)
+ && ! can_compare_p (LE, int_mode, ccp_jump))
do_jump_by_parts_greater (op0, op1, 0, if_true_label, if_false_label,
prob.invert ());
else
case GT_EXPR:
mode = TYPE_MODE (TREE_TYPE (op0));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (GT, mode, ccp_jump))
- do_jump_by_parts_greater (op0, op1, 0, if_false_label, if_true_label,
- prob);
+ if (is_int_mode (mode, &int_mode)
+ && ! can_compare_p (GT, int_mode, ccp_jump))
+ do_jump_by_parts_greater (op0, op1, 0, if_false_label,
+ if_true_label, prob);
else
do_compare_and_jump (op0, op1, GT, GTU, if_false_label, if_true_label,
prob);
case GE_EXPR:
mode = TYPE_MODE (TREE_TYPE (op0));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (GE, mode, ccp_jump))
+ if (is_int_mode (mode, &int_mode)
+ && ! can_compare_p (GE, int_mode, ccp_jump))
do_jump_by_parts_greater (op0, op1, 1, if_true_label, if_false_label,
prob.invert ());
else
if (! if_true_label)
dummy_label = if_true_label = gen_label_rtx ();
- if (GET_MODE_CLASS (mode) == MODE_INT
- && ! can_compare_p (code, mode, ccp_jump))
+ scalar_int_mode int_mode;
+ if (is_int_mode (mode, &int_mode)
+ && ! can_compare_p (code, int_mode, ccp_jump))
{
switch (code)
{
case LTU:
- do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
+ do_jump_by_parts_greater_rtx (int_mode, 1, op1, op0,
if_false_label, if_true_label, prob);
break;
case LEU:
- do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
+ do_jump_by_parts_greater_rtx (int_mode, 1, op0, op1,
if_true_label, if_false_label,
prob.invert ());
break;
case GTU:
- do_jump_by_parts_greater_rtx (mode, 1, op0, op1,
+ do_jump_by_parts_greater_rtx (int_mode, 1, op0, op1,
if_false_label, if_true_label, prob);
break;
case GEU:
- do_jump_by_parts_greater_rtx (mode, 1, op1, op0,
+ do_jump_by_parts_greater_rtx (int_mode, 1, op1, op0,
if_true_label, if_false_label,
prob.invert ());
break;
case LT:
- do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
+ do_jump_by_parts_greater_rtx (int_mode, 0, op1, op0,
if_false_label, if_true_label, prob);
break;
case LE:
- do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
+ do_jump_by_parts_greater_rtx (int_mode, 0, op0, op1,
if_true_label, if_false_label,
prob.invert ());
break;
case GT:
- do_jump_by_parts_greater_rtx (mode, 0, op0, op1,
+ do_jump_by_parts_greater_rtx (int_mode, 0, op0, op1,
if_false_label, if_true_label, prob);
break;
case GE:
- do_jump_by_parts_greater_rtx (mode, 0, op1, op0,
+ do_jump_by_parts_greater_rtx (int_mode, 0, op1, op0,
if_true_label, if_false_label,
prob.invert ());
break;
case EQ:
- do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label,
+ do_jump_by_parts_equality_rtx (int_mode, op0, op1, if_false_label,
if_true_label, prob);
break;
case NE:
- do_jump_by_parts_equality_rtx (mode, op0, op1, if_true_label,
+ do_jump_by_parts_equality_rtx (int_mode, op0, op1, if_true_label,
if_false_label,
prob.invert ());
break;
arg != void_list_node && idx < nargs;
arg = TREE_CHAIN (arg), idx++)
{
- machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
+ scalar_int_mode mode;
rtx reg, link, tmp;
+
+ if (!is_int_mode (TYPE_MODE (TREE_VALUE (arg)), &mode))
+ return false;
+
reg = targetm.calls.function_arg (args_so_far, mode, NULL_TREE, true);
- if (!reg || !REG_P (reg) || GET_MODE (reg) != mode
- || GET_MODE_CLASS (mode) != MODE_INT)
+ if (!reg || !REG_P (reg) || GET_MODE (reg) != mode)
return false;
for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
link = XEXP (link, 1))
if (GET_CODE (XEXP (link, 0)) == USE)
{
+ scalar_int_mode arg_mode;
args[idx] = XEXP (XEXP (link, 0), 0);
if (REG_P (args[idx])
&& REGNO (args[idx]) == REGNO (reg)
&& (GET_MODE (args[idx]) == mode
- || (GET_MODE_CLASS (GET_MODE (args[idx])) == MODE_INT
- && (GET_MODE_SIZE (GET_MODE (args[idx]))
- <= UNITS_PER_WORD)
- && (GET_MODE_SIZE (GET_MODE (args[idx]))
- > GET_MODE_SIZE (mode)))))
+ || (is_int_mode (GET_MODE (args[idx]), &arg_mode)
+ && (GET_MODE_SIZE (arg_mode) <= UNITS_PER_WORD)
+ && (GET_MODE_SIZE (arg_mode) > GET_MODE_SIZE (mode)))))
break;
}
if (!link)
{
tree enttype = TREE_TYPE (type);
tree domain = TYPE_DOMAIN (type);
- machine_mode mode = TYPE_MODE (enttype);
+ scalar_int_mode mode;
- if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_SIZE (mode) == 1
+ if (is_int_mode (TYPE_MODE (enttype), &mode)
+ && GET_MODE_SIZE (mode) == 1
&& domain
&& integer_zerop (TYPE_MIN_VALUE (domain))
&& compare_tree_int (TYPE_MAX_VALUE (domain),
if (TREE_CODE (type) == ARRAY_TYPE)
{
tree enttype = TREE_TYPE (type);
- machine_mode mode = TYPE_MODE (enttype);
+ scalar_int_mode mode;
- if (GET_MODE_CLASS (mode) != MODE_INT || GET_MODE_SIZE (mode) != 1)
+ if (!is_int_mode (TYPE_MODE (enttype), &mode)
+ || GET_MODE_SIZE (mode) != 1)
return false;
if (int_size_in_bytes (type) != size)
return false;
/* If we are comparing a double-word integer with zero or -1, we can
convert the comparison into one involving a single word. */
- if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
- && GET_MODE_CLASS (mode) == MODE_INT
+ scalar_int_mode int_mode;
+ if (is_int_mode (mode, &int_mode)
+ && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
&& (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
{
rtx tem;
/* Do a logical OR or AND of the two words and compare the
result. */
- op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
- op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
+ op00 = simplify_gen_subreg (word_mode, op0, int_mode, 0);
+ op01 = simplify_gen_subreg (word_mode, op0, int_mode, UNITS_PER_WORD);
tem = expand_binop (word_mode,
op1 == const0_rtx ? ior_optab : and_optab,
op00, op01, NULL_RTX, unsignedp,
rtx op0h;
/* If testing the sign bit, can just test on high word. */
- op0h = simplify_gen_subreg (word_mode, op0, mode,
+ op0h = simplify_gen_subreg (word_mode, op0, int_mode,
subreg_highpart_offset (word_mode,
- mode));
+ int_mode));
tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
unsignedp, normalizep);
}
/* If this is A < 0 or A >= 0, we can do this by taking the ones
complement of A (for GE) and shifting the sign bit to the low bit. */
if (op1 == const0_rtx && (code == LT || code == GE)
- && GET_MODE_CLASS (mode) == MODE_INT
+ && is_int_mode (mode, &int_mode)
&& (normalizep || STORE_FLAG_VALUE == 1
- || val_signbit_p (mode, STORE_FLAG_VALUE)))
+ || val_signbit_p (int_mode, STORE_FLAG_VALUE)))
{
subtarget = target;
if (!target)
- target_mode = mode;
+ target_mode = int_mode;
/* If the result is to be wider than OP0, it is best to convert it
first. If it is to be narrower, it is *incorrect* to convert it
first. */
- else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
+ else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (int_mode))
{
- op0 = convert_modes (target_mode, mode, op0, 0);
+ op0 = convert_modes (target_mode, int_mode, op0, 0);
mode = target_mode;
}
/* The remaining tricks only apply to integer comparisons. */
- if (GET_MODE_CLASS (mode) == MODE_INT)
- return emit_store_flag_int (target, subtarget, code, op0, op1, mode,
+ scalar_int_mode int_mode;
+ if (is_int_mode (mode, &int_mode))
+ return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode,
unsignedp, normalizep, trueval);
return 0;
convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
{
rtx temp;
+ scalar_int_mode int_mode;
/* If FROM is a SUBREG that indicates that we have already done at least
the required extension, strip it. */
if (mode == oldmode)
return x;
- if (CONST_SCALAR_INT_P (x) && GET_MODE_CLASS (mode) == MODE_INT)
+ if (CONST_SCALAR_INT_P (x)
+ && is_int_mode (mode, &int_mode))
{
/* If the caller did not tell us the old mode, then there is not
much to do with respect to canonicalization. We have to
if (GET_MODE_CLASS (oldmode) != MODE_INT)
oldmode = MAX_MODE_INT;
wide_int w = wide_int::from (rtx_mode_t (x, oldmode),
- GET_MODE_PRECISION (mode),
+ GET_MODE_PRECISION (int_mode),
unsignedp ? UNSIGNED : SIGNED);
- return immed_wide_int_const (w, mode);
+ return immed_wide_int_const (w, int_mode);
}
/* We can do this with a gen_lowpart if both desired and current modes
are integer, and this is either a constant integer, a register, or a
non-volatile MEM. */
- if (GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_CLASS (oldmode) == MODE_INT
- && GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode)
- && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) mode])
+ scalar_int_mode int_oldmode;
+ if (is_int_mode (mode, &int_mode)
+ && is_int_mode (oldmode, &int_oldmode)
+ && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
+ && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
|| (REG_P (x)
&& (!HARD_REGISTER_P (x)
- || HARD_REGNO_MODE_OK (REGNO (x), mode))
- && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x)))))
-
- return gen_lowpart (mode, x);
+ || HARD_REGNO_MODE_OK (REGNO (x), int_mode))
+ && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x)))))
+ return gen_lowpart (int_mode, x);
/* Converting from integer constant into mode is always equivalent to an
subreg operation. */
case, if it has reverse storage order, it needs to be accessed as a
scalar field with reverse storage order and we must first put the
value into target order. */
+ scalar_int_mode temp_mode;
if (AGGREGATE_TYPE_P (TREE_TYPE (exp))
- && GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT)
+ && is_int_mode (GET_MODE (temp), &temp_mode))
{
- HOST_WIDE_INT size = GET_MODE_BITSIZE (GET_MODE (temp));
+ HOST_WIDE_INT size = GET_MODE_BITSIZE (temp_mode);
reverse = TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp));
if (reverse)
- temp = flip_storage_order (GET_MODE (temp), temp);
+ temp = flip_storage_order (temp_mode, temp);
if (bitsize < size
&& reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN
&& !(mode == BLKmode && bitsize > BITS_PER_WORD))
- temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp,
+ temp = expand_shift (RSHIFT_EXPR, temp_mode, temp,
size - bitsize, NULL_RTX, 1);
}
|| GET_MODE_CLASS (mode) == MODE_VECTOR_ACCUM
|| GET_MODE_CLASS (mode) == MODE_VECTOR_UACCUM)
return const_vector_from_tree (exp);
- if (GET_MODE_CLASS (mode) == MODE_INT)
+ scalar_int_mode int_mode;
+ if (is_int_mode (mode, &int_mode))
{
if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
return const_scalar_mask_from_tree (exp);
else
{
- tree type_for_mode = lang_hooks.types.type_for_mode (mode, 1);
+ tree type_for_mode
+ = lang_hooks.types.type_for_mode (int_mode, 1);
if (type_for_mode)
tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR,
type_for_mode, exp);
&& compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0)
{
tree type = TREE_TYPE (TREE_TYPE (init));
- machine_mode mode = TYPE_MODE (type);
+ scalar_int_mode mode;
- if (GET_MODE_CLASS (mode) == MODE_INT
+ if (is_int_mode (TYPE_MODE (type), &mode)
&& GET_MODE_SIZE (mode) == 1)
return gen_int_mode (TREE_STRING_POINTER (init)
[TREE_INT_CST_LOW (index1)],
{
unsigned HOST_WIDE_INT idx;
tree field, value;
+ scalar_int_mode field_mode;
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0),
idx, field, value)
the bitfield does not meet either of those conditions,
we can't do this optimization. */
&& (! DECL_BIT_FIELD (field)
- || ((GET_MODE_CLASS (DECL_MODE (field)) == MODE_INT)
- && (GET_MODE_PRECISION (DECL_MODE (field))
+ || (is_int_mode (DECL_MODE (field), &field_mode)
+ && (GET_MODE_PRECISION (field_mode)
<= HOST_BITS_PER_WIDE_INT))))
{
if (DECL_BIT_FIELD (field)
and this is for big-endian data, we must put the field
into the high-order bits. And we must also put it back
into memory order if it has been previously reversed. */
+ scalar_int_mode op0_mode;
if (TREE_CODE (type) == RECORD_TYPE
- && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
+ && is_int_mode (GET_MODE (op0), &op0_mode))
{
- HOST_WIDE_INT size = GET_MODE_BITSIZE (GET_MODE (op0));
+ HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode);
if (bitsize < size
&& reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
- op0 = expand_shift (LSHIFT_EXPR, GET_MODE (op0), op0,
+ op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0,
size - bitsize, op0, 1);
if (reversep)
- op0 = flip_storage_order (GET_MODE (op0), op0);
+ op0 = flip_storage_order (op0_mode, op0);
}
/* If the result type is BLKmode, store the data into a temporary
string = exp1;
}
+ scalar_int_mode char_mode;
if (string
&& TYPE_MODE (TREE_TYPE (exp)) == TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))
&& TREE_CODE (string) == STRING_CST
&& TREE_CODE (index) == INTEGER_CST
&& compare_tree_int (index, TREE_STRING_LENGTH (string)) < 0
- && (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_TYPE (string))))
- == MODE_INT)
- && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))) == 1))
+ && is_int_mode (TYPE_MODE (TREE_TYPE (TREE_TYPE (string))),
+ &char_mode)
+ && GET_MODE_SIZE (char_mode) == 1)
return build_int_cst_type (TREE_TYPE (exp),
(TREE_STRING_POINTER (string)
[TREE_INT_CST_LOW (index)]));
tree type = TREE_TYPE (format);
- if (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (type))) != MODE_INT
- || GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type))) != 1)
+ scalar_int_mode char_mode;
+ if (!is_int_mode (TYPE_MODE (TREE_TYPE (type)), &char_mode)
+ || GET_MODE_SIZE (char_mode) != 1)
{
/* Wide format string. */
return NULL;
+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * go-lang.c (go_langhook_type_for_mode): Use is_int_mode.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
return NULL_TREE;
}
+ scalar_int_mode imode;
scalar_float_mode fmode;
enum mode_class mc = GET_MODE_CLASS (mode);
- if (mc == MODE_INT)
- return go_langhook_type_for_size (GET_MODE_BITSIZE (mode), unsignedp);
+ if (is_int_mode (mode, &imode))
+ return go_langhook_type_for_size (GET_MODE_BITSIZE (imode), unsignedp);
else if (is_float_mode (mode, &fmode))
{
switch (GET_MODE_BITSIZE (fmode))
extern bool int_n_enabled_p[NUM_INT_N_ENTS];
extern const int_n_data_t int_n_data[NUM_INT_N_ENTS];
+/* Return true if MODE has class MODE_INT, storing it as a scalar_int_mode
+ in *INT_MODE if so. */
+
+template<typename T>
+inline bool
+is_int_mode (machine_mode mode, T *int_mode)
+{
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ *int_mode = scalar_int_mode (scalar_int_mode::from_int (mode));
+ return true;
+ }
+ return false;
+}
+
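+/* A minimal usage sketch (illustrative only, not part of the patch;
+   X and WIDTH are hypothetical):
+
+     scalar_int_mode int_mode;
+     if (is_int_mode (GET_MODE (x), &int_mode))
+       width = GET_MODE_PRECISION (int_mode);
+
+   i.e. callers replace a GET_MODE_CLASS (mode) == MODE_INT test with a
+   single call that also yields the validated scalar_int_mode.  */
+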
/* Return true if MODE has class MODE_FLOAT, storing it as a
scalar_float_mode in *FLOAT_MODE if so. */
{
int maxsize = 2 * BITS_PER_WORD;
int minsize = BITS_PER_WORD;
+ scalar_int_mode int_mode;
- if (GET_MODE_CLASS (mode) != MODE_INT)
+ if (!is_int_mode (mode, &int_mode))
return;
if (maxsize < LONG_LONG_TYPE_SIZE)
maxsize = LONG_LONG_TYPE_SIZE;
&& (trapv_binoptab_p (optable)
|| trapv_unoptab_p (optable)))
minsize = INT_TYPE_SIZE;
- if (GET_MODE_BITSIZE (mode) < minsize
- || GET_MODE_BITSIZE (mode) > maxsize)
+ if (GET_MODE_BITSIZE (int_mode) < minsize
+ || GET_MODE_BITSIZE (int_mode) > maxsize)
return;
- gen_libfunc (optable, opname, suffix, mode);
+ gen_libfunc (optable, opname, suffix, int_mode);
}
/* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
? OPTAB_WIDEN : methods);
enum mode_class mclass;
machine_mode wider_mode;
+ scalar_int_mode int_mode;
rtx libfunc;
rtx temp;
rtx_insn *entry_last = get_last_insn ();
&& optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
|| (binoptab == rotr_optab
&& optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
- && mclass == MODE_INT)
+ && is_int_mode (mode, &int_mode))
{
optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
rtx newop1;
- unsigned int bits = GET_MODE_PRECISION (mode);
+ unsigned int bits = GET_MODE_PRECISION (int_mode);
if (CONST_INT_P (op1))
newop1 = GEN_INT (bits - INTVAL (op1));
- else if (targetm.shift_truncation_mask (mode) == bits - 1)
+ else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
newop1 = negate_rtx (GET_MODE (op1), op1);
else
newop1 = expand_binop (GET_MODE (op1), sub_optab,
gen_int_mode (bits, GET_MODE (op1)), op1,
NULL_RTX, unsignedp, OPTAB_DIRECT);
- temp = expand_binop_directly (mode, otheroptab, op0, newop1,
+ temp = expand_binop_directly (int_mode, otheroptab, op0, newop1,
target, unsignedp, methods, last);
if (temp)
return temp;
/* These can be done a word at a time. */
if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
- && mclass == MODE_INT
- && GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && is_int_mode (mode, &int_mode)
+ && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
&& optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
{
int i;
|| target == op0
|| target == op1
|| !valid_multiword_target_p (target))
- target = gen_reg_rtx (mode);
+ target = gen_reg_rtx (int_mode);
start_sequence ();
/* Do the actual arithmetic. */
- for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
+ for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
{
- rtx target_piece = operand_subword (target, i, 1, mode);
+ rtx target_piece = operand_subword (target, i, 1, int_mode);
rtx x = expand_binop (word_mode, binoptab,
- operand_subword_force (op0, i, mode),
- operand_subword_force (op1, i, mode),
+ operand_subword_force (op0, i, int_mode),
+ operand_subword_force (op1, i, int_mode),
target_piece, unsignedp, next_methods);
if (x == 0)
insns = get_insns ();
end_sequence ();
- if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
+ if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
{
emit_insn (insns);
return target;
/* Synthesize double word shifts from single word shifts. */
if ((binoptab == lshr_optab || binoptab == ashl_optab
|| binoptab == ashr_optab)
- && mclass == MODE_INT
+ && is_int_mode (mode, &int_mode)
&& (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
- && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
- && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
+ && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
+ && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
&& optab_handler (binoptab, word_mode) != CODE_FOR_nothing
&& optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
&& optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
machine_mode op1_mode;
- double_shift_mask = targetm.shift_truncation_mask (mode);
+ double_shift_mask = targetm.shift_truncation_mask (int_mode);
shift_mask = targetm.shift_truncation_mask (word_mode);
op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
|| target == op0
|| target == op1
|| !valid_multiword_target_p (target))
- target = gen_reg_rtx (mode);
+ target = gen_reg_rtx (int_mode);
start_sequence ();
left_shift = binoptab == ashl_optab;
outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
- outof_target = operand_subword (target, outof_word, 1, mode);
- into_target = operand_subword (target, 1 - outof_word, 1, mode);
+ outof_target = operand_subword (target, outof_word, 1, int_mode);
+ into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
- outof_input = operand_subword_force (op0, outof_word, mode);
- into_input = operand_subword_force (op0, 1 - outof_word, mode);
+ outof_input = operand_subword_force (op0, outof_word, int_mode);
+ into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
if (expand_doubleword_shift (op1_mode, binoptab,
outof_input, into_input, op1,
/* Synthesize double word rotates from single word shifts. */
if ((binoptab == rotl_optab || binoptab == rotr_optab)
- && mclass == MODE_INT
+ && is_int_mode (mode, &int_mode)
&& CONST_INT_P (op1)
- && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
+ && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
&& optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
&& optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
{
|| target == op1
|| !REG_P (target)
|| !valid_multiword_target_p (target))
- target = gen_reg_rtx (mode);
+ target = gen_reg_rtx (int_mode);
start_sequence ();
left_shift = (binoptab == rotl_optab);
outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
- outof_target = operand_subword (target, outof_word, 1, mode);
- into_target = operand_subword (target, 1 - outof_word, 1, mode);
+ outof_target = operand_subword (target, outof_word, 1, int_mode);
+ into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
- outof_input = operand_subword_force (op0, outof_word, mode);
- into_input = operand_subword_force (op0, 1 - outof_word, mode);
+ outof_input = operand_subword_force (op0, outof_word, int_mode);
+ into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
if (shift_count == BITS_PER_WORD)
{
/* These can be done a word at a time by propagating carries. */
if ((binoptab == add_optab || binoptab == sub_optab)
- && mclass == MODE_INT
- && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
+ && is_int_mode (mode, &int_mode)
+ && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
&& optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
{
unsigned int i;
optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
- const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
+ const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
rtx xop0, xop1, xtarget;
#endif
/* Prepare the operands. */
- xop0 = force_reg (mode, op0);
- xop1 = force_reg (mode, op1);
+ xop0 = force_reg (int_mode, op0);
+ xop1 = force_reg (int_mode, op1);
- xtarget = gen_reg_rtx (mode);
+ xtarget = gen_reg_rtx (int_mode);
if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
target = xtarget;
for (i = 0; i < nwords; i++)
{
int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
- rtx target_piece = operand_subword (xtarget, index, 1, mode);
- rtx op0_piece = operand_subword_force (xop0, index, mode);
- rtx op1_piece = operand_subword_force (xop1, index, mode);
+ rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
+ rtx op0_piece = operand_subword_force (xop0, index, int_mode);
+ rtx op1_piece = operand_subword_force (xop1, index, int_mode);
rtx x;
/* Main add/subtract of the input operands. */
carry_in = carry_out;
}
- if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
+ if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
{
- if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
+ if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
|| ! rtx_equal_p (target, xtarget))
{
rtx_insn *temp = emit_move_insn (target, xtarget);
set_dst_reg_note (temp, REG_EQUAL,
gen_rtx_fmt_ee (optab_to_code (binoptab),
- mode, copy_rtx (xop0),
+ int_mode, copy_rtx (xop0),
copy_rtx (xop1)),
target);
}
try using a signed widening multiply. */
if (binoptab == smul_optab
- && mclass == MODE_INT
- && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && is_int_mode (mode, &int_mode)
+ && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
&& optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
&& optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
{
rtx product = NULL_RTX;
- if (widening_optab_handler (umul_widen_optab, mode, word_mode)
- != CODE_FOR_nothing)
+ if (widening_optab_handler (umul_widen_optab, int_mode, word_mode)
+ != CODE_FOR_nothing)
{
- product = expand_doubleword_mult (mode, op0, op1, target,
+ product = expand_doubleword_mult (int_mode, op0, op1, target,
true, methods);
if (!product)
delete_insns_since (last);
}
if (product == NULL_RTX
- && widening_optab_handler (smul_widen_optab, mode, word_mode)
- != CODE_FOR_nothing)
+ && (widening_optab_handler (smul_widen_optab, int_mode, word_mode)
+ != CODE_FOR_nothing))
{
- product = expand_doubleword_mult (mode, op0, op1, target,
+ product = expand_doubleword_mult (int_mode, op0, op1, target,
false, methods);
if (!product)
delete_insns_since (last);
if (product != NULL_RTX)
{
- if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
+ if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
{
rtx_insn *move = emit_move_insn (target ? target : product,
product);
set_dst_reg_note (move,
REG_EQUAL,
- gen_rtx_fmt_ee (MULT, mode,
+ gen_rtx_fmt_ee (MULT, int_mode,
copy_rtx (op0),
copy_rtx (op1)),
target ? target : product);
{
enum mode_class mclass = GET_MODE_CLASS (mode);
machine_mode wider_mode;
+ scalar_int_mode int_mode;
scalar_float_mode float_mode;
rtx temp;
rtx libfunc;
/* These can be done a word at a time. */
if (unoptab == one_cmpl_optab
- && mclass == MODE_INT
- && GET_MODE_SIZE (mode) > UNITS_PER_WORD
+ && is_int_mode (mode, &int_mode)
+ && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
&& optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
{
int i;
rtx_insn *insns;
if (target == 0 || target == op0 || !valid_multiword_target_p (target))
- target = gen_reg_rtx (mode);
+ target = gen_reg_rtx (int_mode);
start_sequence ();
/* Do the actual arithmetic. */
- for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
+ for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
{
- rtx target_piece = operand_subword (target, i, 1, mode);
+ rtx target_piece = operand_subword (target, i, 1, int_mode);
rtx x = expand_unop (word_mode, unoptab,
- operand_subword_force (op0, i, mode),
+ operand_subword_force (op0, i, int_mode),
target_piece, unsignedp);
if (target_piece != x)
value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
where W is the width of MODE. */
- if (GET_MODE_CLASS (mode) == MODE_INT
+ scalar_int_mode int_mode;
+ if (is_int_mode (mode, &int_mode)
&& BRANCH_COST (optimize_insn_for_speed_p (),
false) >= 2)
{
- rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
- GET_MODE_PRECISION (mode) - 1,
+ rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
+ GET_MODE_PRECISION (int_mode) - 1,
NULL_RTX, 0);
- temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
+ temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
OPTAB_LIB_WIDEN);
if (temp != 0)
- temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
+ temp = expand_binop (int_mode,
+ result_unsignedp ? sub_optab : subv_optab,
temp, extended, target, 0, OPTAB_LIB_WIDEN);
if (temp != 0)
/* If this machine has expensive jumps, we can do one's complement
absolute value of X as (((signed) x >> (W-1)) ^ x). */
- if (GET_MODE_CLASS (mode) == MODE_INT
+ scalar_int_mode int_mode;
+ if (is_int_mode (mode, &int_mode)
&& BRANCH_COST (optimize_insn_for_speed_p (),
false) >= 2)
{
- rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
- GET_MODE_PRECISION (mode) - 1,
+ rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
+ GET_MODE_PRECISION (int_mode) - 1,
NULL_RTX, 0);
- temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
+ temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
OPTAB_LIB_WIDEN);
if (temp != 0)
{
unsigned HOST_WIDE_INT val;
unsigned int width;
+ scalar_int_mode int_mode;
- if (GET_MODE_CLASS (mode) != MODE_INT)
+ if (!is_int_mode (mode, &int_mode))
return false;
- width = GET_MODE_PRECISION (mode);
+ width = GET_MODE_PRECISION (int_mode);
if (width == 0)
return false;
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
unsigned int width;
+ scalar_int_mode int_mode;
- if (GET_MODE_CLASS (mode) != MODE_INT)
+ if (!is_int_mode (mode, &int_mode))
return false;
- width = GET_MODE_PRECISION (mode);
+ width = GET_MODE_PRECISION (int_mode);
if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
return false;
- val &= GET_MODE_MASK (mode);
+ val &= GET_MODE_MASK (int_mode);
return val == (HOST_WIDE_INT_1U << (width - 1));
}
{
unsigned int width;
- if (GET_MODE_CLASS (mode) != MODE_INT)
+ scalar_int_mode int_mode;
+ if (!is_int_mode (mode, &int_mode))
return false;
- width = GET_MODE_PRECISION (mode);
+ width = GET_MODE_PRECISION (int_mode);
if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
return false;
{
unsigned int width;
- if (GET_MODE_CLASS (mode) != MODE_INT)
+ scalar_int_mode int_mode;
+ if (!is_int_mode (mode, &int_mode))
return false;
- width = GET_MODE_PRECISION (mode);
+ width = GET_MODE_PRECISION (int_mode);
if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
return false;
/* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
the same as (zero_extract:SI FOO (const_int 1) BAR). */
+ scalar_int_mode int_mode;
if (code == NE
&& op1 == const0_rtx
- && GET_MODE_CLASS (mode) == MODE_INT
+ && is_int_mode (mode, &int_mode)
&& cmp_mode != VOIDmode
/* ??? Work-around BImode bugs in the ia64 backend. */
- && mode != BImode
+ && int_mode != BImode
&& cmp_mode != BImode
&& nonzero_bits (op0, cmp_mode) == 1
&& STORE_FLAG_VALUE == 1)
- return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
- ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
- : lowpart_subreg (mode, op0, cmp_mode);
+ return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (cmp_mode)
+ ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, cmp_mode)
+ : lowpart_subreg (int_mode, op0, cmp_mode);
/* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
if ((code == EQ || code == NE)
&& (!targetm.vector_mode_supported_p (mode)
|| !have_regs_of_mode[mode]))
{
- machine_mode innermode = TREE_TYPE (t)->type_common.mode;
+ scalar_int_mode innermode;
/* For integers, try mapping it to a same-sized scalar mode. */
- if (GET_MODE_CLASS (innermode) == MODE_INT)
+ if (is_int_mode (TREE_TYPE (t)->type_common.mode, &innermode))
{
unsigned int size = (TYPE_VECTOR_SUBPARTS (t)
* GET_MODE_BITSIZE (innermode));