+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * machmode.h (mode_traits): New structure.
+ (get_narrowest_mode): New function.
+ (mode_iterator::start): Likewise.
+ (mode_iterator::iterate_p): Likewise.
+ (mode_iterator::get_wider): Likewise.
+ (mode_iterator::get_known_wider): Likewise.
+ (mode_iterator::get_2xwider): Likewise.
+ (FOR_EACH_MODE_IN_CLASS): New mode iterator.
+ (FOR_EACH_MODE): Likewise.
+ (FOR_EACH_MODE_FROM): Likewise.
+ (FOR_EACH_MODE_UNTIL): Likewise.
+ (FOR_EACH_WIDER_MODE): Likewise.
+ (FOR_EACH_2XWIDER_MODE): Likewise.
+ * builtins.c (expand_builtin_strlen): Use new mode iterators.
+ * combine.c (simplify_comparison): Likewise.
+ * config/i386/i386.c (type_natural_mode): Likewise.
+ * cse.c (cse_insn): Likewise.
+ * dse.c (find_shift_sequence): Likewise.
+ * emit-rtl.c (init_derived_machine_modes): Likewise.
+ (init_emit_once): Likewise.
+ * explow.c (hard_function_value): Likewise.
+ * expmed.c (extract_fixed_bit_field_1): Likewise.
+ (extract_bit_field_1): Likewise.
+ (expand_divmod): Likewise.
+ (emit_store_flag_1): Likewise.
+ * expr.c (init_expr_target): Likewise.
+ (convert_move): Likewise.
+ (alignment_for_piecewise_move): Likewise.
+ (widest_int_mode_for_size): Likewise.
+ (emit_block_move_via_movmem): Likewise.
+ (copy_blkmode_to_reg): Likewise.
+ (set_storage_via_setmem): Likewise.
+ (compress_float_constant): Likewise.
+ * omp-low.c (omp_clause_aligned_alignment): Likewise.
+ * optabs-query.c (get_best_extraction_insn): Likewise.
+ * optabs.c (expand_binop): Likewise.
+ (expand_twoval_unop): Likewise.
+ (expand_twoval_binop): Likewise.
+ (widen_leading): Likewise.
+ (widen_bswap): Likewise.
+ (expand_parity): Likewise.
+ (expand_unop): Likewise.
+ (prepare_cmp_insn): Likewise.
+ (prepare_float_lib_cmp): Likewise.
+ (expand_float): Likewise.
+ (expand_fix): Likewise.
+ (expand_sfix_optab): Likewise.
+ * postreload.c (move2add_use_add2_insn): Likewise.
+ * reg-stack.c (reg_to_stack): Likewise.
+ * reginfo.c (choose_hard_reg_mode): Likewise.
+ * rtlanal.c (init_num_sign_bit_copies_in_rep): Likewise.
+ * stor-layout.c (mode_for_size): Likewise.
+ (smallest_mode_for_size): Likewise.
+ (mode_for_vector): Likewise.
+ (finish_bitfield_representative): Likewise.
+ * tree-ssa-math-opts.c (target_supports_divmod_p): Likewise.
+ * tree-vect-generic.c (type_for_widest_vector_mode): Likewise.
+ * tree-vect-stmts.c (vectorizable_conversion): Likewise.
+ * var-tracking.c (prepare_call_arguments): Likewise.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc-interface/misc.c (fp_prec_to_size): Use new mode iterators.
+ (fp_size_to_prec): Likewise.
+
2017-08-29 Martin Liska <mliska@suse.cz>
PR other/39851
{
machine_mode mode;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
if (GET_MODE_PRECISION (mode) == prec)
return GET_MODE_BITSIZE (mode);
{
machine_mode mode;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
if (GET_MODE_BITSIZE (mode) == size)
return GET_MODE_PRECISION (mode);
tree src = CALL_EXPR_ARG (exp, 0);
rtx src_reg;
rtx_insn *before_strlen;
- machine_mode insn_mode = target_mode;
+ machine_mode insn_mode;
enum insn_code icode = CODE_FOR_nothing;
unsigned int align;
return NULL_RTX;
/* Bail out if we can't compute strlen in the right mode. */
- while (insn_mode != VOIDmode)
+ FOR_EACH_MODE_FROM (insn_mode, target_mode)
{
icode = optab_handler (strlen_optab, insn_mode);
if (icode != CODE_FOR_nothing)
break;
-
- insn_mode = GET_MODE_WIDER_MODE (insn_mode);
}
if (insn_mode == VOIDmode)
return NULL_RTX;
+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * c-common.c (c_common_fixed_point_type_for_size): Use new mode
+ iterators.
+ * c-cppbuiltin.c (c_cpp_builtins): Likewise.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
c_common_fixed_point_type_for_size (unsigned int ibit, unsigned int fbit,
int unsignedp, int satp)
{
- machine_mode mode;
+ enum mode_class mclass;
if (ibit == 0)
- mode = unsignedp ? UQQmode : QQmode;
+ mclass = unsignedp ? MODE_UFRACT : MODE_FRACT;
else
- mode = unsignedp ? UHAmode : HAmode;
+ mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
- for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
+ machine_mode mode;
+ FOR_EACH_MODE_IN_CLASS (mode, mclass)
if (GET_MODE_IBIT (mode) >= ibit && GET_MODE_FBIT (mode) >= fbit)
break;
if (flag_building_libgcc)
{
/* Properties of floating-point modes for libgcc2.c. */
- for (machine_mode mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ machine_mode mode;
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
{
const char *name = GET_MODE_NAME (mode);
char *macro_name
}
else if (c0 == c1)
- for (tmode = GET_CLASS_NARROWEST_MODE
- (GET_MODE_CLASS (GET_MODE (op0)));
- tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
+ FOR_EACH_MODE_UNTIL (tmode, GET_MODE (op0))
if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
{
op0 = gen_lowpart_or_truncate (tmode, inner_op0);
if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_SIZE (mode) < UNITS_PER_WORD
&& ! have_insn_for (COMPARE, mode))
- for (tmode = GET_MODE_WIDER_MODE (mode);
- (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
- tmode = GET_MODE_WIDER_MODE (tmode))
- if (have_insn_for (COMPARE, tmode))
- {
- int zero_extended;
-
- /* If this is a test for negative, we can make an explicit
- test of the sign bit. Test this first so we can use
- a paradoxical subreg to extend OP0. */
+ FOR_EACH_WIDER_MODE (tmode, mode)
+ {
+ if (!HWI_COMPUTABLE_MODE_P (tmode))
+ break;
+ if (have_insn_for (COMPARE, tmode))
+ {
+ int zero_extended;
- if (op1 == const0_rtx && (code == LT || code == GE)
- && HWI_COMPUTABLE_MODE_P (mode))
- {
- unsigned HOST_WIDE_INT sign
- = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
- op0 = simplify_gen_binary (AND, tmode,
- gen_lowpart (tmode, op0),
- gen_int_mode (sign, tmode));
- code = (code == LT) ? NE : EQ;
- break;
- }
+ /* If this is a test for negative, we can make an explicit
+ test of the sign bit. Test this first so we can use
+ a paradoxical subreg to extend OP0. */
- /* If the only nonzero bits in OP0 and OP1 are those in the
- narrower mode and this is an equality or unsigned comparison,
- we can use the wider mode. Similarly for sign-extended
- values, in which case it is true for all comparisons. */
- zero_extended = ((code == EQ || code == NE
- || code == GEU || code == GTU
- || code == LEU || code == LTU)
- && (nonzero_bits (op0, tmode)
- & ~GET_MODE_MASK (mode)) == 0
- && ((CONST_INT_P (op1)
- || (nonzero_bits (op1, tmode)
- & ~GET_MODE_MASK (mode)) == 0)));
-
- if (zero_extended
- || ((num_sign_bit_copies (op0, tmode)
- > (unsigned int) (GET_MODE_PRECISION (tmode)
- - GET_MODE_PRECISION (mode)))
- && (num_sign_bit_copies (op1, tmode)
- > (unsigned int) (GET_MODE_PRECISION (tmode)
- - GET_MODE_PRECISION (mode)))))
- {
- /* If OP0 is an AND and we don't have an AND in MODE either,
- make a new AND in the proper mode. */
- if (GET_CODE (op0) == AND
- && !have_insn_for (AND, mode))
+ if (op1 == const0_rtx && (code == LT || code == GE)
+ && HWI_COMPUTABLE_MODE_P (mode))
+ {
+ unsigned HOST_WIDE_INT sign
+ = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
op0 = simplify_gen_binary (AND, tmode,
- gen_lowpart (tmode,
- XEXP (op0, 0)),
- gen_lowpart (tmode,
- XEXP (op0, 1)));
- else
- {
- if (zero_extended)
- {
- op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
- op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
- }
- else
- {
- op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
- op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
- }
- break;
- }
- }
- }
+ gen_lowpart (tmode, op0),
+ gen_int_mode (sign, tmode));
+ code = (code == LT) ? NE : EQ;
+ break;
+ }
+
+ /* If the only nonzero bits in OP0 and OP1 are those in the
+ narrower mode and this is an equality or unsigned comparison,
+ we can use the wider mode. Similarly for sign-extended
+ values, in which case it is true for all comparisons. */
+ zero_extended = ((code == EQ || code == NE
+ || code == GEU || code == GTU
+ || code == LEU || code == LTU)
+ && (nonzero_bits (op0, tmode)
+ & ~GET_MODE_MASK (mode)) == 0
+ && ((CONST_INT_P (op1)
+ || (nonzero_bits (op1, tmode)
+ & ~GET_MODE_MASK (mode)) == 0)));
+
+ if (zero_extended
+ || ((num_sign_bit_copies (op0, tmode)
+ > (unsigned int) (GET_MODE_PRECISION (tmode)
+ - GET_MODE_PRECISION (mode)))
+ && (num_sign_bit_copies (op1, tmode)
+ > (unsigned int) (GET_MODE_PRECISION (tmode)
+ - GET_MODE_PRECISION (mode)))))
+ {
+ /* If OP0 is an AND and we don't have an AND in MODE either,
+ make a new AND in the proper mode. */
+ if (GET_CODE (op0) == AND
+ && !have_insn_for (AND, mode))
+ op0 = simplify_gen_binary (AND, tmode,
+ gen_lowpart (tmode,
+ XEXP (op0, 0)),
+ gen_lowpart (tmode,
+ XEXP (op0, 1)));
+ else
+ {
+ if (zero_extended)
+ {
+ op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
+ op0, mode);
+ op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
+ op1, mode);
+ }
+ else
+ {
+ op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
+ op0, mode);
+ op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
+ op1, mode);
+ }
+ break;
+ }
+ }
+ }
+ }
/* We may have changed the comparison operands. Re-canonicalize. */
if (swap_commutative_operands_p (op0, op1))
mode = MIN_MODE_VECTOR_INT;
/* Get the mode which has this inner mode and number of units. */
- for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_FROM (mode, mode)
if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
&& GET_MODE_INNER (mode) == innermode)
{
{
machine_mode wider_mode;
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode
- && GET_MODE_PRECISION (wider_mode) <= BITS_PER_WORD
- && src_related == 0;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
{
+ if (GET_MODE_PRECISION (wider_mode) > BITS_PER_WORD)
+ break;
+
struct table_elt *const_elt
= lookup (src_const, HASH (src_const, wider_mode), wider_mode);
src_related = gen_lowpart (mode, const_elt->exp);
break;
}
+
+ if (src_related != 0)
+ break;
}
}
machine_mode tmode;
rtx new_and = gen_rtx_AND (VOIDmode, NULL_RTX, XEXP (src, 1));
- for (tmode = GET_MODE_WIDER_MODE (mode);
- GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
- tmode = GET_MODE_WIDER_MODE (tmode))
+ FOR_EACH_WIDER_MODE (tmode, mode)
{
+ if (GET_MODE_SIZE (tmode) > UNITS_PER_WORD)
+ break;
+
rtx inner = gen_lowpart (tmode, XEXP (src, 0));
struct table_elt *larger_elt;
PUT_CODE (memory_extend_rtx, extend_op);
XEXP (memory_extend_rtx, 0) = src;
- for (tmode = GET_MODE_WIDER_MODE (mode);
- GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
- tmode = GET_MODE_WIDER_MODE (tmode))
+ FOR_EACH_WIDER_MODE (tmode, mode)
{
struct table_elt *larger_elt;
+ if (GET_MODE_SIZE (tmode) > UNITS_PER_WORD)
+ break;
+
PUT_MODE (memory_extend_rtx, tmode);
larger_elt = lookup (memory_extend_rtx,
HASH (memory_extend_rtx, tmode), tmode);
justify the value we want to read but is available in one insn on
the machine. */
- for (new_mode = smallest_mode_for_size (access_size * BITS_PER_UNIT,
- MODE_INT);
- GET_MODE_BITSIZE (new_mode) <= BITS_PER_WORD;
- new_mode = GET_MODE_WIDER_MODE (new_mode))
+ FOR_EACH_MODE_FROM (new_mode,
+ smallest_mode_for_size (access_size * BITS_PER_UNIT,
+ MODE_INT))
{
rtx target, new_reg, new_lhs;
rtx_insn *shift_seq, *insn;
int cost;
+ if (GET_MODE_BITSIZE (new_mode) > BITS_PER_WORD)
+ break;
+
/* If a constant was stored into memory, try to simplify it here,
otherwise the cost of the shift might preclude this optimization
e.g. at -Os, even when no actual shift will be needed. */
byte_mode = VOIDmode;
word_mode = VOIDmode;
- for (machine_mode mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ machine_mode mode;
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
{
if (GET_MODE_BITSIZE (mode) == BITS_PER_UNIT
&& byte_mode == VOIDmode)
const REAL_VALUE_TYPE *const r =
(i == 0 ? &dconst0 : i == 1 ? &dconst1 : &dconst2);
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
const_tiny_rtx[i][(int) mode] =
const_double_from_real_value (*r, mode);
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_DECIMAL_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_DECIMAL_FLOAT)
const_tiny_rtx[i][(int) mode] =
const_double_from_real_value (*r, mode);
const_tiny_rtx[i][(int) VOIDmode] = GEN_INT (i);
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
const_tiny_rtx[i][(int) mode] = GEN_INT (i);
for (mode = MIN_MODE_PARTIAL_INT;
const_tiny_rtx[3][(int) VOIDmode] = constm1_rtx;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
const_tiny_rtx[3][(int) mode] = constm1_rtx;
for (mode = MIN_MODE_PARTIAL_INT;
mode <= MAX_MODE_PARTIAL_INT;
mode = (machine_mode)((int)(mode) + 1))
const_tiny_rtx[3][(int) mode] = constm1_rtx;
-
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_COMPLEX_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_INT)
{
rtx inner = const_tiny_rtx[0][(int)GET_MODE_INNER (mode)];
const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_COMPLEX_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_FLOAT)
{
rtx inner = const_tiny_rtx[0][(int)GET_MODE_INNER (mode)];
const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FRACT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FRACT)
{
FCONST0 (mode).data.high = 0;
FCONST0 (mode).data.low = 0;
FCONST0 (mode), mode);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_UFRACT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_UFRACT)
{
FCONST0 (mode).data.high = 0;
FCONST0 (mode).data.low = 0;
FCONST0 (mode), mode);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_ACCUM);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_ACCUM)
{
FCONST0 (mode).data.high = 0;
FCONST0 (mode).data.low = 0;
FCONST1 (mode), mode);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_UACCUM);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_UACCUM)
{
FCONST0 (mode).data.high = 0;
FCONST0 (mode).data.low = 0;
FCONST1 (mode), mode);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_FRACT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FRACT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_UFRACT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_UFRACT)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_ACCUM);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_ACCUM)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_UACCUM);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_UACCUM)
{
const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
if (STORE_FLAG_VALUE == 1)
const_tiny_rtx[1][(int) BImode] = const1_rtx;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_POINTER_BOUNDS);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_POINTER_BOUNDS)
{
wide_int wi_zero = wi::zero (GET_MODE_PRECISION (mode));
const_tiny_rtx[0][mode] = immed_wide_int_const (wi_zero, mode);
since the value of bytes will then be large enough that no
mode will match anyway. */
- for (tmpmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmpmode != VOIDmode;
- tmpmode = GET_MODE_WIDER_MODE (tmpmode))
+ FOR_EACH_MODE_IN_CLASS (tmpmode, MODE_INT)
{
/* Have we found a large enough mode? */
if (GET_MODE_SIZE (tmpmode) >= bytes)
else
new_mode = MIN_MODE_VECTOR_INT;
- for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
+ FOR_EACH_MODE_FROM (new_mode, new_mode)
if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
&& GET_MODE_UNIT_SIZE (new_mode) == GET_MODE_SIZE (tmode)
&& targetm.vector_mode_supported_p (new_mode))
/* Find the narrowest integer mode that contains the field. */
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
if (GET_MODE_BITSIZE (mode) >= bitsize + bitnum)
{
op0 = convert_to_mode (mode, op0, 0);
optab2 = (op1_is_pow2 ? optab1
: (unsignedp ? udivmod_optab : sdivmod_optab));
- for (compute_mode = mode; compute_mode != VOIDmode;
- compute_mode = GET_MODE_WIDER_MODE (compute_mode))
+ FOR_EACH_MODE_FROM (compute_mode, mode)
if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
|| optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
break;
if (compute_mode == VOIDmode)
- for (compute_mode = mode; compute_mode != VOIDmode;
- compute_mode = GET_MODE_WIDER_MODE (compute_mode))
+ FOR_EACH_MODE_FROM (compute_mode, mode)
if (optab_libfunc (optab1, compute_mode)
|| optab_libfunc (optab2, compute_mode))
break;
}
mclass = GET_MODE_CLASS (mode);
- for (compare_mode = mode; compare_mode != VOIDmode;
- compare_mode = GET_MODE_WIDER_MODE (compare_mode))
+ FOR_EACH_MODE_FROM (compare_mode, mode)
{
machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
icode = optab_handler (cstore_optab, optab_mode);
mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
{
machine_mode srcmode;
- for (srcmode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); srcmode != mode;
- srcmode = GET_MODE_WIDER_MODE (srcmode))
+ FOR_EACH_MODE_UNTIL (srcmode, mode)
{
enum insn_code ic;
int shift_amount;
/* Search for a mode to convert via. */
- for (intermediate = from_mode; intermediate != VOIDmode;
- intermediate = GET_MODE_WIDER_MODE (intermediate))
+ FOR_EACH_MODE_FROM (intermediate, from_mode)
if (((can_extend_p (to_mode, intermediate, unsignedp)
!= CODE_FOR_nothing)
|| (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
{
machine_mode tmode, xmode;
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > max_pieces
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
+ xmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
+ {
+ if (GET_MODE_SIZE (tmode) > max_pieces
+ || SLOW_UNALIGNED_ACCESS (tmode, align))
+ break;
+ xmode = tmode;
+ }
align = MAX (align, GET_MODE_ALIGNMENT (xmode));
}
{
machine_mode tmode, mode = VOIDmode;
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
if (GET_MODE_SIZE (tmode) < size)
mode = tmode;
including more than one in the machine description unless
the more limited one has some advantage. */
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
{
enum insn_code code = direct_optab_handler (movmem_optab, mode);
{
/* Find the smallest integer mode large enough to hold the
entire structure. */
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
/* Have we found a large enough mode? */
if (GET_MODE_SIZE (mode) >= bytes)
break;
expected_size = min_size;
}
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
{
enum insn_code code = direct_optab_handler (setmem_optab, mode);
else
oldcost = set_src_cost (force_const_mem (dstmode, y), dstmode, speed);
- for (srcmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (orig_srcmode));
- srcmode != orig_srcmode;
- srcmode = GET_MODE_WIDER_MODE (srcmode))
+ FOR_EACH_MODE_UNTIL (srcmode, orig_srcmode)
{
enum insn_code ic;
rtx trunc_y;
extern const unsigned char mode_wider[NUM_MACHINE_MODES];
extern const unsigned char mode_2xwider[NUM_MACHINE_MODES];
+template<typename T>
+struct mode_traits
+{
+ /* For use by the machmode support code only.
+
+ There are cases in which the machmode support code needs to forcibly
+ convert a machine_mode to a specific mode class T, and in which the
+ context guarantees that this is valid without the need for an assert.
+ This can be done using:
+
+ return typename mode_traits<T>::from_int (mode);
+
+ when returning a T and:
+
+ res = T (typename mode_traits<T>::from_int (mode));
+
+ when assigning to a value RES that must be assignment-compatible
+ with (but possibly not the same as) T.
+
+ Here we use an enum type distinct from machine_mode but with the
+ same range as machine_mode. T should have a constructor that
+ accepts this enum type; it should not have a constructor that
+ accepts machine_mode.
+
+ We use this somewhat indirect approach to avoid too many constructor
+ calls when the compiler is built with -O0. For example, even in
+ unoptimized code, the return statement above would construct the
+ returned T directly from the numerical value of MODE. */
+ enum from_int { dummy = MAX_MACHINE_MODE };
+};
+
+template<>
+struct mode_traits<machine_mode>
+{
+ /* machine_mode itself needs no conversion. */
+ typedef machine_mode from_int;
+};
+
/* Get the name of mode MODE as a string. */
extern const char * const mode_name[NUM_MACHINE_MODES];
#define GET_CLASS_NARROWEST_MODE(CLASS) \
((machine_mode) class_narrowest_mode[CLASS])
+/* Return the narrowest mode in T's class. */
+
+template<typename T>
+inline T
+get_narrowest_mode (T mode)
+{
+ return typename mode_traits<T>::from_int
+ (class_narrowest_mode[GET_MODE_CLASS (mode)]);
+}
+
/* Define the integer modes whose sizes are BITS_PER_UNIT and BITS_PER_WORD
and the mode whose class is Pmode and whose size is POINTER_SIZE. */
extern bool int_n_enabled_p[NUM_INT_N_ENTS];
extern const int_n_data_t int_n_data[NUM_INT_N_ENTS];
+namespace mode_iterator
+{
+ /* Start mode iterator *ITER at the first mode in class MCLASS, if any. */
+
+ inline void
+ start (machine_mode *iter, enum mode_class mclass)
+ {
+ *iter = GET_CLASS_NARROWEST_MODE (mclass);
+ }
+
+ /* Return true if mode iterator *ITER has not reached the end. */
+
+ inline bool
+ iterate_p (machine_mode *iter)
+ {
+ return *iter != E_VOIDmode;
+ }
+
+ /* Set mode iterator *ITER to the next widest mode in the same class,
+ if any. */
+
+ inline void
+ get_wider (machine_mode *iter)
+ {
+ *iter = GET_MODE_WIDER_MODE (*iter);
+ }
+
+ /* Set mode iterator *ITER to the next widest mode in the same class.
+ Such a mode is known to exist. */
+
+ inline void
+ get_known_wider (machine_mode *iter)
+ {
+ *iter = GET_MODE_WIDER_MODE (*iter);
+ gcc_checking_assert (*iter != VOIDmode);
+ }
+
+ /* Set mode iterator *ITER to the mode that is two times wider than the
+ current one, if such a mode exists. */
+
+ inline void
+ get_2xwider (machine_mode *iter)
+ {
+ *iter = GET_MODE_2XWIDER_MODE (*iter);
+ }
+}
+
+/* Make ITERATOR iterate over all the modes in mode class CLASS,
+ from narrowest to widest. */
+#define FOR_EACH_MODE_IN_CLASS(ITERATOR, CLASS) \
+ for (mode_iterator::start (&(ITERATOR), CLASS); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_wider (&(ITERATOR)))
+
+/* Make ITERATOR iterate over all the modes in the range [START, END),
+ in order of increasing width. */
+#define FOR_EACH_MODE(ITERATOR, START, END) \
+ for ((ITERATOR) = (START); \
+ (ITERATOR) != (END); \
+ mode_iterator::get_known_wider (&(ITERATOR)))
+
+/* Make ITERATOR iterate over START and all wider modes in the same
+ class, in order of increasing width. */
+#define FOR_EACH_MODE_FROM(ITERATOR, START) \
+ for ((ITERATOR) = (START); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_wider (&(ITERATOR)))
+
+/* Make ITERATOR iterate over modes in the range [NARROWEST, END)
+ in order of increasing width, where NARROWEST is the narrowest mode
+ in END's class. */
+#define FOR_EACH_MODE_UNTIL(ITERATOR, END) \
+ FOR_EACH_MODE (ITERATOR, get_narrowest_mode (END), END)
+
+/* Make ITERATOR iterate over modes in the same class as START, in order
+   of increasing width.  Start at the first mode wider than START,
+   or don't iterate at all if there is no wider mode.  */
+#define FOR_EACH_WIDER_MODE(ITERATOR, START) \
+ for ((ITERATOR) = (START), mode_iterator::get_wider (&(ITERATOR)); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_wider (&(ITERATOR)))
+
+/* Make ITERATOR iterate over modes in the same class as START, in order
+   of increasing width, and with each mode being twice the width of the
+   previous mode.  Start at the mode that is two times wider than START,
+   or don't iterate at all if there is no such mode.  */
+#define FOR_EACH_2XWIDER_MODE(ITERATOR, START) \
+ for ((ITERATOR) = (START), mode_iterator::get_2xwider (&(ITERATOR)); \
+ mode_iterator::iterate_p (&(ITERATOR)); \
+ mode_iterator::get_2xwider (&(ITERATOR)))
+
#endif /* not HAVE_MACHINE_MODES */
static enum mode_class classes[]
= { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
for (int i = 0; i < 4; i += 2)
- for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, classes[i])
{
vmode = targetm.vectorize.preferred_simd_mode (mode);
if (GET_MODE_CLASS (vmode) != classes[i + 1])
machine_mode field_mode)
{
machine_mode mode = smallest_mode_for_size (struct_bits, MODE_INT);
- while (mode != VOIDmode)
+ FOR_EACH_MODE_FROM (mode, mode)
{
if (get_extraction_insn (insn, pattern, type, mode))
{
- while (mode != VOIDmode
- && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (field_mode)
- && !TRULY_NOOP_TRUNCATION_MODES_P (insn->field_mode,
- field_mode))
+ FOR_EACH_MODE_FROM (mode, mode)
{
+ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (field_mode)
+ || TRULY_NOOP_TRUNCATION_MODES_P (insn->field_mode,
+ field_mode))
+ break;
get_extraction_insn (insn, pattern, type, mode);
- mode = GET_MODE_WIDER_MODE (mode);
}
return true;
}
- mode = GET_MODE_WIDER_MODE (mode);
}
return false;
}
if (CLASS_HAS_WIDER_MODES_P (mclass)
&& methods != OPTAB_DIRECT && methods != OPTAB_LIB)
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
{
if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
|| (binoptab == smul_optab
if (CLASS_HAS_WIDER_MODES_P (mclass))
{
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
{
if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
!= CODE_FOR_nothing
if (CLASS_HAS_WIDER_MODES_P (mclass))
{
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
{
if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
{
if (CLASS_HAS_WIDER_MODES_P (mclass))
{
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
{
if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
{
if (CLASS_HAS_WIDER_MODES_P (mclass))
{
machine_mode wider_mode;
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
{
if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
{
if (!CLASS_HAS_WIDER_MODES_P (mclass))
return NULL_RTX;
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
goto found;
return NULL_RTX;
if (CLASS_HAS_WIDER_MODES_P (mclass))
{
machine_mode wider_mode;
- for (wider_mode = mode; wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_MODE_FROM (wider_mode, mode)
{
if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
{
}
if (CLASS_HAS_WIDER_MODES_P (mclass))
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
{
if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
{
if (CLASS_HAS_WIDER_MODES_P (mclass))
{
- for (wider_mode = GET_MODE_WIDER_MODE (mode);
- wider_mode != VOIDmode;
- wider_mode = GET_MODE_WIDER_MODE (wider_mode))
+ FOR_EACH_WIDER_MODE (wider_mode, mode)
{
if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
|| optab_libfunc (unoptab, wider_mode))
/* Try to use a memory block compare insn - either cmpstr
or cmpmem will do. */
- for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- cmp_mode != VOIDmode;
- cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
+ FOR_EACH_MODE_IN_CLASS (cmp_mode, MODE_INT)
{
cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
if (cmp_code == CODE_FOR_nothing)
mclass = GET_MODE_CLASS (mode);
test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
- cmp_mode = mode;
- do
- {
+ FOR_EACH_MODE_FROM (cmp_mode, mode)
+ {
enum insn_code icode;
icode = optab_handler (cbranch_optab, cmp_mode);
if (icode != CODE_FOR_nothing
if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
break;
- cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
}
- while (cmp_mode != VOIDmode);
if (methods != OPTAB_LIB_WIDEN)
goto fail;
bool reversed_p = false;
cmp_mode = targetm.libgcc_cmp_return_mode ();
- for (mode = orig_mode;
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_FROM (mode, orig_mode)
{
if (code_to_optab (comparison)
&& (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
wider mode. If the integer mode is wider than the mode of FROM,
we can do the conversion signed even if the input is unsigned. */
- for (fmode = GET_MODE (to); fmode != VOIDmode;
- fmode = GET_MODE_WIDER_MODE (fmode))
- for (imode = GET_MODE (from); imode != VOIDmode;
- imode = GET_MODE_WIDER_MODE (imode))
+ FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
+ FOR_EACH_MODE_FROM (imode, GET_MODE (from))
{
int doing_unsigned = unsignedp;
least as wide as the target. Using FMODE will avoid rounding woes
with unsigned values greater than the signed maximum value. */
- for (fmode = GET_MODE (to); fmode != VOIDmode;
- fmode = GET_MODE_WIDER_MODE (fmode))
+ FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
&& can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
break;
this conversion. If the integer mode is wider than the mode of TO,
we can do the conversion either signed or unsigned. */
- for (fmode = GET_MODE (from); fmode != VOIDmode;
- fmode = GET_MODE_WIDER_MODE (fmode))
- for (imode = GET_MODE (to); imode != VOIDmode;
- imode = GET_MODE_WIDER_MODE (imode))
+ FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
+ FOR_EACH_MODE_FROM (imode, GET_MODE (to))
{
int doing_unsigned = unsignedp;
simply clears out that bit. The rest is trivial. */
if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
- for (fmode = GET_MODE (from); fmode != VOIDmode;
- fmode = GET_MODE_WIDER_MODE (fmode))
+ FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
&& (!DECIMAL_FLOAT_MODE_P (fmode)
|| GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
this conversion. If the integer mode is wider than the mode of TO,
we can do the conversion either signed or unsigned. */
- for (fmode = GET_MODE (from); fmode != VOIDmode;
- fmode = GET_MODE_WIDER_MODE (fmode))
- for (imode = GET_MODE (to); imode != VOIDmode;
- imode = GET_MODE_WIDER_MODE (imode))
+ FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
+ FOR_EACH_MODE_FROM (imode, GET_MODE (to))
{
icode = convert_optab_handler (tab, imode, fmode);
if (icode != CODE_FOR_nothing)
else if (sym == NULL_RTX && GET_MODE (reg) != BImode)
{
machine_mode narrow_mode;
- for (narrow_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- narrow_mode != VOIDmode
- && narrow_mode != GET_MODE (reg);
- narrow_mode = GET_MODE_WIDER_MODE (narrow_mode))
+ FOR_EACH_MODE_UNTIL (narrow_mode, GET_MODE (reg))
{
if (have_insn_for (STRICT_LOW_PART, narrow_mode)
&& ((reg_offset[regno] & ~GET_MODE_MASK (narrow_mode))
for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
{
machine_mode mode;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
FP_MODE_REG (i, mode) = gen_rtx_REG (mode, i);
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_COMPLEX_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_FLOAT)
FP_MODE_REG (i, mode) = gen_rtx_REG (mode, i);
}
held in REGNO. If none, we look for the largest floating-point mode.
If we still didn't find a valid mode, try CCmode. */
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
if ((unsigned) hard_regno_nregs[regno][mode] == nregs
&& HARD_REGNO_MODE_OK (regno, mode)
&& (! call_saved || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
&& GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode))
found_mode = mode;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT)
if ((unsigned) hard_regno_nregs[regno][mode] == nregs
&& HARD_REGNO_MODE_OK (regno, mode)
&& (! call_saved || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
&& GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode))
found_mode = mode;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_FLOAT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT)
if ((unsigned) hard_regno_nregs[regno][mode] == nregs
&& HARD_REGNO_MODE_OK (regno, mode)
&& (! call_saved || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
&& GET_MODE_SIZE (mode) > GET_MODE_SIZE (found_mode))
found_mode = mode;
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_INT);
- mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
if ((unsigned) hard_regno_nregs[regno][mode] == nregs
&& HARD_REGNO_MODE_OK (regno, mode)
&& (! call_saved || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
{
machine_mode mode, in_mode;
- for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
- in_mode = GET_MODE_WIDER_MODE (mode))
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (in_mode, MODE_INT)
+ FOR_EACH_MODE_UNTIL (mode, in_mode)
{
machine_mode i;
/* We are in in_mode. Count how many bits outside of mode
have to be copies of the sign-bit. */
- for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
+ FOR_EACH_MODE (i, mode, in_mode)
{
machine_mode wider = GET_MODE_WIDER_MODE (i);
return BLKmode;
/* Get the first mode which has this size, in the specified class. */
- for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, mclass)
if (GET_MODE_PRECISION (mode) == size)
return mode;
/* Get the first mode which has at least this size, in the
specified class. */
- for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, mclass)
if (GET_MODE_PRECISION (mode) >= size)
break;
/* Do not check vector_mode_supported_p here. We'll do that
later in vector_type_mode. */
- for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_FROM (mode, mode)
if (GET_MODE_NUNITS (mode) == nunits
&& GET_MODE_INNER (mode) == innermode)
break;
gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
/* Find the smallest nice mode to use. */
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
if (GET_MODE_BITSIZE (mode) >= bitsize)
break;
if (mode != VOIDmode
{
/* If optab_handler exists for div_optab, perhaps in a wider mode,
we don't want to use the libfunc even if it exists for given mode. */
- for (machine_mode div_mode = mode;
- div_mode != VOIDmode;
- div_mode = GET_MODE_WIDER_MODE (div_mode))
+ machine_mode div_mode;
+ FOR_EACH_MODE_FROM (div_mode, mode)
if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
return false;
else
mode = MIN_MODE_VECTOR_INT;
- for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_FROM (mode, mode)
if (GET_MODE_INNER (mode) == inner_mode
&& GET_MODE_NUNITS (mode) > best_nunits
&& optab_handler (op, mode) != CODE_FOR_nothing)
<= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
goto unsupported;
- rhs_mode = TYPE_MODE (rhs_type);
fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
- for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
- rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
- rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
+ FOR_EACH_2XWIDER_MODE (rhs_mode, TYPE_MODE (rhs_type))
{
+ if (GET_MODE_SIZE (rhs_mode) > fltsz)
+ break;
+
cvt_type
= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
|| GET_MODE_CLASS (GET_MODE (x)) == MODE_PARTIAL_INT)
{
- machine_mode mode = GET_MODE (x);
+ machine_mode mode;
- while ((mode = GET_MODE_WIDER_MODE (mode)) != VOIDmode
- && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD)
+ FOR_EACH_WIDER_MODE (mode, GET_MODE (x))
{
- rtx reg = simplify_subreg (mode, x, GET_MODE (x), 0);
+ if (GET_MODE_BITSIZE (mode) > BITS_PER_WORD)
+ break;
+ rtx reg = simplify_subreg (mode, x, GET_MODE (x), 0);
if (reg == NULL_RTX || !REG_P (reg))
continue;
val = cselib_lookup (reg, mode, 0, VOIDmode);