+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * machmode.h (mode_to_bits): Return a poly_uint16 rather than an
+ unsigned short.
+ (GET_MODE_BITSIZE): Return a constant if ONLY_FIXED_SIZE_MODES,
+ or if measurement_type is not polynomial.
+ * calls.c (shift_return_value): Treat GET_MODE_BITSIZE as polynomial.
+ * combine.c (make_extraction): Likewise.
+ * dse.c (find_shift_sequence): Likewise.
+ * dwarf2out.c (mem_loc_descriptor): Likewise.
+ * expmed.c (store_integral_bit_field, extract_bit_field_1): Likewise.
+ (extract_bit_field, extract_low_bits): Likewise.
+ * expr.c (convert_move, convert_modes, emit_move_insn_1): Likewise.
+ (optimize_bitfield_assignment_op, expand_assignment): Likewise.
+ (store_expr_with_bounds, store_field, expand_expr_real_1): Likewise.
+ * fold-const.c (optimize_bit_field_compare, merge_ranges): Likewise.
+ * gimple-fold.c (optimize_atomic_compare_exchange_p): Likewise.
+ * reload.c (find_reloads): Likewise.
+ * reload1.c (alter_reg): Likewise.
+ * stor-layout.c (bitwise_mode_for_mode, compute_record_mode): Likewise.
+ * targhooks.c (default_secondary_memory_needed_mode): Likewise.
+ * tree-if-conv.c (predicate_mem_writes): Likewise.
+ * tree-ssa-strlen.c (handle_builtin_memcmp): Likewise.
+ * tree-vect-patterns.c (adjust_bool_pattern): Likewise.
+ * tree-vect-stmts.c (vectorizable_simd_clone_call): Likewise.
+ * valtrack.c (dead_debug_insert_temp): Likewise.
+ * varasm.c (mergeable_constant_section): Likewise.
+ * config/sh/sh.h (LOCAL_ALIGNMENT): Use as_a <fixed_size_mode>.
+
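Most of the call-site changes listed above follow one of two idioms: comparisons of GET_MODE_BITSIZE results move from the ordinary relational operators to the poly_int predicates, and callers that genuinely need a compile-time constant extract one explicitly. A minimal sketch of both, assuming the poly_uint16 form of GET_MODE_BITSIZE shown in the machmode.h hunk below; the helper names are illustrative, not from the patch:

/* Illustrative sketch only, not part of the patch.  Assumes GCC's
   internal coretypes.h, machmode.h and poly-int.h are in scope.  */

/* Polynomial results are compared with known_eq, maybe_ne, maybe_lt
   and friends rather than ==, != or <.  */
static bool
example_same_bitsize_p (machine_mode a, machine_mode b)
{
  return known_eq (GET_MODE_BITSIZE (a), GET_MODE_BITSIZE (b));
}

/* Callers that need a compile-time constant say so explicitly, as in
   the enumerate_modes and optimize_bitfield_assignment_op hunks.  */
static bool
example_constant_bitsize_p (machine_mode mode, unsigned int *bits)
{
  return GET_MODE_BITSIZE (mode).is_constant (bits);
}
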
2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc-interface/misc.c (enumerate_modes): Treat GET_MODE_BITSIZE
+ as polynomial.
+
2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
}
/* If no predefined C types were found, register the mode itself. */
- int nunits, precision;
+ int nunits, precision, bitsize;
if (!skip_p
&& GET_MODE_NUNITS (i).is_constant (&nunits)
- && GET_MODE_PRECISION (i).is_constant (&precision))
+ && GET_MODE_PRECISION (i).is_constant (&precision)
+ && GET_MODE_BITSIZE (i).is_constant (&bitsize))
f (GET_MODE_NAME (i), digs, complex_p,
vector_p ? nunits : 0, float_rep,
- precision, GET_MODE_BITSIZE (i),
- GET_MODE_ALIGNMENT (i));
+ precision, bitsize, GET_MODE_ALIGNMENT (i));
}
}
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * c-ubsan.c (ubsan_instrument_shift): Treat GET_MODE_BITSIZE
+ as polynomial.
+
2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
/* If this is not a signed operation, don't perform overflow checks.
Also punt on bit-fields. */
if (TYPE_OVERFLOW_WRAPS (type0)
- || GET_MODE_BITSIZE (TYPE_MODE (type0)) != TYPE_PRECISION (type0)
+ || maybe_ne (GET_MODE_BITSIZE (TYPE_MODE (type0)),
+ TYPE_PRECISION (type0))
|| !sanitize_flags_p (SANITIZE_SHIFT_BASE))
;
bool
shift_return_value (machine_mode mode, bool left_p, rtx value)
{
- HOST_WIDE_INT shift;
-
gcc_assert (REG_P (value) && HARD_REGISTER_P (value));
machine_mode value_mode = GET_MODE (value);
- shift = GET_MODE_BITSIZE (value_mode) - GET_MODE_BITSIZE (mode);
- if (shift == 0)
+ poly_int64 shift = GET_MODE_BITSIZE (value_mode) - GET_MODE_BITSIZE (mode);
+
+ if (known_eq (shift, 0))
return false;
/* Use ashr rather than lshr for right shifts. This is for the benefit
are the same as for a register operation, since at present we don't
have named patterns for aligned memory structures. */
struct extraction_insn insn;
- if (get_best_reg_extraction_insn (&insn, pattern,
- GET_MODE_BITSIZE (inner_mode), mode))
+ unsigned int inner_size;
+ if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
+ && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
{
wanted_inner_reg_mode = insn.struct_mode.require ();
pos_mode = insn.pos_mode;
If it's a MEM we need to recompute POS relative to that.
However, if we're extracting from (or inserting into) a register,
we want to recompute POS relative to wanted_inner_mode. */
- int width = (MEM_P (inner)
- ? GET_MODE_BITSIZE (is_mode)
- : GET_MODE_BITSIZE (wanted_inner_mode));
+ int width;
+ if (!MEM_P (inner))
+ width = GET_MODE_BITSIZE (wanted_inner_mode);
+ else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
+ return NULL_RTX;
if (pos_rtx == 0)
pos = width - len - pos;
#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
((GET_MODE_CLASS (TYPE_MODE (TYPE)) == MODE_COMPLEX_INT \
|| GET_MODE_CLASS (TYPE_MODE (TYPE)) == MODE_COMPLEX_FLOAT) \
- ? (unsigned) MIN (BIGGEST_ALIGNMENT, GET_MODE_BITSIZE (TYPE_MODE (TYPE))) \
+ ? (unsigned) MIN (BIGGEST_ALIGNMENT, \
+ GET_MODE_BITSIZE (as_a <fixed_size_mode> \
+ (TYPE_MODE (TYPE)))) \
: (unsigned) DATA_ALIGNMENT(TYPE, ALIGN))
/* Make arrays of chars word-aligned for the same reasons. */
/* Try a wider mode if truncating the store mode to NEW_MODE
requires a real instruction. */
- if (GET_MODE_BITSIZE (new_mode) < GET_MODE_BITSIZE (store_mode)
+ if (maybe_lt (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (store_mode))
&& !TRULY_NOOP_TRUNCATION_MODES_P (new_mode, store_mode))
continue;
We output CONST_DOUBLEs as blocks. */
if (mode == VOIDmode
|| (GET_MODE (rtl) == VOIDmode
- && GET_MODE_BITSIZE (mode) != HOST_BITS_PER_DOUBLE_INT))
+ && maybe_ne (GET_MODE_BITSIZE (mode),
+ HOST_BITS_PER_DOUBLE_INT)))
break;
type_die = base_type_for_mode (mode, SCALAR_INT_MODE_P (mode));
if (type_die == NULL)
if (!MEM_P (op0)
&& !reverse
&& lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
- && bitsize == GET_MODE_BITSIZE (fieldmode)
+ && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
&& optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
{
struct expand_operand ops[2];
if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
{
scalar_mode inner_mode = GET_MODE_INNER (tmode);
- unsigned int nunits = (GET_MODE_BITSIZE (GET_MODE (op0))
- / GET_MODE_UNIT_BITSIZE (tmode));
- if (!mode_for_vector (inner_mode, nunits).exists (&new_mode)
+ poly_uint64 nunits;
+ if (!multiple_p (GET_MODE_BITSIZE (GET_MODE (op0)),
+ GET_MODE_UNIT_BITSIZE (tmode), &nunits)
+ || !mode_for_vector (inner_mode, nunits).exists (&new_mode)
|| !VECTOR_MODE_P (new_mode)
|| GET_MODE_SIZE (new_mode) != GET_MODE_SIZE (GET_MODE (op0))
|| GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode)
machine_mode mode1;
/* Handle -fstrict-volatile-bitfields in the cases where it applies. */
- if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
+ if (maybe_ne (GET_MODE_BITSIZE (GET_MODE (str_rtx)), 0))
mode1 = GET_MODE (str_rtx);
- else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
+ else if (target && maybe_ne (GET_MODE_BITSIZE (GET_MODE (target)), 0))
mode1 = GET_MODE (target);
else
mode1 = tmode;
if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
return NULL_RTX;
- if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
+ if (known_eq (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (src_mode))
&& targetm.modes_tieable_p (mode, src_mode))
{
rtx x = gen_lowpart_common (mode, src);
if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
{
- gcc_assert (GET_MODE_BITSIZE (from_mode) == GET_MODE_BITSIZE (to_mode));
+ gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
+ GET_MODE_BITSIZE (to_mode)));
if (VECTOR_MODE_P (to_mode))
from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
subreg operation. */
if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
{
- gcc_assert (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (oldmode));
+ gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
+ GET_MODE_BITSIZE (oldmode)));
return simplify_gen_subreg (mode, x, oldmode, 0);
}
only safe when simplify_subreg can convert MODE constants into integer
constants. At present, it can only do this reliably if the value
fits within a HOST_WIDE_INT. */
- if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ if (!CONSTANT_P (y)
+ || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
{
rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
machine_mode mode1, rtx str_rtx,
tree to, tree src, bool reverse)
{
+  /* str_mode is not guaranteed to be a scalar mode, so its bitsize
+     might not be a compile-time constant.  */
machine_mode str_mode = GET_MODE (str_rtx);
- unsigned int str_bitsize = GET_MODE_BITSIZE (str_mode);
+ unsigned int str_bitsize;
tree op0, op1;
rtx value, result;
optab binop;
|| !pbitregion_start.is_constant (&bitregion_start)
|| !pbitregion_end.is_constant (&bitregion_end)
|| bitsize >= BITS_PER_WORD
+ || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
|| str_bitsize > BITS_PER_WORD
|| TREE_SIDE_EFFECTS (to)
|| TREE_THIS_VOLATILE (to))
else
{
concat_store_slow:;
- rtx temp = assign_stack_temp (GET_MODE (to_rtx),
+ rtx temp = assign_stack_temp (to_mode,
GET_MODE_SIZE (GET_MODE (to_rtx)));
write_complex_part (temp, XEXP (to_rtx, 0), false);
write_complex_part (temp, XEXP (to_rtx, 1), true);
{
if (GET_MODE_CLASS (GET_MODE (target))
!= GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
- && GET_MODE_BITSIZE (GET_MODE (target))
- == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp))))
+ && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
+ GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
{
rtx t = simplify_gen_subreg (GET_MODE (target), temp,
TYPE_MODE (TREE_TYPE (exp)), 0);
{
tree type = TREE_TYPE (exp);
if (INTEGRAL_TYPE_P (type)
- && TYPE_PRECISION (type) < GET_MODE_BITSIZE (TYPE_MODE (type))
+ && maybe_ne (TYPE_PRECISION (type),
+ GET_MODE_BITSIZE (TYPE_MODE (type)))
&& known_eq (bitsize, TYPE_PRECISION (type)))
{
tree op = gimple_assign_rhs1 (nop_def);
if (known_eq (offset, 0)
&& !reverse
&& tree_fits_uhwi_p (TYPE_SIZE (type))
- && (GET_MODE_BITSIZE (DECL_MODE (base))
- == tree_to_uhwi (TYPE_SIZE (type))))
+ && known_eq (GET_MODE_BITSIZE (DECL_MODE (base)),
+ tree_to_uhwi (TYPE_SIZE (type))))
return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
target, tmode, modifier);
if (TYPE_MODE (type) == BLKmode)
|| !known_size_p (plbitsize)
|| !plbitsize.is_constant (&lbitsize)
|| !plbitpos.is_constant (&lbitpos)
- || lbitsize == GET_MODE_BITSIZE (lmode)
+ || known_eq (lbitsize, GET_MODE_BITSIZE (lmode))
|| offset != 0
|| TREE_CODE (linner) == PLACEHOLDER_EXPR
|| lvolatilep)
switch (TREE_CODE (TREE_TYPE (low0)))
{
case ENUMERAL_TYPE:
- if (TYPE_PRECISION (TREE_TYPE (low0))
- != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (low0))))
+ if (maybe_ne (TYPE_PRECISION (TREE_TYPE (low0)),
+ GET_MODE_BITSIZE
+ (TYPE_MODE (TREE_TYPE (low0)))))
break;
/* FALLTHROUGH */
case INTEGER_TYPE:
switch (TREE_CODE (TREE_TYPE (high1)))
{
case ENUMERAL_TYPE:
- if (TYPE_PRECISION (TREE_TYPE (high1))
- != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (high1))))
+ if (maybe_ne (TYPE_PRECISION (TREE_TYPE (high1)),
+ GET_MODE_BITSIZE
+ (TYPE_MODE (TREE_TYPE (high1)))))
break;
/* FALLTHROUGH */
case INTEGER_TYPE:
/* Don't optimize floating point expected vars, VIEW_CONVERT_EXPRs
might not preserve all the bits. See PR71716. */
|| SCALAR_FLOAT_TYPE_P (etype)
- || TYPE_PRECISION (etype) != GET_MODE_BITSIZE (TYPE_MODE (etype)))
+ || maybe_ne (TYPE_PRECISION (etype),
+ GET_MODE_BITSIZE (TYPE_MODE (etype))))
return false;
tree weak = gimple_call_arg (stmt, 3);
/* Return the base GET_MODE_BITSIZE value for MODE. */
-ALWAYS_INLINE unsigned short
+ALWAYS_INLINE poly_uint16
mode_to_bits (machine_mode mode)
{
return mode_to_bytes (mode) * BITS_PER_UNIT;
/* Get the size in bits of an object of mode MODE. */
-#define GET_MODE_BITSIZE(MODE) (mode_to_bits (MODE))
+#if ONLY_FIXED_SIZE_MODES
+#define GET_MODE_BITSIZE(MODE) ((unsigned short) mode_to_bits (MODE).coeffs[0])
+#else
+ALWAYS_INLINE poly_uint16
+GET_MODE_BITSIZE (machine_mode mode)
+{
+ return mode_to_bits (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
+GET_MODE_BITSIZE (const T &mode)
+{
+ return mode_to_bits (mode);
+}
+
+template<typename T>
+ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
+GET_MODE_BITSIZE (const T &mode)
+{
+ return mode_to_bits (mode).coeffs[0];
+}
+#endif
/* Get the number of value bits of an object of mode MODE. */
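The overloads above select the return type from the argument's static type: a bare machine_mode, or a mode class whose measurement_type is polynomial, yields a poly_uint16, while classes with a non-polynomial measurement_type (scalar_int_mode, fixed_size_mode and friends) still yield a plain unsigned short, which is why existing scalar comparisons such as the one in bitwise_mode_for_mode keep compiling. A hedged sketch, assuming the non-ONLY_FIXED_SIZE_MODES definitions above; the function is illustrative only:

/* Illustrative sketch only, not part of the patch.  */
static bool
example_bitsize_overloads (machine_mode m)
{
  /* machine_mode argument: polynomial result, poly predicates.  */
  poly_uint16 pbits = GET_MODE_BITSIZE (m);

  /* scalar_int_mode argument: plain constant, ordinary operators.  */
  scalar_int_mode si = SImode;
  unsigned short sbits = GET_MODE_BITSIZE (si);

  return maybe_ne (pbits, sbits) && sbits <= MAX_FIXED_MODE_SIZE;
}
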
|| (REG_P (operand)
&& REGNO (operand) >= FIRST_PSEUDO_REGISTER))
&& (WORD_REGISTER_OPERATIONS
- || ((GET_MODE_BITSIZE (GET_MODE (operand))
- < BIGGEST_ALIGNMENT)
- && paradoxical_subreg_p (operand_mode[i],
- GET_MODE (operand)))
+		       || (maybe_lt (GET_MODE_BITSIZE (GET_MODE (operand)),
+				     BIGGEST_ALIGNMENT)
+			   && paradoxical_subreg_p (operand_mode[i],
+						    GET_MODE (operand)))
|| BYTES_BIG_ENDIAN
|| ((GET_MODE_SIZE (operand_mode[i])
<= UNITS_PER_WORD)
unsigned int inherent_align = GET_MODE_ALIGNMENT (mode);
machine_mode wider_mode = wider_subreg_mode (mode, reg_max_ref_mode[i]);
poly_uint64 total_size = GET_MODE_SIZE (wider_mode);
- unsigned int min_align = GET_MODE_BITSIZE (reg_max_ref_mode[i]);
+ /* ??? Seems strange to derive the minimum alignment from the size,
+ but that's the traditional behavior. For polynomial-size modes,
+ the natural extension is to use the minimum possible size. */
+ unsigned int min_align
+ = constant_lower_bound (GET_MODE_BITSIZE (reg_max_ref_mode[i]));
poly_int64 adjust = 0;
something_was_spilled = true;
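The constant_lower_bound call is what the new comment means by the minimum possible size: it evaluates the (nonnegative) polynomial with every runtime indeterminate set to zero. A hedged illustration, assuming GCC's poly-int.h; the SVE-style 128 + 128 * n figure is only an example:

/* Illustrative sketch only, not part of the patch.  */
static unsigned int
example_min_bitsize (machine_mode mode)
{
  /* For a fixed-size mode this is simply the bitsize; for a
     variable-length vector mode of, say, 128 + 128 * n bits it
     returns 128, the size when n is zero.  */
  return constant_lower_bound (GET_MODE_BITSIZE (mode));
}
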
bitwise_mode_for_mode (machine_mode mode)
{
/* Quick exit if we already have a suitable mode. */
- unsigned int bitsize = GET_MODE_BITSIZE (mode);
scalar_int_mode int_mode;
if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
/* Reuse the sanity checks from int_mode_for_mode. */
gcc_checking_assert ((int_mode_for_mode (mode), true));
+ poly_int64 bitsize = GET_MODE_BITSIZE (mode);
+
/* Try to replace complex modes with complex modes. In general we
expect both components to be processed independently, so we only
care whether there is a register for the inner mode. */
/* Try to replace vector modes with vector modes. Also try using vector
modes if an integer mode would be too big. */
- if (VECTOR_MODE_P (mode) || bitsize > MAX_FIXED_MODE_SIZE)
+ if (VECTOR_MODE_P (mode)
+ || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
{
machine_mode trial = mode;
if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
does not apply to unions. */
if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
&& tree_fits_uhwi_p (TYPE_SIZE (type))
- && GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type)))
+ && known_eq (GET_MODE_BITSIZE (mode), tree_to_uhwi (TYPE_SIZE (type))))
;
else
mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();
default_secondary_memory_needed_mode (machine_mode mode)
{
if (!targetm.lra_p ()
- && GET_MODE_BITSIZE (mode) < BITS_PER_WORD
+ && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
&& INTEGRAL_MODE_P (mode))
return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
return mode;
tree ref, addr, ptr, mask;
gcall *new_stmt;
gimple_seq stmts = NULL;
- int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
+ machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
+ /* We checked before setting GF_PLF_2 that an equivalent
+ integer mode exists. */
+ int bitsize = GET_MODE_BITSIZE (mode).to_constant ();
ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
mark_addressable (ref);
addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
location_t loc = gimple_location (stmt2);
tree type, off;
type = build_nonstandard_integer_type (leni, 1);
- gcc_assert (GET_MODE_BITSIZE (TYPE_MODE (type)) == leni);
+ gcc_assert (known_eq (GET_MODE_BITSIZE (TYPE_MODE (type)), leni));
tree ptrtype = build_pointer_type_for_mode (char_type_node,
ptr_mode, true);
off = build_int_cst (ptrtype, 0);
gcc_assert (TREE_CODE_CLASS (rhs_code) == tcc_comparison);
if (TREE_CODE (TREE_TYPE (rhs1)) != INTEGER_TYPE
|| !TYPE_UNSIGNED (TREE_TYPE (rhs1))
- || (TYPE_PRECISION (TREE_TYPE (rhs1))
- != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
+ || maybe_ne (TYPE_PRECISION (TREE_TYPE (rhs1)),
+ GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1)))))
{
scalar_mode mode = SCALAR_TYPE_MODE (TREE_TYPE (rhs1));
itype
if (simd_clone_subparts (atype)
< simd_clone_subparts (arginfo[i].vectype))
{
- unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
+ poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
k = (simd_clone_subparts (arginfo[i].vectype)
/ simd_clone_subparts (atype));
gcc_assert ((k & (k - 1)) == 0);
if (simd_clone_subparts (vectype) < nunits)
{
unsigned int k, l;
- unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
+ poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
+ poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
k = nunits / simd_clone_subparts (vectype);
gcc_assert ((k & (k - 1)) == 0);
for (l = 0; l < k; l++)
{
t = build_fold_addr_expr (new_temp);
t = build2 (MEM_REF, vectype, t,
- build_int_cst (TREE_TYPE (t),
- l * prec / BITS_PER_UNIT));
+ build_int_cst (TREE_TYPE (t), l * bytes));
}
else
t = build3 (BIT_FIELD_REF, vectype, new_temp,
usesp = &cur->next;
*tailp = cur->next;
cur->next = NULL;
+ /* "may" rather than "must" because we want (for example)
+ N V4SFs to win over plain V4SF even though N might be 1. */
+ rtx candidate = *DF_REF_REAL_LOC (cur->use);
if (!reg
- || (GET_MODE_BITSIZE (GET_MODE (reg))
- < GET_MODE_BITSIZE (GET_MODE (*DF_REF_REAL_LOC (cur->use)))))
- reg = *DF_REF_REAL_LOC (cur->use);
+ || maybe_lt (GET_MODE_BITSIZE (GET_MODE (reg)),
+ GET_MODE_BITSIZE (GET_MODE (candidate))))
+ reg = candidate;
}
else
tailp = &(*tailp)->next;
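The "may" versus "must" wording in the new comment is the general poly_int distinction: maybe_lt (a, b) holds if a < b for at least one permitted value of the runtime indeterminates, while known_lt requires it for all of them. A hedged sketch of why that favors the variable-length candidate; the function and mode sizes are illustrative only:

/* Illustrative sketch only, not part of the patch.  With REG_MODE a
   fixed 128-bit vector and CAND_MODE a variable-length vector of
   128 + 128 * n bits (n >= 0), maybe_lt is true (n = 1 makes the
   candidate wider) even though known_lt would be false (at n = 0 the
   sizes are equal), so the variable-length candidate is preferred.  */
static bool
example_prefer_candidate_p (machine_mode reg_mode, machine_mode cand_mode)
{
  return maybe_lt (GET_MODE_BITSIZE (reg_mode),
		   GET_MODE_BITSIZE (cand_mode));
}
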
unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED,
unsigned int flags ATTRIBUTE_UNUSED)
{
- unsigned int modesize = GET_MODE_BITSIZE (mode);
-
if (HAVE_GAS_SHF_MERGE && flag_merge_constants
&& mode != VOIDmode
&& mode != BLKmode
- && modesize <= align
+ && known_le (GET_MODE_BITSIZE (mode), align)
&& align >= 8
&& align <= 256
&& (align & (align - 1)) == 0)