+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * machmode.h (int_mode_for_size): New function.
+ * builtins.c (set_builtin_user_assembler_name): Use int_mode_for_size
+ instead of mode_for_size.
+ * calls.c (save_fixed_argument_area): Likewise. Make use of BLKmode
+ explicit.
+ * combine.c (expand_field_assignment): Use int_mode_for_size
+ instead of mode_for_size.
+ (make_extraction): Likewise.
+ (simplify_shift_const_1): Likewise.
+ (simplify_comparison): Likewise.
+ * dojump.c (do_jump): Likewise.
+ * dwarf2out.c (mem_loc_descriptor): Likewise.
+ * emit-rtl.c (init_derived_machine_modes): Likewise.
+ * expmed.c (flip_storage_order): Likewise.
+ (convert_extracted_bit_field): Likewise.
+ * expr.c (copy_blkmode_from_reg): Likewise.
+ * graphite-isl-ast-to-gimple.c (max_mode_int_precision): Likewise.
+ * internal-fn.c (expand_mul_overflow): Likewise.
+ * lower-subreg.c (simple_move): Likewise.
+ * optabs-libfuncs.c (init_optabs): Likewise.
+ * simplify-rtx.c (simplify_unary_operation_1): Likewise.
+ * tree.c (vector_type_mode): Likewise.
+ * tree-ssa-strlen.c (handle_builtin_memcmp): Likewise.
+ * tree-vect-data-refs.c (vect_lanes_optab_supported_p): Likewise.
+ * tree-vect-generic.c (expand_vector_parallel): Likewise.
+	* tree-vect-stmts.c (vectorizable_store): Likewise.
+	(vectorizable_load): Likewise.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc-interface/decl.c (gnat_to_gnu_entity): Use int_mode_for_size
+ instead of mode_for_size.
+ (gnat_to_gnu_subprog_type): Likewise.
+ * gcc-interface/utils.c (make_type_from_size): Likewise.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
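The hunks that follow all apply the same mechanical conversion.  As a rough
guide (illustrative sketch only, not part of the patch; old_style/new_style
are hypothetical names and "bits" stands for whatever size expression each
caller uses), the old and new idioms look like this:

/* Before: mode_for_size returns BLKmode when no MODE_INT mode of the
   requested width exists, so callers compare against BLKmode.  */
machine_mode
old_style (unsigned int bits)
{
  machine_mode m = mode_for_size (bits, MODE_INT, 0);
  if (m == BLKmode)
    return word_mode;		/* caller-specific fallback */
  return m;
}

/* After: int_mode_for_size returns an opt_scalar_int_mode; failure is
   tested with .exists () and success yields a scalar_int_mode.  */
machine_mode
new_style (unsigned int bits)
{
  scalar_int_mode m;
  if (!int_mode_for_size (bits, 0).exists (&m))
    return word_mode;		/* caller-specific fallback */
  return m;
}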
/* True if we make a dummy type here. */
bool made_dummy = false;
/* The mode to be used for the pointer type. */
- machine_mode p_mode = mode_for_size (esize, MODE_INT, 0);
+ scalar_int_mode p_mode;
/* The GCC type used for the designated type. */
tree gnu_desig_type = NULL_TREE;
- if (!targetm.valid_pointer_mode (p_mode))
+ if (!int_mode_for_size (esize, 0).exists (&p_mode)
+ || !targetm.valid_pointer_mode (p_mode))
p_mode = ptr_mode;
/* If either the designated type or its full view is an unconstrained
unsigned int size
= TREE_INT_CST_LOW (TYPE_SIZE (gnu_cico_return_type));
unsigned int i = BITS_PER_UNIT;
- machine_mode mode;
+ scalar_int_mode mode;
while (i < size)
i <<= 1;
- mode = mode_for_size (i, MODE_INT, 0);
- if (mode != BLKmode)
+ if (int_mode_for_size (i, 0).exists (&mode))
{
SET_TYPE_MODE (gnu_cico_return_type, mode);
SET_TYPE_ALIGN (gnu_cico_return_type,
may need to return the thin pointer. */
if (TYPE_FAT_POINTER_P (type) && size < POINTER_SIZE * 2)
{
- machine_mode p_mode = mode_for_size (size, MODE_INT, 0);
- if (!targetm.valid_pointer_mode (p_mode))
+ scalar_int_mode p_mode;
+ if (!int_mode_for_size (size, 0).exists (&p_mode)
+ || !targetm.valid_pointer_mode (p_mode))
p_mode = ptr_mode;
return
build_pointer_type_for_mode
if (DECL_FUNCTION_CODE (decl) == BUILT_IN_FFS
&& INT_TYPE_SIZE < BITS_PER_WORD)
{
+ scalar_int_mode mode = int_mode_for_size (INT_TYPE_SIZE, 0).require ();
set_user_assembler_libfunc ("ffs", asmspec);
- set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
- "ffs");
+ set_optab_libfunc (ffs_optab, mode, "ffs");
}
}
*high_to_save = high;
num_to_save = high - low + 1;
- save_mode = mode_for_size (num_to_save * BITS_PER_UNIT, MODE_INT, 1);
/* If we don't have the required alignment, must do this
in BLKmode. */
- if ((low & (MIN (GET_MODE_SIZE (save_mode),
- BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)))
+ scalar_int_mode imode;
+ if (int_mode_for_size (num_to_save * BITS_PER_UNIT, 1).exists (&imode)
+ && (low & (MIN (GET_MODE_SIZE (imode),
+ BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)) == 0)
+ save_mode = imode;
+ else
save_mode = BLKmode;
if (ARGS_GROW_DOWNWARD)
/* Don't attempt bitwise arithmetic on non scalar integer modes. */
if (! SCALAR_INT_MODE_P (compute_mode))
{
- machine_mode imode;
-
/* Don't do anything for vector or complex integral types. */
if (! FLOAT_MODE_P (compute_mode))
break;
/* Try to find an integral mode to pun with. */
- imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
- if (imode == BLKmode)
+ if (!int_mode_for_size (GET_MODE_BITSIZE (compute_mode), 0)
+ .exists (&compute_mode))
break;
- compute_mode = imode;
- inner = gen_lowpart (imode, inner);
+ inner = gen_lowpart (compute_mode, inner);
}
/* Compute a mask of LEN bits, if we can do this on the host machine. */
machine_mode wanted_inner_reg_mode = word_mode;
machine_mode pos_mode = word_mode;
machine_mode extraction_mode = word_mode;
- machine_mode tmode = mode_for_size (len, MODE_INT, 1);
rtx new_rtx = 0;
rtx orig_pos_rtx = pos_rtx;
HOST_WIDE_INT orig_pos;
For MEM, we can avoid an extract if the field starts on an appropriate
boundary and we can change the mode of the memory reference. */
- if (tmode != BLKmode
+ scalar_int_mode tmode;
+ if (int_mode_for_size (len, 1).exists (&tmode)
&& ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
&& !MEM_P (inner)
&& (pos == 0 || REG_P (inner))
&& ! mode_dependent_address_p (XEXP (varop, 0),
MEM_ADDR_SPACE (varop))
&& ! MEM_VOLATILE_P (varop)
- && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
- MODE_INT, 1)) != BLKmode)
+ && (int_mode_for_size (GET_MODE_BITSIZE (mode) - count, 1)
+ .exists (&tmode)))
{
new_rtx = adjust_address_nv (varop, tmode,
BYTES_BIG_ENDIAN ? 0
& GET_MODE_MASK (mode))
+ 1)) >= 0
&& const_op >> i == 0
- && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
+ && int_mode_for_size (i, 1).exists (&tmode))
{
op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
continue;
&& CONST_INT_P (XEXP (op0, 1))
&& GET_CODE (XEXP (op0, 0)) == ASHIFT
&& XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
- && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
- MODE_INT, 1)) != BLKmode
+ && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
+ .exists (&tmode))
&& (((unsigned HOST_WIDE_INT) const_op
+ (GET_MODE_MASK (tmode) >> 1) + 1)
<= GET_MODE_MASK (tmode)))
&& CONST_INT_P (XEXP (XEXP (op0, 0), 1))
&& GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
&& XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
- && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
- MODE_INT, 1)) != BLKmode
+ && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
+ .exists (&tmode))
&& (((unsigned HOST_WIDE_INT) const_op
+ (GET_MODE_MASK (tmode) >> 1) + 1)
<= GET_MODE_MASK (tmode)))
&& TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
&& (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
- && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
+ && int_mode_for_size (i + 1, 0).exists (&mode)
&& (type = lang_hooks.types.type_for_mode (mode, 1)) != 0
&& TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
&& have_insn_for (COMPARE, TYPE_MODE (type)))
|| GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT))
{
dw_die_ref type_die = base_type_for_mode (mode, 1);
- machine_mode amode;
+ scalar_int_mode amode;
if (type_die == NULL)
return NULL;
- amode = mode_for_size (DWARF2_ADDR_SIZE * BITS_PER_UNIT,
- MODE_INT, 0);
if (INTVAL (rtl) >= 0
- && amode != BLKmode
+ && (int_mode_for_size (DWARF2_ADDR_SIZE * BITS_PER_UNIT, 0)
+ .exists (&amode))
&& trunc_int_for_mode (INTVAL (rtl), amode) == INTVAL (rtl)
/* const DW_OP_convert <XXX> vs.
DW_OP_const_type <XXX, 1, const>. */
byte_mode = opt_byte_mode.require ();
word_mode = opt_word_mode.require ();
- ptr_mode = as_a <scalar_int_mode> (mode_for_size (POINTER_SIZE,
- MODE_INT, 0));
+ ptr_mode = int_mode_for_size (POINTER_SIZE, 0).require ();
}
/* Create some permanent unique rtl objects shared between all functions. */
rtx
flip_storage_order (machine_mode mode, rtx x)
{
- machine_mode int_mode;
+ scalar_int_mode int_mode;
rtx result;
if (mode == QImode)
if (__builtin_expect (reverse_storage_order_supported < 0, 0))
check_reverse_storage_order_support ();
- if (SCALAR_INT_MODE_P (mode))
- int_mode = mode;
- else
+ if (!is_a <scalar_int_mode> (mode, &int_mode))
{
if (FLOAT_MODE_P (mode)
&& __builtin_expect (reverse_float_storage_order_supported < 0, 0))
check_reverse_float_storage_order_support ();
- int_mode = mode_for_size (GET_MODE_PRECISION (mode), MODE_INT, 0);
- if (int_mode == BLKmode)
+ if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
{
sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
return x;
value via a SUBREG. */
if (!SCALAR_INT_MODE_P (tmode))
{
- machine_mode smode;
-
- smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
- x = convert_to_mode (smode, x, unsignedp);
- x = force_reg (smode, x);
+ scalar_int_mode int_mode
+ = int_mode_for_size (GET_MODE_BITSIZE (tmode), 0).require ();
+ x = convert_to_mode (int_mode, x, unsignedp);
+ x = force_reg (int_mode, x);
return gen_lowpart (tmode, x);
}
copy_mode = word_mode;
if (MEM_P (target))
{
- machine_mode mem_mode = mode_for_size (bitsize, MODE_INT, 1);
- if (mem_mode != BLKmode)
- copy_mode = mem_mode;
+ opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
+ if (mem_mode.exists ())
+ copy_mode = mem_mode.require ();
}
else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
copy_mode = tmode;
should use isl to derive the optimal type for each subexpression. */
static int max_mode_int_precision =
- GET_MODE_PRECISION (mode_for_size (MAX_FIXED_MODE_SIZE, MODE_INT, 0));
+ GET_MODE_PRECISION (int_mode_for_size (MAX_FIXED_MODE_SIZE, 0).require ());
static int graphite_expression_type_precision = 128 <= max_mode_int_precision ?
128 : max_mode_int_precision;
{
struct separate_ops ops;
int prec = GET_MODE_PRECISION (mode);
- machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
+ scalar_int_mode hmode;
machine_mode wmode;
ops.op0 = make_tree (type, op0);
ops.op1 = make_tree (type, op1);
profile_probability::very_likely ());
}
}
- else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
+ else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
+ && 2 * GET_MODE_PRECISION (hmode) == prec)
{
rtx_code_label *large_op0 = gen_label_rtx ();
rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
size. */
mode = GET_MODE (SET_DEST (set));
if (!SCALAR_INT_MODE_P (mode)
- && (mode_for_size (GET_MODE_SIZE (mode) * BITS_PER_UNIT, MODE_INT, 0)
- == BLKmode))
+ && !int_mode_for_size (GET_MODE_BITSIZE (mode), 0).exists ())
return NULL_RTX;
/* Reject PARTIAL_INT modes. They are used for processor specific
extern machine_mode mode_for_size (unsigned int, enum mode_class, int);
+/* Return the machine mode to use for a MODE_INT of SIZE bits, if one
+ exists. If LIMIT is nonzero, modes wider than MAX_FIXED_MODE_SIZE
+ will not be used. */
+
+inline opt_scalar_int_mode
+int_mode_for_size (unsigned int size, int limit)
+{
+ return dyn_cast <scalar_int_mode> (mode_for_size (size, MODE_INT, limit));
+}
+
/* Return the machine mode to use for a MODE_FLOAT of SIZE bits, if one
exists. */
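As a usage sketch (illustrative only; the demo_* functions are hypothetical),
the converted callers in this patch consume the opt_scalar_int_mode result in
three ways:

/* 1. Fall back when no MODE_INT mode of that width exists, as in
      save_fixed_argument_area and copy_blkmode_from_reg.  */
machine_mode
demo_copy_mode (unsigned int bitsize)
{
  scalar_int_mode imode;
  if (int_mode_for_size (bitsize, 1).exists (&imode))
    return imode;
  return BLKmode;
}

/* 2. Require a mode that is known to exist, as in
      init_derived_machine_modes and expand_vector_parallel.  */
scalar_int_mode
demo_int_mode (void)
{
  return int_mode_for_size (INT_TYPE_SIZE, 0).require ();
}

/* 3. Test for existence only, without needing the mode itself,
      as in simple_move.  */
bool
demo_has_int_mode_p (machine_mode mode)
{
  return int_mode_for_size (GET_MODE_BITSIZE (mode), 0).exists ();
}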
/* The ffs function operates on `int'. Fall back on it if we do not
have a libgcc2 function for that width. */
if (INT_TYPE_SIZE < BITS_PER_WORD)
- set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
- "ffs");
+ {
+ scalar_int_mode mode = int_mode_for_size (INT_TYPE_SIZE, 0).require ();
+ set_optab_libfunc (ffs_optab, mode, "ffs");
+ }
/* Explicitly initialize the bswap libfuncs since we need them to be
valid for things other than word_mode. */
&& XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
&& GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
{
- machine_mode tmode
- = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
- - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+ scalar_int_mode tmode;
gcc_assert (GET_MODE_BITSIZE (mode)
> GET_MODE_BITSIZE (GET_MODE (op)));
- if (tmode != BLKmode)
+ if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
{
rtx inner =
rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
&& XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
&& GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
{
- machine_mode tmode
- = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
- - INTVAL (XEXP (op, 1)), MODE_INT, 1);
- if (tmode != BLKmode)
+ scalar_int_mode tmode;
+ if (int_mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
+ - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
{
rtx inner =
rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
unsigned align1 = get_pointer_alignment (arg1);
unsigned align2 = get_pointer_alignment (arg2);
unsigned align = MIN (align1, align2);
- machine_mode mode = mode_for_size (leni, MODE_INT, 1);
- if (mode != BLKmode
+ scalar_int_mode mode;
+ if (int_mode_for_size (leni, 1).exists (&mode)
&& (align >= leni || !SLOW_UNALIGNED_ACCESS (mode, align)))
{
location_t loc = gimple_location (stmt2);
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
tree vectype, unsigned HOST_WIDE_INT count)
{
- machine_mode mode, array_mode;
+ machine_mode mode;
+ scalar_int_mode array_mode;
bool limit_p;
mode = TYPE_MODE (vectype);
limit_p = !targetm.array_mode_supported_p (mode, count);
- array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
- MODE_INT, limit_p);
-
- if (array_mode == BLKmode)
+ if (!int_mode_for_size (count * GET_MODE_BITSIZE (mode),
+ limit_p).exists (&array_mode))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
enum tree_code code)
{
tree result, compute_type;
- machine_mode mode;
int n_words = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD;
location_t loc = gimple_location (gsi_stmt (*gsi));
else
{
/* Use a single scalar operation with a mode no wider than word_mode. */
- mode = mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), MODE_INT, 0);
+ scalar_int_mode mode
+ = int_mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), 0).require ();
compute_type = lang_hooks.types.type_for_mode (mode, 1);
result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code, type);
warning_at (loc, OPT_Wvector_operation_performance,
supported. */
unsigned lsize
= group_size * GET_MODE_BITSIZE (elmode);
- elmode = mode_for_size (lsize, MODE_INT, 0);
+ elmode = int_mode_for_size (lsize, 0).require ();
vmode = mode_for_vector (elmode, nunits / group_size);
/* If we can't construct such a vector fall back to
element extracts from the original vector type and
to a larger load. */
unsigned lsize
= group_size * TYPE_PRECISION (TREE_TYPE (vectype));
- elmode = mode_for_size (lsize, MODE_INT, 0);
+ elmode = int_mode_for_size (lsize, 0).require ();
vmode = mode_for_vector (elmode, nunits / group_size);
/* If we can't construct such a vector fall back to
element loads of the original vector type. */
/* For integers, try mapping it to a same-sized scalar mode. */
if (GET_MODE_CLASS (innermode) == MODE_INT)
{
- mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
- * GET_MODE_BITSIZE (innermode), MODE_INT, 0);
-
- if (mode != VOIDmode && have_regs_of_mode[mode])
+ unsigned int size = (TYPE_VECTOR_SUBPARTS (t)
+ * GET_MODE_BITSIZE (innermode));
+ scalar_int_mode mode;
+ if (int_mode_for_size (size, 0).exists (&mode)
+ && have_regs_of_mode[mode])
return mode;
}