+2017-09-05 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * builtins.c (expand_builtin_powi): Use int_mode_for_size.
+ (get_builtin_sync_mode): Likewise.
+ (expand_ifn_atomic_compare_exchange): Likewise.
+ (expand_builtin_atomic_clear): Likewise.
+ (expand_builtin_atomic_test_and_set): Likewise.
+ (fold_builtin_atomic_always_lock_free): Likewise.
+ * calls.c (compute_argument_addresses): Likewise.
+ (emit_library_call_value_1): Likewise.
+ (store_one_arg): Likewise.
+ * combine.c (combine_instructions): Likewise.
+ * config/aarch64/aarch64.c (aarch64_function_value): Likewise.
+ * config/arm/arm.c (arm_function_value): Likewise.
+ (aapcs_allocate_return_reg): Likewise.
+ * config/c6x/c6x.c (c6x_expand_movmem): Likewise.
+ * config/i386/i386.c (construct_container): Likewise.
+ (ix86_gimplify_va_arg): Likewise.
+ (ix86_expand_sse_cmp): Likewise.
+ (emit_memmov): Likewise.
+ (emit_memset): Likewise.
+ (expand_small_movmem_or_setmem): Likewise.
+ (ix86_expand_pextr): Likewise.
+ (ix86_expand_pinsr): Likewise.
+ * config/lm32/lm32.c (lm32_block_move_inline): Likewise.
+ * config/microblaze/microblaze.c (microblaze_block_move_straight):
+ Likewise.
+	* config/mips/mips.c (mips_function_value_1): Likewise.
+ (mips_block_move_straight): Likewise.
+ (mips_expand_ins_as_unaligned_store): Likewise.
+ * config/powerpcspe/powerpcspe.c
+ (rs6000_darwin64_record_arg_advance_flush): Likewise.
+ (rs6000_darwin64_record_arg_flush): Likewise.
+ * config/rs6000/rs6000.c
+ (rs6000_darwin64_record_arg_advance_flush): Likewise.
+ (rs6000_darwin64_record_arg_flush): Likewise.
+ * config/sparc/sparc.c (sparc_function_arg_1): Likewise.
+ (sparc_function_value_1): Likewise.
+ * config/spu/spu.c (adjust_operand): Likewise.
+ (spu_emit_branch_or_set): Likewise.
+ (arith_immediate_p): Likewise.
+ * emit-rtl.c (gen_lowpart_common): Likewise.
+ * expr.c (expand_expr_real_1): Likewise.
+ * function.c (assign_parm_setup_block): Likewise.
+ * gimple-ssa-store-merging.c (encode_tree_to_bitpos): Likewise.
+ * reload1.c (alter_reg): Likewise.
+ * stor-layout.c (mode_for_vector): Likewise.
+ (layout_type): Likewise.
+
2017-09-05 Richard Sandiford <richard.sandiford@linaro.org>

	* config/spu/spu.c (exp2_immediate_p): Use int_mode_for_mode.
+2017-09-05 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * gcc-interface/utils2.c (build_load_modify_store):
+ Use int_mode_for_size.
+
2017-09-05 Eric Botcazou <ebotcazou@adacore.com>

	PR ada/62235
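Every hunk below follows one interface change: int_mode_for_size now returns
an opt_mode<scalar_int_mode> rather than a raw machine_mode, and each caller
picks the accessor that matches its old behaviour. require () is used where a
mode of the requested size is guaranteed to exist, exists (&mode) where the
caller handles failure explicitly, and else_blk () where the old code relied
on mode_for_size returning BLKmode. As a minimal standalone sketch of that
pattern (an illustration only, not GCC's actual machmode.h class; the mode
list and model_int_mode_for_size are simplified stand-ins):

#include <cassert>

/* Toy mode enumeration standing in for GCC's machine_mode.  */
enum machine_mode { VOIDmode, BLKmode, QImode, HImode, SImode, DImode };

/* Model of the opt_mode wrapper: holds either a mode or "no mode".  */
class opt_mode
{
  machine_mode m_mode;
public:
  opt_mode () : m_mode (VOIDmode) {}
  opt_mode (machine_mode m) : m_mode (m) {}

  /* The caller knows the mode exists; fail loudly otherwise.  */
  machine_mode require () const
  {
    assert (m_mode != VOIDmode);
    return m_mode;
  }

  /* Report success, optionally handing back the mode.  */
  bool exists (machine_mode *mode = nullptr) const
  {
    if (m_mode == VOIDmode)
      return false;
    if (mode)
      *mode = m_mode;
    return true;
  }

  /* Keep the historical mode_for_size behaviour of returning BLKmode.  */
  machine_mode else_blk () const
  {
    return m_mode == VOIDmode ? BLKmode : m_mode;
  }
};

/* Hypothetical stand-in for int_mode_for_size: only a few widths exist.  */
static opt_mode model_int_mode_for_size (unsigned int bits)
{
  switch (bits)
    {
    case 8:  return QImode;
    case 16: return HImode;
    case 32: return SImode;
    case 64: return DImode;
    default: return opt_mode ();  /* e.g. 24 bits: no integer mode.  */
    }
}

int main ()
{
  machine_mode m;
  assert (model_int_mode_for_size (32).require () == SImode);
  assert (!model_int_mode_for_size (24).exists (&m));
  assert (model_int_mode_for_size (24).else_blk () == BLKmode);
  return 0;
}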
{
unsigned int size = tree_to_uhwi (TYPE_SIZE (type));
type = copy_type (type);
- SET_TYPE_MODE (type, mode_for_size (size, MODE_INT, 0));
+ machine_mode mode = int_mode_for_size (size, 0).else_blk ();
+ SET_TYPE_MODE (type, mode);
}
/* Create the temporary by inserting a SAVE_EXPR. */
/* Emit a libcall to libgcc. */
/* Mode of the 2nd argument must match that of an int. */
- mode2 = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
+ mode2 = int_mode_for_size (INT_TYPE_SIZE, 0).require ();
if (target == NULL_RTX)
target = gen_reg_rtx (mode);
{
/* The size is not negotiable, so ask not to get BLKmode in return
if the target indicates that a smaller size would be better. */
- return mode_for_size (BITS_PER_UNIT << fcode_diff, MODE_INT, 0);
+ return int_mode_for_size (BITS_PER_UNIT << fcode_diff, 0).require ();
}
/* Expand the memory expression LOC and return the appropriate memory operand
{
int size = tree_to_shwi (gimple_call_arg (call, 3)) & 255;
gcc_assert (size == 1 || size == 2 || size == 4 || size == 8 || size == 16);
- machine_mode mode = mode_for_size (BITS_PER_UNIT * size, MODE_INT, 0);
+ machine_mode mode = int_mode_for_size (BITS_PER_UNIT * size, 0).require ();
rtx expect, desired, mem, oldval, boolret;
enum memmodel success, failure;
tree lhs;
rtx mem, ret;
enum memmodel model;
- mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+ mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require ();
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
enum memmodel model;
machine_mode mode;
- mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+ mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require ();
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
if (TREE_CODE (arg0) != INTEGER_CST)
return NULL_TREE;
+ /* We need a corresponding integer mode for the access to be lock-free. */
size = INTVAL (expand_normal (arg0)) * BITS_PER_UNIT;
- mode = mode_for_size (size, MODE_INT, 0);
+ if (!int_mode_for_size (size, 0).exists (&mode))
+ return boolean_false_node;
+
mode_align = GET_MODE_ALIGNMENT (mode);
if (TREE_CODE (arg1) == INTEGER_CST)
/* Only part of the parameter is being passed on the stack.
Generate a simple memory reference of the correct size. */
units_on_stack = args[i].locate.size.constant;
- partial_mode = mode_for_size (units_on_stack * BITS_PER_UNIT,
- MODE_INT, 1);
+ unsigned int bits_on_stack = units_on_stack * BITS_PER_UNIT;
+ partial_mode = int_mode_for_size (bits_on_stack, 1).else_blk ();
args[i].stack = gen_rtx_MEM (partial_mode, addr);
set_mem_size (args[i].stack, units_on_stack);
}
unsigned int size
= argvec[argnum].locate.size.constant * BITS_PER_UNIT;
machine_mode save_mode
- = mode_for_size (size, MODE_INT, 1);
+ = int_mode_for_size (size, 1).else_blk ();
rtx adr
= plus_constant (Pmode, argblock,
argvec[argnum].locate.offset.constant);
{
/* We need to make a save area. */
unsigned int size = arg->locate.size.constant * BITS_PER_UNIT;
- machine_mode save_mode = mode_for_size (size, MODE_INT, 1);
+ machine_mode save_mode
+ = int_mode_for_size (size, 1).else_blk ();
rtx adr = memory_address (save_mode, XEXP (arg->stack_slot, 0));
rtx stack_area = gen_rtx_MEM (save_mode, adr);
/* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
-static machine_mode nonzero_bits_mode;
+static scalar_int_mode nonzero_bits_mode;
/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
be safely used. It is zero while computing them and after combine has
uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
gcc_obstack_init (&insn_link_obstack);
- nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
+ nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
/* Don't use reg_stat[].nonzero_bits when computing it. This can cause
problems when, for example, we have j <<= 1 in a loop. */
if (size % UNITS_PER_WORD != 0)
{
size += UNITS_PER_WORD - size % UNITS_PER_WORD;
- mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ mode = int_mode_for_size (size * BITS_PER_UNIT, 0).require ();
}
}
if (size % UNITS_PER_WORD != 0)
{
size += UNITS_PER_WORD - size % UNITS_PER_WORD;
- mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ mode = int_mode_for_size (size * BITS_PER_UNIT, 0).require ();
}
}
if (size % UNITS_PER_WORD != 0)
{
size += UNITS_PER_WORD - size % UNITS_PER_WORD;
- mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ mode = int_mode_for_size (size * BITS_PER_UNIT, 0).require ();
}
}
if (dst_size > src_size)
dst_size = src_size;
- srcmode = mode_for_size (src_size * BITS_PER_UNIT, MODE_INT, 0);
- dstmode = mode_for_size (dst_size * BITS_PER_UNIT, MODE_INT, 0);
+ srcmode = int_mode_for_size (src_size * BITS_PER_UNIT, 0).require ();
+ dstmode = int_mode_for_size (dst_size * BITS_PER_UNIT, 0).require ();
if (src_size >= 4)
reg_lowpart = reg = gen_reg_rtx (srcmode);
else
case X86_64_INTEGERSI_CLASS:
/* Merge TImodes on aligned occasions here too. */
if (i * 8 + 8 > bytes)
- tmpmode
- = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
+ {
+ unsigned int tmpbits = (bytes - i * 8) * BITS_PER_UNIT;
+ if (!int_mode_for_size (tmpbits, 0).exists (&tmpmode))
+	      /* We've requested 24 bytes we don't have a mode for.
+		 Use DImode.  */
+ tmpmode = DImode;
+ }
else if (regclass[i] == X86_64_INTEGERSI_CLASS)
tmpmode = SImode;
else
tmpmode = DImode;
- /* We've requested 24 bytes we
- don't have mode for. Use DImode. */
- if (tmpmode == BLKmode)
- tmpmode = DImode;
exp [nexps++]
= gen_rtx_EXPR_LIST (VOIDmode,
gen_rtx_REG (tmpmode, *intreg),
if (prev_size + cur_size > size)
{
cur_size = size - prev_size;
- mode = mode_for_size (cur_size * BITS_PER_UNIT, MODE_INT, 1);
- if (mode == BLKmode)
+ unsigned int nbits = cur_size * BITS_PER_UNIT;
+ if (!int_mode_for_size (nbits, 1).exists (&mode))
mode = QImode;
}
piece_type = lang_hooks.types.type_for_mode (mode, 1);
if (GET_MODE_SIZE (cmp_ops_mode) == 64)
{
- cmp_mode = mode_for_size (GET_MODE_NUNITS (cmp_ops_mode), MODE_INT, 0);
- gcc_assert (cmp_mode != BLKmode);
-
+ unsigned int nbits = GET_MODE_NUNITS (cmp_ops_mode);
+ cmp_mode = int_mode_for_size (nbits, 0).require ();
maskcmp = true;
}
else
	 Start with the biggest power of 2 not greater than SIZE_TO_MOVE and
	 halve it until a move of that size is supported.  */
piece_size = 1 << floor_log2 (size_to_move);
- move_mode = mode_for_size (piece_size * BITS_PER_UNIT, MODE_INT, 0);
- code = optab_handler (mov_optab, move_mode);
- while (code == CODE_FOR_nothing && piece_size > 1)
+ while (!int_mode_for_size (piece_size * BITS_PER_UNIT, 0).exists (&move_mode)
+ || (code = optab_handler (mov_optab, move_mode)) == CODE_FOR_nothing)
{
+ gcc_assert (piece_size > 1);
piece_size >>= 1;
- move_mode = mode_for_size (piece_size * BITS_PER_UNIT, MODE_INT, 0);
- code = optab_handler (mov_optab, move_mode);
}
/* Find the corresponding vector mode with the same size as MOVE_MODE.
move_mode = QImode;
if (size_to_move < GET_MODE_SIZE (move_mode))
{
- move_mode = mode_for_size (size_to_move * BITS_PER_UNIT, MODE_INT, 0);
+ unsigned int move_bits = size_to_move * BITS_PER_UNIT;
+ move_mode = int_mode_for_size (move_bits, 0).require ();
promoted_val = gen_lowpart (move_mode, promoted_val);
}
piece_size = GET_MODE_SIZE (move_mode);
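The loop in emit_memmov above searches downward from the largest power-of-two
piece for a size whose integer mode both exists and has a move pattern. A
self-contained sketch of that search, with a hypothetical move_supported
predicate standing in for the int_mode_for_size/optab_handler pair:

#include <cassert>

/* Hypothetical target capability: assume pieces of up to 8 bytes can be
   moved in a single instruction.  */
static bool move_supported (unsigned int piece_size)
{
  return piece_size <= 8;
}

/* Start with the biggest power of 2 not greater than SIZE_TO_MOVE and
   halve it until a supported piece size is found, mirroring the loop in
   the hunk above.  */
static unsigned int choose_piece_size (unsigned int size_to_move)
{
  unsigned int piece_size = 1u << (31 - __builtin_clz (size_to_move));
  while (!move_supported (piece_size))
    {
      assert (piece_size > 1);  /* mirrors the gcc_assert in the hunk */
      piece_size >>= 1;
    }
  return piece_size;
}

int main ()
{
  /* floor_log2 (24) = 4, so the search starts at 16; 16 is unsupported
     here, so the loop halves once and settles on 8.  */
  assert (choose_piece_size (24) == 8);
  return 0;
}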
rtx done_label, bool issetmem)
{
rtx_code_label *label = ix86_expand_aligntest (count, size, false);
- machine_mode mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 1);
+ machine_mode mode = int_mode_for_size (size * BITS_PER_UNIT, 1).else_blk ();
rtx modesize;
int n;
machine_mode srcmode, dstmode;
rtx d, pat;
- dstmode = mode_for_size (size, MODE_INT, 0);
+ if (!int_mode_for_size (size, 0).exists (&dstmode))
+ return false;
switch (dstmode)
{
rtx (*pinsr)(rtx, rtx, rtx, rtx);
rtx d;
- srcmode = mode_for_size (size, MODE_INT, 0);
+ if (!int_mode_for_size (size, 0).exists (&srcmode))
+ return false;
switch (srcmode)
{
break;
}
- mode = mode_for_size (bits, MODE_INT, 0);
+ mode = int_mode_for_size (bits, 0).require ();
delta = bits / BITS_PER_UNIT;
/* Allocate a buffer for the temporary registers. */
rtx *regs;
bits = BITS_PER_WORD;
- mode = mode_for_size (bits, MODE_INT, 0);
+ mode = int_mode_for_size (bits, 0).require ();
delta = bits / BITS_PER_UNIT;
/* Allocate a buffer for the temporary registers. */
if (size % UNITS_PER_WORD != 0)
{
size += UNITS_PER_WORD - size % UNITS_PER_WORD;
- mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ mode = int_mode_for_size (size * BITS_PER_UNIT, 0).require ();
}
}
bits = BITS_PER_WORD;
}
- mode = mode_for_size (bits, MODE_INT, 0);
+ mode = int_mode_for_size (bits, 0).require ();
delta = bits / BITS_PER_UNIT;
/* Allocate a buffer for the temporary registers. */
if (!mips_get_unaligned_mem (dest, width, bitpos, &left, &right))
return false;
- mode = mode_for_size (width, MODE_INT, 0);
+ mode = int_mode_for_size (width, 0).require ();
src = gen_lowpart (mode, src);
if (mode == DImode)
{
{
unsigned int startbit, endbit;
int intregs, intoffset;
- machine_mode mode;
/* Handle the situations where a float is taking up the first half
of the GPR, and the other half is empty (typically due to
if (intoffset % BITS_PER_WORD != 0)
{
- mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
- MODE_INT, 0);
- if (mode == BLKmode)
+ unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
+ if (!int_mode_for_size (bits, 0).exists ())
{
/* We couldn't find an appropriate mode, which happens,
e.g., in packed structs when there are 3 bytes to load.
if (intoffset % BITS_PER_WORD != 0)
{
- mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
- MODE_INT, 0);
- if (mode == BLKmode)
+ unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
+ if (!int_mode_for_size (bits, 0).exists (&mode))
{
/* We couldn't find an appropriate mode, which happens,
e.g., in packed structs when there are 3 bytes to load.
{
unsigned int startbit, endbit;
int intregs, intoffset;
- machine_mode mode;
/* Handle the situations where a float is taking up the first half
of the GPR, and the other half is empty (typically due to
if (intoffset % BITS_PER_WORD != 0)
{
- mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
- MODE_INT, 0);
- if (mode == BLKmode)
+ unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
+ if (!int_mode_for_size (bits, 0).exists ())
{
/* We couldn't find an appropriate mode, which happens,
e.g., in packed structs when there are 3 bytes to load.
if (intoffset % BITS_PER_WORD != 0)
{
- mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
- MODE_INT, 0);
- if (mode == BLKmode)
+ unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
+ if (!int_mode_for_size (bits, 0).exists (&mode))
{
/* We couldn't find an appropriate mode, which happens,
e.g., in packed structs when there are 3 bytes to load.
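The three-byte case the comment mentions is concrete: with 64-bit words, a
packed record whose integer fields stop 24 bits short of a word boundary
leaves a remainder that matches no scalar integer mode (the candidates cover
8, 16, 32 and 64 bits), so the exists () check fails and the fallback path
runs. A standalone illustration of the arithmetic, assuming a 64-bit word:

#include <cassert>

/* A packed record occupying 5 bytes: the next word boundary is 24 bits
   away, and 24 bits has no scalar integer mode.  */
struct __attribute__ ((packed)) rec
{
  char tag;            /* 1 byte */
  unsigned int value;  /* 4 bytes, unaligned because of 'packed' */
};

int main ()
{
  const unsigned int bits_per_word = 64;      /* assumed word size */
  unsigned int intoffset = sizeof (rec) * 8;  /* 40 bits used */
  unsigned int bits = bits_per_word - intoffset % bits_per_word;
  assert (sizeof (rec) == 5);
  assert (bits == 24);  /* int_mode_for_size (24, 0) finds nothing */
  return 0;
}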
HOST_WIDE_INT size = int_size_in_bytes (type);
gcc_assert (size <= 16);
- mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
}
return gen_rtx_REG (mode, regno);
HOST_WIDE_INT size = int_size_in_bytes (type);
gcc_assert (size <= 32);
- mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
/* ??? We probably should have made the same ABI change in
3.4.0 as the one we made for unions. The latter was
op_size = 32;
}
  /* If it is not a MODE_INT (or it is smaller than SImode), add a SUBREG.  */
- mode = mode_for_size (op_size, MODE_INT, 0);
+ mode = int_mode_for_size (op_size, 0).require ();
if (mode != GET_MODE (op))
op = gen_rtx_SUBREG (mode, op, 0);
return op;
rtx target = operands[0];
int compare_size = GET_MODE_BITSIZE (comp_mode);
int target_size = GET_MODE_BITSIZE (GET_MODE (target));
- machine_mode mode = mode_for_size (target_size, MODE_INT, 0);
+ machine_mode mode = int_mode_for_size (target_size, 0).require ();
rtx select_mask;
rtx op_t = operands[2];
rtx op_f = operands[3];
innermode = GET_MODE (x);
if (CONST_INT_P (x)
&& msize * BITS_PER_UNIT <= HOST_BITS_PER_WIDE_INT)
- innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
+ innermode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
else if (innermode == VOIDmode)
- innermode = mode_for_size (HOST_BITS_PER_DOUBLE_INT, MODE_INT, 0);
+ innermode = int_mode_for_size (HOST_BITS_PER_DOUBLE_INT, 0).require ();
xsize = GET_MODE_SIZE (innermode);
&& ! (target != 0 && MEM_P (op0)
&& MEM_P (target)
&& bitpos % BITS_PER_UNIT == 0))
- ext_mode = mode_for_size (bitsize, MODE_INT, 1);
+ ext_mode = int_mode_for_size (bitsize, 1).else_blk ();
if (ext_mode == BLKmode)
{
that mode's store operation. */
else if (size <= UNITS_PER_WORD)
{
- machine_mode mode
- = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ unsigned int bits = size * BITS_PER_UNIT;
+ machine_mode mode = int_mode_for_size (bits, 0).else_blk ();
if (mode != BLKmode
#ifdef BLOCK_REG_PADDING
tree tmp_int = expr;
bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
|| (bitpos % BITS_PER_UNIT)
- || mode_for_size (bitlen, MODE_INT, 0) == BLKmode);
+ || !int_mode_for_size (bitlen, 0).exists ());
if (!sub_byte_op_p)
return (native_encode_expr (tmp_int, ptr + first_byte, total_bytes, 0)
{
adjust = inherent_size - total_size;
if (adjust)
- stack_slot
- = adjust_address_nv (x, mode_for_size (total_size
- * BITS_PER_UNIT,
- MODE_INT, 1),
- adjust);
+ {
+ unsigned int total_bits = total_size * BITS_PER_UNIT;
+ machine_mode mem_mode
+ = int_mode_for_size (total_bits, 1).else_blk ();
+ stack_slot = adjust_address_nv (x, mem_mode, adjust);
+ }
}
if (! dont_share_p && ira_conflicts_p)
{
adjust = GET_MODE_SIZE (mode) - total_size;
if (adjust)
- stack_slot
- = adjust_address_nv (x, mode_for_size (total_size
- * BITS_PER_UNIT,
- MODE_INT, 1),
- adjust);
+ {
+ unsigned int total_bits = total_size * BITS_PER_UNIT;
+ machine_mode mem_mode
+ = int_mode_for_size (total_bits, 1).else_blk ();
+ stack_slot = adjust_address_nv (x, mem_mode, adjust);
+ }
}
spill_stack_slot[from_reg] = stack_slot;
/* For integers, try mapping it to a same-sized scalar mode. */
if (mode == VOIDmode
&& GET_MODE_CLASS (innermode) == MODE_INT)
- mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
- MODE_INT, 0);
+ {
+ unsigned int nbits = nunits * GET_MODE_BITSIZE (innermode);
+ mode = int_mode_for_size (nbits, 0).else_blk ();
+ }
if (mode == VOIDmode
|| (GET_MODE_CLASS (mode) == MODE_INT
TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
/* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
integral, which may be an __intN. */
- SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
+ SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ());
TYPE_PRECISION (type) = POINTER_SIZE;
break;
/* It's hard to see what the mode and size of a function ought to
be, but we do know the alignment is FUNCTION_BOUNDARY, so
make it consistent with that. */
- SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
+ SET_TYPE_MODE (type,
+ int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ());
TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
break;