From fffbab82e7fd15ef695159746a0ce7b9ac906778 Mon Sep 17 00:00:00 2001
From: Richard Sandiford
Date: Wed, 30 Aug 2017 11:10:28 +0000
Subject: [PATCH] [17/77] Add an int_mode_for_size helper function

This patch adds a wrapper around mode_for_size for cases in which
the mode class is MODE_INT (the commonest case).  The return type
can then be an opt_scalar_int_mode instead of a machine_mode.

2017-08-30  Richard Sandiford
	    Alan Hayward
	    David Sherwood

gcc/
	* machmode.h (int_mode_for_size): New function.
	* builtins.c (set_builtin_user_assembler_name): Use int_mode_for_size
	instead of mode_for_size.
	* calls.c (save_fixed_argument_area): Likewise.  Make use of BLKmode
	explicit.
	* combine.c (expand_field_assignment): Use int_mode_for_size
	instead of mode_for_size.
	(make_extraction): Likewise.
	(simplify_shift_const_1): Likewise.
	(simplify_comparison): Likewise.
	* dojump.c (do_jump): Likewise.
	* dwarf2out.c (mem_loc_descriptor): Likewise.
	* emit-rtl.c (init_derived_machine_modes): Likewise.
	* expmed.c (flip_storage_order): Likewise.
	(convert_extracted_bit_field): Likewise.
	* expr.c (copy_blkmode_from_reg): Likewise.
	* graphite-isl-ast-to-gimple.c (max_mode_int_precision): Likewise.
	* internal-fn.c (expand_mul_overflow): Likewise.
	* lower-subreg.c (simple_move): Likewise.
	* optabs-libfuncs.c (init_optabs): Likewise.
	* simplify-rtx.c (simplify_unary_operation_1): Likewise.
	* tree.c (vector_type_mode): Likewise.
	* tree-ssa-strlen.c (handle_builtin_memcmp): Likewise.
	* tree-vect-data-refs.c (vect_lanes_optab_supported_p): Likewise.
	* tree-vect-generic.c (expand_vector_parallel): Likewise.
	* tree-vect-stmts.c (vectorizable_load): Likewise.
	(vectorizable_store): Likewise.

gcc/ada/
	* gcc-interface/decl.c (gnat_to_gnu_entity): Use int_mode_for_size
	instead of mode_for_size.
	(gnat_to_gnu_subprog_type): Likewise.
	* gcc-interface/utils.c (make_type_from_size): Likewise.

Co-Authored-By: Alan Hayward
Co-Authored-By: David Sherwood

From-SVN: r251469
---
 gcc/ChangeLog                    | 32 ++++++++++++++++++++++++++++++++
 gcc/ada/ChangeLog                |  9 +++++++++
 gcc/ada/gcc-interface/decl.c     | 10 +++++-----
 gcc/ada/gcc-interface/utils.c    |  5 +++--
 gcc/builtins.c                   |  4 ++--
 gcc/calls.c                      |  9 ++++++---
 gcc/combine.c                    | 27 ++++++++++++---------------
 gcc/dojump.c                     |  2 +-
 gcc/dwarf2out.c                  |  7 +++----
 gcc/emit-rtl.c                   |  3 +--
 gcc/expmed.c                     | 18 +++++++-----------
 gcc/expr.c                       |  6 +++---
 gcc/graphite-isl-ast-to-gimple.c |  2 +-
 gcc/internal-fn.c                |  5 +++--
 gcc/lower-subreg.c               |  3 +--
 gcc/machmode.h                   | 10 ++++++++++
 gcc/optabs-libfuncs.c            |  6 ++++--
 gcc/simplify-rtx.c               | 14 ++++++--------
 gcc/tree-ssa-strlen.c            |  4 ++--
 gcc/tree-vect-data-refs.c        |  9 ++++-----
 gcc/tree-vect-generic.c          |  4 ++--
 gcc/tree-vect-stmts.c            |  4 ++--
 gcc/tree.c                       |  9 +++++----
 23 files changed, 124 insertions(+), 78 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d7e70be58d6..a9d5ea22c00 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,35 @@
+2017-08-30  Richard Sandiford
+	    Alan Hayward
+	    David Sherwood
+
+	* machmode.h (int_mode_for_size): New function.
+	* builtins.c (set_builtin_user_assembler_name): Use int_mode_for_size
+	instead of mode_for_size.
+	* calls.c (save_fixed_argument_area): Likewise.  Make use of BLKmode
+	explicit.
+	* combine.c (expand_field_assignment): Use int_mode_for_size
+	instead of mode_for_size.
+	(make_extraction): Likewise.
+	(simplify_shift_const_1): Likewise.
+	(simplify_comparison): Likewise.
+	* dojump.c (do_jump): Likewise.
+	* dwarf2out.c (mem_loc_descriptor): Likewise.
+	* emit-rtl.c (init_derived_machine_modes): Likewise.
+	* expmed.c (flip_storage_order): Likewise.
+	(convert_extracted_bit_field): Likewise.
+	* expr.c (copy_blkmode_from_reg): Likewise.
+	* graphite-isl-ast-to-gimple.c (max_mode_int_precision): Likewise.
+	* internal-fn.c (expand_mul_overflow): Likewise.
+	* lower-subreg.c (simple_move): Likewise.
+	* optabs-libfuncs.c (init_optabs): Likewise.
+	* simplify-rtx.c (simplify_unary_operation_1): Likewise.
+	* tree.c (vector_type_mode): Likewise.
+	* tree-ssa-strlen.c (handle_builtin_memcmp): Likewise.
+	* tree-vect-data-refs.c (vect_lanes_optab_supported_p): Likewise.
+	* tree-vect-generic.c (expand_vector_parallel): Likewise.
+	* tree-vect-stmts.c (vectorizable_load): Likewise.
+	(vectorizable_store): Likewise.
+
 2017-08-30  Richard Sandiford
 	    Alan Hayward
 	    David Sherwood
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index 80242114224..633ca11e4ce 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,12 @@
+2017-08-30  Richard Sandiford
+	    Alan Hayward
+	    David Sherwood
+
+	* gcc-interface/decl.c (gnat_to_gnu_entity): Use int_mode_for_size
+	instead of mode_for_size.
+	(gnat_to_gnu_subprog_type): Likewise.
+	* gcc-interface/utils.c (make_type_from_size): Likewise.
+
 2017-08-30  Richard Sandiford
 	    Alan Hayward
 	    David Sherwood
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index 83c582ff64f..1b88e25bbf9 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -3625,11 +3625,12 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, bool definition)
       /* True if we make a dummy type here.  */
       bool made_dummy = false;
       /* The mode to be used for the pointer type.  */
-      machine_mode p_mode = mode_for_size (esize, MODE_INT, 0);
+      scalar_int_mode p_mode;
       /* The GCC type used for the designated type.  */
       tree gnu_desig_type = NULL_TREE;
 
-      if (!targetm.valid_pointer_mode (p_mode))
+      if (!int_mode_for_size (esize, 0).exists (&p_mode)
+	  || !targetm.valid_pointer_mode (p_mode))
 	p_mode = ptr_mode;
 
       /* If either the designated type or its full view is an unconstrained
@@ -5939,12 +5940,11 @@ gnat_to_gnu_subprog_type (Entity_Id gnat_subprog, bool definition,
 	    unsigned int size
 	      = TREE_INT_CST_LOW (TYPE_SIZE (gnu_cico_return_type));
 	    unsigned int i = BITS_PER_UNIT;
-	    machine_mode mode;
+	    scalar_int_mode mode;
 
 	    while (i < size)
 	      i <<= 1;
-	    mode = mode_for_size (i, MODE_INT, 0);
-	    if (mode != BLKmode)
+	    if (int_mode_for_size (i, 0).exists (&mode))
 	      {
 		SET_TYPE_MODE (gnu_cico_return_type, mode);
 		SET_TYPE_ALIGN (gnu_cico_return_type,
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index 23127aa6896..bc916bbb974 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -1166,8 +1166,9 @@ make_type_from_size (tree type, tree size_tree, bool for_biased)
 	 may need to return the thin pointer.  */
       if (TYPE_FAT_POINTER_P (type) && size < POINTER_SIZE * 2)
 	{
-	  machine_mode p_mode = mode_for_size (size, MODE_INT, 0);
-	  if (!targetm.valid_pointer_mode (p_mode))
+	  scalar_int_mode p_mode;
+	  if (!int_mode_for_size (size, 0).exists (&p_mode)
+	      || !targetm.valid_pointer_mode (p_mode))
 	    p_mode = ptr_mode;
 	  return
 	    build_pointer_type_for_mode
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 4cb1d37ddfd..ab6a3c0bf1f 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -10365,9 +10365,9 @@ set_builtin_user_assembler_name (tree decl, const char *asmspec)
   if (DECL_FUNCTION_CODE (decl) == BUILT_IN_FFS
      && INT_TYPE_SIZE < BITS_PER_WORD)
     {
+      scalar_int_mode mode = int_mode_for_size (INT_TYPE_SIZE, 0).require ();
       set_user_assembler_libfunc ("ffs", asmspec);
-      set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
-			 "ffs");
+      set_optab_libfunc (ffs_optab, mode, "ffs");
     }
 }
diff --git a/gcc/calls.c b/gcc/calls.c
index 7599928c7cb..d5bd5049cb9 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -1047,12 +1047,15 @@ save_fixed_argument_area (int reg_parm_stack_space, rtx argblock, int *low_to_sa
       *high_to_save = high;
 
       num_to_save = high - low + 1;
-      save_mode = mode_for_size (num_to_save * BITS_PER_UNIT, MODE_INT, 1);
 
       /* If we don't have the required alignment, must do this
	 in BLKmode.  */
-      if ((low & (MIN (GET_MODE_SIZE (save_mode),
-		       BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)))
+      scalar_int_mode imode;
+      if (int_mode_for_size (num_to_save * BITS_PER_UNIT, 1).exists (&imode)
+	  && (low & (MIN (GET_MODE_SIZE (imode),
+			  BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)) == 0)
+	save_mode = imode;
+      else
 	save_mode = BLKmode;
 
       if (ARGS_GROW_DOWNWARD)
diff --git a/gcc/combine.c b/gcc/combine.c
index 1a6eda609ed..a717ca5644d 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -7306,19 +7306,16 @@ expand_field_assignment (const_rtx x)
       /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
       if (! SCALAR_INT_MODE_P (compute_mode))
	{
-	  machine_mode imode;
-
	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (compute_mode))
	    break;
 
	  /* Try to find an integral mode to pun with.  */
-	  imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
-	  if (imode == BLKmode)
+	  if (!int_mode_for_size (GET_MODE_BITSIZE (compute_mode), 0)
+	      .exists (&compute_mode))
	    break;
 
-	  compute_mode = imode;
-	  inner = gen_lowpart (imode, inner);
+	  inner = gen_lowpart (compute_mode, inner);
	}
 
       /* Compute a mask of LEN bits, if we can do this on the host machine.  */
@@ -7389,7 +7386,6 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
   machine_mode wanted_inner_reg_mode = word_mode;
   machine_mode pos_mode = word_mode;
   machine_mode extraction_mode = word_mode;
-  machine_mode tmode = mode_for_size (len, MODE_INT, 1);
   rtx new_rtx = 0;
   rtx orig_pos_rtx = pos_rtx;
   HOST_WIDE_INT orig_pos;
@@ -7437,7 +7433,8 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
      For MEM, we can avoid an extract if the field starts on an appropriate
      boundary and we can change the mode of the memory reference.  */
 
-  if (tmode != BLKmode
+  scalar_int_mode tmode;
+  if (int_mode_for_size (len, 1).exists (&tmode)
       && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
@@ -10444,8 +10441,8 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
	      && ! mode_dependent_address_p (XEXP (varop, 0),
					     MEM_ADDR_SPACE (varop))
	      && ! MEM_VOLATILE_P (varop)
-	      && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
-					 MODE_INT, 1)) != BLKmode)
+	      && (int_mode_for_size (GET_MODE_BITSIZE (mode) - count, 1)
+		  .exists (&tmode)))
	    {
	      new_rtx = adjust_address_nv (varop, tmode,
					   BYTES_BIG_ENDIAN ? 0
@@ -12371,7 +12368,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
				    & GET_MODE_MASK (mode))
				   + 1)) >= 0
	      && const_op >> i == 0
-	      && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
+	      && int_mode_for_size (i, 1).exists (&tmode))
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
@@ -12531,8 +12528,8 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
	  && CONST_INT_P (XEXP (op0, 1))
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
-	  && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
-				     MODE_INT, 1)) != BLKmode
+	  && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
+	      .exists (&tmode))
	  && (((unsigned HOST_WIDE_INT) const_op
	       + (GET_MODE_MASK (tmode) >> 1) + 1)
	      <= GET_MODE_MASK (tmode)))
@@ -12550,8 +12547,8 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	  && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
-	  && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
-				     MODE_INT, 1)) != BLKmode
+	  && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
+	      .exists (&tmode))
	  && (((unsigned HOST_WIDE_INT) const_op
	       + (GET_MODE_MASK (tmode) >> 1) + 1)
	      <= GET_MODE_MASK (tmode)))
diff --git a/gcc/dojump.c b/gcc/dojump.c
index 7a389663ab4..34492f32e67 100644
--- a/gcc/dojump.c
+++ b/gcc/dojump.c
@@ -594,7 +594,7 @@ do_jump (tree exp, rtx_code_label *if_false_label,
	  && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
	  && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
	  && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
-	  && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
+	  && int_mode_for_size (i + 1, 0).exists (&mode)
	  && (type = lang_hooks.types.type_for_mode (mode, 1)) != 0
	  && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
	  && have_insn_for (COMPARE, TYPE_MODE (type)))
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index aafacf475cd..fdee0096dbb 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -15251,13 +15251,12 @@ mem_loc_descriptor (rtx rtl, machine_mode mode,
	      || GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT))
	{
	  dw_die_ref type_die = base_type_for_mode (mode, 1);
-	  machine_mode amode;
+	  scalar_int_mode amode;
	  if (type_die == NULL)
	    return NULL;
-	  amode = mode_for_size (DWARF2_ADDR_SIZE * BITS_PER_UNIT,
-				 MODE_INT, 0);
	  if (INTVAL (rtl) >= 0
-	      && amode != BLKmode
+	      && (int_mode_for_size (DWARF2_ADDR_SIZE * BITS_PER_UNIT, 0)
+		  .exists (&amode))
	      && trunc_int_for_mode (INTVAL (rtl), amode) == INTVAL (rtl)
	      /* const DW_OP_convert vs.
		 DW_OP_const_type .  */
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 399c5d6b895..3785eca5014 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -5879,8 +5879,7 @@ init_derived_machine_modes (void)
   byte_mode = opt_byte_mode.require ();
   word_mode = opt_word_mode.require ();
 
-  ptr_mode = as_a <scalar_int_mode> (mode_for_size (POINTER_SIZE,
-						     MODE_INT, 0));
+  ptr_mode = int_mode_for_size (POINTER_SIZE, 0).require ();
 }
 
 /* Create some permanent unique rtl objects shared between all functions.  */
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 48a006004ff..4c39096460b 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -364,7 +364,7 @@ check_reverse_float_storage_order_support (void)
 rtx
 flip_storage_order (machine_mode mode, rtx x)
 {
-  machine_mode int_mode;
+  scalar_int_mode int_mode;
   rtx result;
 
   if (mode == QImode)
@@ -384,16 +384,13 @@ flip_storage_order (machine_mode mode, rtx x)
   if (__builtin_expect (reverse_storage_order_supported < 0, 0))
     check_reverse_storage_order_support ();
 
-  if (SCALAR_INT_MODE_P (mode))
-    int_mode = mode;
-  else
+  if (!is_a <scalar_int_mode> (mode, &int_mode))
     {
       if (FLOAT_MODE_P (mode)
	  && __builtin_expect (reverse_float_storage_order_supported < 0, 0))
	check_reverse_float_storage_order_support ();
 
-      int_mode = mode_for_size (GET_MODE_PRECISION (mode), MODE_INT, 0);
-      if (int_mode == BLKmode)
+      if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode))
	{
	  sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
	  return x;
@@ -1429,11 +1426,10 @@ convert_extracted_bit_field (rtx x, machine_mode mode,
      value via a SUBREG.  */
   if (!SCALAR_INT_MODE_P (tmode))
     {
-      machine_mode smode;
-
-      smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
-      x = convert_to_mode (smode, x, unsignedp);
-      x = force_reg (smode, x);
+      scalar_int_mode int_mode
+	= int_mode_for_size (GET_MODE_BITSIZE (tmode), 0).require ();
+      x = convert_to_mode (int_mode, x, unsignedp);
+      x = force_reg (int_mode, x);
       return gen_lowpart (tmode, x);
     }
diff --git a/gcc/expr.c b/gcc/expr.c
index c5cc0cbc155..ac6f959af12 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -2671,9 +2671,9 @@ copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
   copy_mode = word_mode;
   if (MEM_P (target))
     {
-      machine_mode mem_mode = mode_for_size (bitsize, MODE_INT, 1);
-      if (mem_mode != BLKmode)
-	copy_mode = mem_mode;
+      opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
+      if (mem_mode.exists ())
+	copy_mode = mem_mode.require ();
     }
   else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
     copy_mode = tmode;
diff --git a/gcc/graphite-isl-ast-to-gimple.c b/gcc/graphite-isl-ast-to-gimple.c
index 5b2bc1c399f..964d6c97553 100644
--- a/gcc/graphite-isl-ast-to-gimple.c
+++ b/gcc/graphite-isl-ast-to-gimple.c
@@ -61,7 +61,7 @@ along with GCC; see the file COPYING3.  If not see
    should use isl to derive the optimal type for each subexpression.  */
 
 static int max_mode_int_precision =
-  GET_MODE_PRECISION (mode_for_size (MAX_FIXED_MODE_SIZE, MODE_INT, 0));
+  GET_MODE_PRECISION (int_mode_for_size (MAX_FIXED_MODE_SIZE, 0).require ());
 static int graphite_expression_type_precision = 128 <= max_mode_int_precision ?
   128 : max_mode_int_precision;
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index 8ac4f6a9826..017a469f2be 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -1459,7 +1459,7 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
     {
       struct separate_ops ops;
       int prec = GET_MODE_PRECISION (mode);
-      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
+      scalar_int_mode hmode;
       machine_mode wmode;
       ops.op0 = make_tree (type, op0);
       ops.op1 = make_tree (type, op1);
@@ -1495,7 +1495,8 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
				       profile_probability::very_likely ());
	    }
	}
-      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
+      else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
+	       && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
diff --git a/gcc/lower-subreg.c b/gcc/lower-subreg.c
index ac19e8f52ad..bdec0aa4cc0 100644
--- a/gcc/lower-subreg.c
+++ b/gcc/lower-subreg.c
@@ -349,8 +349,7 @@ simple_move (rtx_insn *insn, bool speed_p)
      size.  */
   mode = GET_MODE (SET_DEST (set));
   if (!SCALAR_INT_MODE_P (mode)
-      && (mode_for_size (GET_MODE_SIZE (mode) * BITS_PER_UNIT, MODE_INT, 0)
-	  == BLKmode))
+      && !int_mode_for_size (GET_MODE_BITSIZE (mode), 0).exists ())
     return NULL_RTX;
 
   /* Reject PARTIAL_INT modes.  They are used for processor specific
diff --git a/gcc/machmode.h b/gcc/machmode.h
index 25e72a83330..dcbfa0f4a3b 100644
--- a/gcc/machmode.h
+++ b/gcc/machmode.h
@@ -557,6 +557,16 @@ extern const unsigned char mode_complex[NUM_MACHINE_MODES];
 
 extern machine_mode mode_for_size (unsigned int, enum mode_class, int);
 
+/* Return the machine mode to use for a MODE_INT of SIZE bits, if one
+   exists.  If LIMIT is nonzero, modes wider than MAX_FIXED_MODE_SIZE
+   will not be used.  */
+
+inline opt_scalar_int_mode
+int_mode_for_size (unsigned int size, int limit)
+{
+  return dyn_cast <scalar_int_mode> (mode_for_size (size, MODE_INT, limit));
+}
+
 /* Return the machine mode to use for a MODE_FLOAT of SIZE bits, if one
    exists.  */
 
diff --git a/gcc/optabs-libfuncs.c b/gcc/optabs-libfuncs.c
index 13463102f61..151a473a66b 100644
--- a/gcc/optabs-libfuncs.c
+++ b/gcc/optabs-libfuncs.c
@@ -858,8 +858,10 @@ init_optabs (void)
   /* The ffs function operates on `int'.  Fall back on it if we do not
      have a libgcc2 function for that width.  */
   if (INT_TYPE_SIZE < BITS_PER_WORD)
-    set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
-		       "ffs");
+    {
+      scalar_int_mode mode = int_mode_for_size (INT_TYPE_SIZE, 0).require ();
+      set_optab_libfunc (ffs_optab, mode, "ffs");
+    }
 
   /* Explicitly initialize the bswap libfuncs since we need them to be
      valid for things other than word_mode.  */
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 60ffdbdb364..83e98b6c8d6 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -1493,12 +1493,11 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
-	  machine_mode tmode
-	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
-			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
+	  scalar_int_mode tmode;
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
-	  if (tmode != BLKmode)
+	  if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
+				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
@@ -1610,10 +1609,9 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
-	  machine_mode tmode
-	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
-			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
-	  if (tmode != BLKmode)
+	  scalar_int_mode tmode;
+	  if (int_mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
+				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index b0563fe7c32..1269fa8a2bc 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -2122,8 +2122,8 @@ handle_builtin_memcmp (gimple_stmt_iterator *gsi)
       unsigned align1 = get_pointer_alignment (arg1);
       unsigned align2 = get_pointer_alignment (arg2);
       unsigned align = MIN (align1, align2);
-      machine_mode mode = mode_for_size (leni, MODE_INT, 1);
-      if (mode != BLKmode
+      scalar_int_mode mode;
+      if (int_mode_for_size (leni, 1).exists (&mode)
	  && (align >= leni || !SLOW_UNALIGNED_ACCESS (mode, align)))
	{
	  location_t loc = gimple_location (stmt2);
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 60f2539b3c0..070c707fdaf 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -60,15 +60,14 @@ static bool
 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
			      tree vectype, unsigned HOST_WIDE_INT count)
 {
-  machine_mode mode, array_mode;
+  machine_mode mode;
+  scalar_int_mode array_mode;
   bool limit_p;
 
   mode = TYPE_MODE (vectype);
   limit_p = !targetm.array_mode_supported_p (mode, count);
-  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
-			      MODE_INT, limit_p);
-
-  if (array_mode == BLKmode)
+  if (!int_mode_for_size (count * GET_MODE_BITSIZE (mode),
+			  limit_p).exists (&array_mode))
     {
       if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 2415f9489d8..e88a58fd0b2 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -288,7 +288,6 @@ expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type,
			enum tree_code code)
 {
   tree result, compute_type;
-  machine_mode mode;
   int n_words = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD;
   location_t loc = gimple_location (gsi_stmt (*gsi));
 
@@ -312,7 +311,8 @@ expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type,
   else
     {
       /* Use a single scalar operation with a mode no wider than word_mode.  */
-      mode = mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), MODE_INT, 0);
+      scalar_int_mode mode
+	= int_mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), 0).require ();
       compute_type = lang_hooks.types.type_for_mode (mode, 1);
       result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code, type);
       warning_at (loc, OPT_Wvector_operation_performance,
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 48ee5ba98b9..3d5e6ada2e4 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -6040,7 +6040,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		 supported.  */
	      unsigned lsize
		= group_size * GET_MODE_BITSIZE (elmode);
-	      elmode = mode_for_size (lsize, MODE_INT, 0);
+	      elmode = int_mode_for_size (lsize, 0).require ();
	      vmode = mode_for_vector (elmode, nunits / group_size);
	      /* If we can't construct such a vector fall back to
		 element extracts from the original vector type and
@@ -7086,7 +7086,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		 to a larger load.  */
	      unsigned lsize
		= group_size * TYPE_PRECISION (TREE_TYPE (vectype));
-	      elmode = mode_for_size (lsize, MODE_INT, 0);
+	      elmode = int_mode_for_size (lsize, 0).require ();
	      vmode = mode_for_vector (elmode, nunits / group_size);
	      /* If we can't construct such a vector fall back to
		 element loads of the original vector type.  */
diff --git a/gcc/tree.c b/gcc/tree.c
index 946ad945e6a..cbb770f6693 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -12639,10 +12639,11 @@ vector_type_mode (const_tree t)
       /* For integers, try mapping it to a same-sized scalar mode.  */
       if (GET_MODE_CLASS (innermode) == MODE_INT)
	{
-	  mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
-				* GET_MODE_BITSIZE (innermode), MODE_INT, 0);
-
-	  if (mode != VOIDmode && have_regs_of_mode[mode])
+	  unsigned int size = (TYPE_VECTOR_SUBPARTS (t)
+			       * GET_MODE_BITSIZE (innermode));
+	  scalar_int_mode mode;
+	  if (int_mode_for_size (size, 0).exists (&mode)
+	      && have_regs_of_mode[mode])
	    return mode;
	}
-- 
2.30.2
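
As a rough illustration of the calling convention this patch introduces, the sketch below models the two call-site styles seen in the hunks above.  It is not GCC code: the mode enum, the toy MAX_FIXED_MODE_SIZE value and the lookup loop are simplified stand-ins, but the exists ()/exists (&mode)/require () interface mirrors the opt_scalar_int_mode value that the new int_mode_for_size helper returns, replacing the old "compare the result of mode_for_size against BLKmode" idiom.

/* Simplified, self-contained model of the opt_scalar_int_mode pattern;
   not GCC's real machmode machinery.  */
#include <cassert>
#include <cstdio>
#include <initializer_list>

enum scalar_int_mode { QImode = 8, HImode = 16, SImode = 32, DImode = 64 };
static const unsigned int MAX_FIXED_MODE_SIZE = 64;   /* toy value */

/* Optional-style result: either holds a mode or is empty, taking over
   the role that a BLKmode return value used to play.  */
class opt_scalar_int_mode
{
  bool m_set;
  scalar_int_mode m_mode;
public:
  opt_scalar_int_mode () : m_set (false), m_mode (QImode) {}
  opt_scalar_int_mode (scalar_int_mode m) : m_set (true), m_mode (m) {}

  /* Call sites with a fallback: store the mode through *MODE and return
     true if one exists.  */
  bool exists (scalar_int_mode *mode) const
  {
    if (m_set)
      *mode = m_mode;
    return m_set;
  }
  bool exists () const { return m_set; }

  /* Call sites that know the mode must exist: assert instead of silently
     continuing with a bogus mode.  */
  scalar_int_mode require () const
  {
    assert (m_set);
    return m_mode;
  }
};

/* Toy model of the helper: the integer mode with exactly SIZE bits, if
   any, honouring LIMIT the way mode_for_size honours MAX_FIXED_MODE_SIZE.  */
static opt_scalar_int_mode
int_mode_for_size (unsigned int size, int limit)
{
  if (limit && size > MAX_FIXED_MODE_SIZE)
    return opt_scalar_int_mode ();
  for (scalar_int_mode m : { QImode, HImode, SImode, DImode })
    if ((unsigned int) m == size)
      return m;
  return opt_scalar_int_mode ();
}

int
main ()
{
  /* Fallback style, as in the calls.c and tree-ssa-strlen.c hunks.  */
  scalar_int_mode mode;
  if (int_mode_for_size (24, 1).exists (&mode))
    printf ("got a %u-bit integer mode\n", (unsigned int) mode);
  else
    printf ("no 24-bit integer mode; keep the explicit BLKmode fallback\n");

  /* Must-exist style, as in the emit-rtl.c and expmed.c hunks.  */
  scalar_int_mode word_like = int_mode_for_size (32, 0).require ();
  printf ("require () gave a %u-bit mode\n", (unsigned int) word_like);
  return 0;
}

Both styles are visible in the patch: callers that have a natural fallback test exists (&mode) and keep the BLKmode case explicit, while callers that know a mode must exist state that with require () rather than relying on an unchecked comparison against BLKmode.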