From: Richard Sandiford
Date: Wed, 30 Aug 2017 11:10:53 +0000 (+0000)
Subject: [20/77] Replace MODE_INT checks with is_int_mode
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=b4206259f10455603e0c90825566de1ea777c04a;p=gcc.git

[20/77] Replace MODE_INT checks with is_int_mode

Replace checks of "GET_MODE_CLASS (...) == MODE_INT" with
"is_int_mode (..., &var)", in cases where it becomes useful to refer to
the mode as a scalar_int_mode.

2017-08-30  Richard Sandiford
	    Alan Hayward
	    David Sherwood

gcc/
	* machmode.h (is_int_mode): New function.
	* combine.c (find_split_point): Use it.
	(combine_simplify_rtx): Likewise.
	(simplify_if_then_else): Likewise.
	(simplify_set): Likewise.
	(simplify_shift_const_1): Likewise.
	(simplify_comparison): Likewise.
	* config/aarch64/aarch64.c (aarch64_rtx_costs): Likewise.
	* cse.c (notreg_cost): Likewise.
	(cse_insn): Likewise.
	* cselib.c (cselib_lookup_1): Likewise.
	* dojump.c (do_jump_1): Likewise.
	(do_compare_rtx_and_jump): Likewise.
	* dse.c (get_call_args): Likewise.
	* dwarf2out.c (rtl_for_decl_init): Likewise.
	(native_encode_initializer): Likewise.
	* expmed.c (emit_store_flag_1): Likewise.
	(emit_store_flag): Likewise.
	* expr.c (convert_modes): Likewise.
	(store_field): Likewise.
	(expand_expr_real_1): Likewise.
	* fold-const.c (fold_read_from_constant_string): Likewise.
	* gimple-ssa-sprintf.c (get_format_string): Likewise.
	* optabs-libfuncs.c (gen_int_libfunc): Likewise.
	* optabs.c (expand_binop): Likewise.
	(expand_unop): Likewise.
	(expand_abs_nojump): Likewise.
	(expand_one_cmpl_abs_nojump): Likewise.
	* simplify-rtx.c (mode_signbit_p): Likewise.
	(val_signbit_p): Likewise.
	(val_signbit_known_set_p): Likewise.
	(val_signbit_known_clear_p): Likewise.
	(simplify_relational_operation_1): Likewise.
	* tree.c (vector_type_mode): Likewise.

gcc/go/
	* go-lang.c (go_langhook_type_for_mode): Use is_int_mode.

Co-Authored-By: Alan Hayward
Co-Authored-By: David Sherwood

From-SVN: r251472
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 15ec88d4eba..654ea7e3e1e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,42 @@
+2017-08-30  Richard Sandiford
+	    Alan Hayward
+	    David Sherwood
+
+	* machmode.h (is_int_mode): New function.
+	* combine.c (find_split_point): Use it.
+	(combine_simplify_rtx): Likewise.
+	(simplify_if_then_else): Likewise.
+	(simplify_set): Likewise.
+	(simplify_shift_const_1): Likewise.
+	(simplify_comparison): Likewise.
+	* config/aarch64/aarch64.c (aarch64_rtx_costs): Likewise.
+	* cse.c (notreg_cost): Likewise.
+	(cse_insn): Likewise.
+	* cselib.c (cselib_lookup_1): Likewise.
+	* dojump.c (do_jump_1): Likewise.
+	(do_compare_rtx_and_jump): Likewise.
+	* dse.c (get_call_args): Likewise.
+	* dwarf2out.c (rtl_for_decl_init): Likewise.
+	(native_encode_initializer): Likewise.
+	* expmed.c (emit_store_flag_1): Likewise.
+	(emit_store_flag): Likewise.
+	* expr.c (convert_modes): Likewise.
+	(store_field): Likewise.
+	(expand_expr_real_1): Likewise.
+	* fold-const.c (fold_read_from_constant_string): Likewise.
+	* gimple-ssa-sprintf.c (get_format_string): Likewise.
+	* optabs-libfuncs.c (gen_int_libfunc): Likewise.
+	* optabs.c (expand_binop): Likewise.
+	(expand_unop): Likewise.
+	(expand_abs_nojump): Likewise.
+	(expand_one_cmpl_abs_nojump): Likewise.
+	* simplify-rtx.c (mode_signbit_p): Likewise.
+	(val_signbit_p): Likewise.
+	(val_signbit_known_set_p): Likewise.
+	(val_signbit_known_clear_p): Likewise.
+	(simplify_relational_operation_1): Likewise.
+	* tree.c (vector_type_mode): Likewise.
+ 2017-08-30 Richard Sandiford Alan Hayward David Sherwood diff --git a/gcc/combine.c b/gcc/combine.c index 44e378a160c..39c26feb24a 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -4791,6 +4791,7 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src) HOST_WIDE_INT pos = 0; int unsignedp = 0; rtx inner = NULL_RTX; + scalar_int_mode inner_mode; /* First special-case some codes. */ switch (code) @@ -5032,12 +5033,12 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src) /* We can't optimize if either mode is a partial integer mode as we don't know how many bits are significant in those modes. */ - if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT + if (!is_int_mode (GET_MODE (inner), &inner_mode) || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT) break; pos = 0; - len = GET_MODE_PRECISION (GET_MODE (inner)); + len = GET_MODE_PRECISION (inner_mode); unsignedp = 0; break; @@ -5560,6 +5561,7 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest, { enum rtx_code code = GET_CODE (x); machine_mode mode = GET_MODE (x); + scalar_int_mode int_mode; rtx temp; int i; @@ -6070,47 +6072,51 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest, ; else if (STORE_FLAG_VALUE == 1 - && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT - && op1 == const0_rtx - && mode == GET_MODE (op0) - && nonzero_bits (op0, mode) == 1) - return gen_lowpart (mode, + && new_code == NE + && is_int_mode (mode, &int_mode) + && op1 == const0_rtx + && int_mode == GET_MODE (op0) + && nonzero_bits (op0, int_mode) == 1) + return gen_lowpart (int_mode, expand_compound_operation (op0)); else if (STORE_FLAG_VALUE == 1 - && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT + && new_code == NE + && is_int_mode (mode, &int_mode) && op1 == const0_rtx - && mode == GET_MODE (op0) - && (num_sign_bit_copies (op0, mode) - == GET_MODE_PRECISION (mode))) + && int_mode == GET_MODE (op0) + && (num_sign_bit_copies (op0, int_mode) + == GET_MODE_PRECISION (int_mode))) { op0 = expand_compound_operation (op0); - return simplify_gen_unary (NEG, mode, - gen_lowpart (mode, op0), - mode); + return simplify_gen_unary (NEG, int_mode, + gen_lowpart (int_mode, op0), + int_mode); } else if (STORE_FLAG_VALUE == 1 - && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT + && new_code == EQ + && is_int_mode (mode, &int_mode) && op1 == const0_rtx - && mode == GET_MODE (op0) - && nonzero_bits (op0, mode) == 1) + && int_mode == GET_MODE (op0) + && nonzero_bits (op0, int_mode) == 1) { op0 = expand_compound_operation (op0); - return simplify_gen_binary (XOR, mode, - gen_lowpart (mode, op0), + return simplify_gen_binary (XOR, int_mode, + gen_lowpart (int_mode, op0), const1_rtx); } else if (STORE_FLAG_VALUE == 1 - && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT + && new_code == EQ + && is_int_mode (mode, &int_mode) && op1 == const0_rtx - && mode == GET_MODE (op0) - && (num_sign_bit_copies (op0, mode) - == GET_MODE_PRECISION (mode))) + && int_mode == GET_MODE (op0) + && (num_sign_bit_copies (op0, int_mode) + == GET_MODE_PRECISION (int_mode))) { op0 = expand_compound_operation (op0); - return plus_constant (mode, gen_lowpart (mode, op0), 1); + return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1); } /* If STORE_FLAG_VALUE is -1, we have cases similar to @@ -6119,48 +6125,51 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest, ; else if (STORE_FLAG_VALUE == -1 - && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT + && new_code == NE + && is_int_mode (mode, &int_mode) && op1 == 
const0_rtx - && mode == GET_MODE (op0) - && (num_sign_bit_copies (op0, mode) - == GET_MODE_PRECISION (mode))) - return gen_lowpart (mode, - expand_compound_operation (op0)); + && int_mode == GET_MODE (op0) + && (num_sign_bit_copies (op0, int_mode) + == GET_MODE_PRECISION (int_mode))) + return gen_lowpart (int_mode, expand_compound_operation (op0)); else if (STORE_FLAG_VALUE == -1 - && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT + && new_code == NE + && is_int_mode (mode, &int_mode) && op1 == const0_rtx - && mode == GET_MODE (op0) - && nonzero_bits (op0, mode) == 1) + && int_mode == GET_MODE (op0) + && nonzero_bits (op0, int_mode) == 1) { op0 = expand_compound_operation (op0); - return simplify_gen_unary (NEG, mode, - gen_lowpart (mode, op0), - mode); + return simplify_gen_unary (NEG, int_mode, + gen_lowpart (int_mode, op0), + int_mode); } else if (STORE_FLAG_VALUE == -1 - && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT + && new_code == EQ + && is_int_mode (mode, &int_mode) && op1 == const0_rtx - && mode == GET_MODE (op0) - && (num_sign_bit_copies (op0, mode) - == GET_MODE_PRECISION (mode))) + && int_mode == GET_MODE (op0) + && (num_sign_bit_copies (op0, int_mode) + == GET_MODE_PRECISION (int_mode))) { op0 = expand_compound_operation (op0); - return simplify_gen_unary (NOT, mode, - gen_lowpart (mode, op0), - mode); + return simplify_gen_unary (NOT, int_mode, + gen_lowpart (int_mode, op0), + int_mode); } /* If X is 0/1, (eq X 0) is X-1. */ else if (STORE_FLAG_VALUE == -1 - && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT + && new_code == EQ + && is_int_mode (mode, &int_mode) && op1 == const0_rtx - && mode == GET_MODE (op0) - && nonzero_bits (op0, mode) == 1) + && int_mode == GET_MODE (op0) + && nonzero_bits (op0, int_mode) == 1) { op0 = expand_compound_operation (op0); - return plus_constant (mode, gen_lowpart (mode, op0), -1); + return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1); } /* If STORE_FLAG_VALUE says to just test the sign bit and X has just @@ -6168,16 +6177,17 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest, (ashift x c) where C puts the bit in the sign bit. Remove any AND with STORE_FLAG_VALUE when we are done, since we are only going to test the sign bit. */ - if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT - && HWI_COMPUTABLE_MODE_P (mode) - && val_signbit_p (mode, STORE_FLAG_VALUE) + if (new_code == NE + && is_int_mode (mode, &int_mode) + && HWI_COMPUTABLE_MODE_P (int_mode) + && val_signbit_p (int_mode, STORE_FLAG_VALUE) && op1 == const0_rtx - && mode == GET_MODE (op0) - && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0) + && int_mode == GET_MODE (op0) + && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0) { - x = simplify_shift_const (NULL_RTX, ASHIFT, mode, + x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode, expand_compound_operation (op0), - GET_MODE_PRECISION (mode) - 1 - i); + GET_MODE_PRECISION (int_mode) - 1 - i); if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx) return XEXP (x, 0); else @@ -6263,6 +6273,7 @@ simplify_if_then_else (rtx x) int i; enum rtx_code false_code; rtx reversed; + scalar_int_mode int_mode; /* Simplify storing of the truth value. */ if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx) @@ -6443,7 +6454,7 @@ simplify_if_then_else (rtx x) if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) && comparison_p - && GET_MODE_CLASS (mode) == MODE_INT + && is_int_mode (mode, &int_mode) && ! 
side_effects_p (x)) { rtx t = make_compound_operation (true_rtx, SET); @@ -6451,7 +6462,7 @@ simplify_if_then_else (rtx x) rtx cond_op0 = XEXP (cond, 0); rtx cond_op1 = XEXP (cond, 1); enum rtx_code op = UNKNOWN, extend_op = UNKNOWN; - machine_mode m = mode; + machine_mode m = int_mode; rtx z = 0, c1 = NULL_RTX; if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS @@ -6480,7 +6491,7 @@ simplify_if_then_else (rtx x) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) && (num_sign_bit_copies (f, GET_MODE (f)) > (unsigned int) - (GET_MODE_PRECISION (mode) + (GET_MODE_PRECISION (int_mode) - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0)))))) { c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0)); @@ -6496,7 +6507,7 @@ simplify_if_then_else (rtx x) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) && (num_sign_bit_copies (f, GET_MODE (f)) > (unsigned int) - (GET_MODE_PRECISION (mode) + (GET_MODE_PRECISION (int_mode) - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1)))))) { c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0)); @@ -6512,7 +6523,7 @@ simplify_if_then_else (rtx x) || GET_CODE (XEXP (t, 0)) == LSHIFTRT || GET_CODE (XEXP (t, 0)) == ASHIFTRT) && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG - && HWI_COMPUTABLE_MODE_P (mode) + && HWI_COMPUTABLE_MODE_P (int_mode) && subreg_lowpart_p (XEXP (XEXP (t, 0), 0)) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) && ((nonzero_bits (f, GET_MODE (f)) @@ -6528,7 +6539,7 @@ simplify_if_then_else (rtx x) || GET_CODE (XEXP (t, 0)) == IOR || GET_CODE (XEXP (t, 0)) == XOR) && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG - && HWI_COMPUTABLE_MODE_P (mode) + && HWI_COMPUTABLE_MODE_P (int_mode) && subreg_lowpart_p (XEXP (XEXP (t, 0), 1)) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) && ((nonzero_bits (f, GET_MODE (f)) @@ -6552,7 +6563,7 @@ simplify_if_then_else (rtx x) temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp); if (extend_op != UNKNOWN) - temp = simplify_gen_unary (extend_op, mode, temp, m); + temp = simplify_gen_unary (extend_op, int_mode, temp, m); return temp; } @@ -6605,6 +6616,7 @@ simplify_set (rtx x) = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest); rtx_insn *other_insn; rtx *cc_use; + scalar_int_mode int_mode; /* (set (pc) (return)) gets written as (return). */ if (GET_CODE (dest) == PC && ANY_RETURN_P (src)) @@ -6871,15 +6883,14 @@ simplify_set (rtx x) if (GET_CODE (dest) != PC && GET_CODE (src) == IF_THEN_ELSE - && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT + && is_int_mode (GET_MODE (src), &int_mode) && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE) && XEXP (XEXP (src, 0), 1) == const0_rtx - && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0)) + && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0)) && (!HAVE_conditional_move - || ! can_conditionally_move_p (GET_MODE (src))) - && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), - GET_MODE (XEXP (XEXP (src, 0), 0))) - == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0)))) + || ! can_conditionally_move_p (int_mode)) + && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode) + == GET_MODE_PRECISION (int_mode)) && ! 
side_effects_p (src)) { rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE @@ -6901,17 +6912,17 @@ simplify_set (rtx x) && rtx_equal_p (XEXP (false_rtx, 1), true_rtx)) term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx; - term2 = simplify_gen_binary (AND, GET_MODE (src), + term2 = simplify_gen_binary (AND, int_mode, XEXP (XEXP (src, 0), 0), true_rtx); - term3 = simplify_gen_binary (AND, GET_MODE (src), - simplify_gen_unary (NOT, GET_MODE (src), + term3 = simplify_gen_binary (AND, int_mode, + simplify_gen_unary (NOT, int_mode, XEXP (XEXP (src, 0), 0), - GET_MODE (src)), + int_mode), false_rtx); SUBST (SET_SRC (x), - simplify_gen_binary (IOR, GET_MODE (src), - simplify_gen_binary (IOR, GET_MODE (src), + simplify_gen_binary (IOR, int_mode, + simplify_gen_binary (IOR, int_mode, term1, term2), term3)); @@ -10296,7 +10307,8 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode, rtx orig_varop = varop; int count; machine_mode mode = result_mode; - machine_mode shift_mode, tmode; + machine_mode shift_mode; + scalar_int_mode tmode, inner_mode; unsigned int mode_words = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD; /* We form (outer_op (code varop count) (outer_const)). */ @@ -10464,17 +10476,16 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode, the same number of words as what we've seen so far. Then store the widest mode in MODE. */ if (subreg_lowpart_p (varop) - && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop))) - > GET_MODE_SIZE (GET_MODE (varop))) - && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop))) + && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode) + && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (GET_MODE (varop)) + && (unsigned int) ((GET_MODE_SIZE (inner_mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == mode_words - && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT - && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT) + && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT) { varop = SUBREG_REG (varop); - if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode)) - mode = GET_MODE (varop); + if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (mode)) + mode = inner_mode; continue; } break; @@ -11717,7 +11728,8 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) rtx op1 = *pop1; rtx tem, tem1; int i; - machine_mode mode, tmode; + scalar_int_mode mode, inner_mode; + machine_mode tmode; /* Try a few ways of applying the same transformation to both operands. */ while (1) @@ -12128,19 +12140,17 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) ; else if (subreg_lowpart_p (op0) && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT - && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT + && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode) && (code == NE || code == EQ) - && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) - <= HOST_BITS_PER_WIDE_INT) + && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT && !paradoxical_subreg_p (op0) - && (nonzero_bits (SUBREG_REG (op0), - GET_MODE (SUBREG_REG (op0))) + && (nonzero_bits (SUBREG_REG (op0), inner_mode) & ~GET_MODE_MASK (GET_MODE (op0))) == 0) { /* Remove outer subregs that don't do anything. 
*/ - tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1); + tem = gen_lowpart (inner_mode, op1); - if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0))) + if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (GET_MODE (op0))) == 0) { op0 = SUBREG_REG (op0); @@ -12658,8 +12668,8 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) op1 = make_compound_operation (op1, SET); if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0) - && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT - && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT + && is_int_mode (GET_MODE (op0), &mode) + && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode) && (code == NE || code == EQ)) { if (paradoxical_subreg_p (op0)) @@ -12669,19 +12679,16 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) if (REG_P (SUBREG_REG (op0))) { op0 = SUBREG_REG (op0); - op1 = gen_lowpart (GET_MODE (op0), op1); + op1 = gen_lowpart (inner_mode, op1); } } - else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) - <= HOST_BITS_PER_WIDE_INT) - && (nonzero_bits (SUBREG_REG (op0), - GET_MODE (SUBREG_REG (op0))) - & ~GET_MODE_MASK (GET_MODE (op0))) == 0) + else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT + && (nonzero_bits (SUBREG_REG (op0), inner_mode) + & ~GET_MODE_MASK (mode)) == 0) { - tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1); + tem = gen_lowpart (inner_mode, op1); - if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0))) - & ~GET_MODE_MASK (GET_MODE (op0))) == 0) + if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0) op0 = SUBREG_REG (op0), op1 = tem; } } @@ -12692,8 +12699,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) mode for which we can do the compare. There are a number of cases in which we can use the wider mode. */ - mode = GET_MODE (op0); - if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT + if (is_int_mode (GET_MODE (op0), &mode) && GET_MODE_SIZE (mode) < UNITS_PER_WORD && ! have_insn_for (COMPARE, mode)) FOR_EACH_WIDER_MODE (tmode, mode) diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index 54c30a2952a..7ca12db76e4 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -6844,6 +6844,7 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED, const struct cpu_cost_table *extra_cost = aarch64_tune_params.insn_extra_cost; int code = GET_CODE (x); + scalar_int_mode int_mode; /* By default, assume that everything has equivalent cost to the cheapest instruction. Any additional costs are applied as a delta @@ -7426,28 +7427,29 @@ cost_plus: return true; } - if (GET_MODE_CLASS (mode) == MODE_INT) + if (is_int_mode (mode, &int_mode)) { if (CONST_INT_P (op1)) { /* We have a mask + shift version of a UBFIZ i.e. the *andim_ashift_bfiz pattern. */ if (GET_CODE (op0) == ASHIFT - && aarch64_mask_and_shift_for_ubfiz_p (mode, op1, - XEXP (op0, 1))) + && aarch64_mask_and_shift_for_ubfiz_p (int_mode, op1, + XEXP (op0, 1))) { - *cost += rtx_cost (XEXP (op0, 0), mode, + *cost += rtx_cost (XEXP (op0, 0), int_mode, (enum rtx_code) code, 0, speed); if (speed) *cost += extra_cost->alu.bfx; return true; } - else if (aarch64_bitmask_imm (INTVAL (op1), mode)) + else if (aarch64_bitmask_imm (INTVAL (op1), int_mode)) { /* We possibly get the immediate for free, this is not modelled. 
*/ - *cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed); + *cost += rtx_cost (op0, int_mode, + (enum rtx_code) code, 0, speed); if (speed) *cost += extra_cost->alu.logical; @@ -7482,8 +7484,10 @@ cost_plus: } /* In both cases we want to cost both operands. */ - *cost += rtx_cost (new_op0, mode, (enum rtx_code) code, 0, speed); - *cost += rtx_cost (op1, mode, (enum rtx_code) code, 1, speed); + *cost += rtx_cost (new_op0, int_mode, (enum rtx_code) code, + 0, speed); + *cost += rtx_cost (op1, int_mode, (enum rtx_code) code, + 1, speed); return true; } diff --git a/gcc/cse.c b/gcc/cse.c index 58572e6a093..8ef90639753 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -720,13 +720,14 @@ preferable (int cost_a, int regcost_a, int cost_b, int regcost_b) static int notreg_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno) { + scalar_int_mode int_mode, inner_mode; return ((GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)) - && GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT - && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) + && is_int_mode (mode, &int_mode) + && is_int_mode (GET_MODE (SUBREG_REG (x)), &inner_mode) + && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode) && subreg_lowpart_p (x) - && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (SUBREG_REG (x)))) + && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, inner_mode)) ? 0 : rtx_cost (x, mode, outer, opno, optimize_this_for_speed_p) * 2); } @@ -4603,6 +4604,7 @@ cse_insn (rtx_insn *insn) /* Set nonzero if we need to call force_const_mem on with the contents of src_folded before using it. */ int src_folded_force_flag = 0; + scalar_int_mode int_mode; dest = SET_DEST (sets[i].rtl); src = SET_SRC (sets[i].rtl); @@ -4840,13 +4842,13 @@ cse_insn (rtx_insn *insn) wider mode. */ if (src_const && src_related == 0 && CONST_INT_P (src_const) - && GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_PRECISION (mode) < BITS_PER_WORD) + && is_int_mode (mode, &int_mode) + && GET_MODE_PRECISION (int_mode) < BITS_PER_WORD) { - machine_mode wider_mode; - - FOR_EACH_WIDER_MODE (wider_mode, mode) + opt_scalar_int_mode wider_mode_iter; + FOR_EACH_WIDER_MODE (wider_mode_iter, int_mode) { + scalar_int_mode wider_mode = wider_mode_iter.require (); if (GET_MODE_PRECISION (wider_mode) > BITS_PER_WORD) break; @@ -4860,7 +4862,7 @@ cse_insn (rtx_insn *insn) const_elt; const_elt = const_elt->next_same_value) if (REG_P (const_elt->exp)) { - src_related = gen_lowpart (mode, const_elt->exp); + src_related = gen_lowpart (int_mode, const_elt->exp); break; } diff --git a/gcc/cselib.c b/gcc/cselib.c index 74c25ac1f97..41e0e4fbb34 100644 --- a/gcc/cselib.c +++ b/gcc/cselib.c @@ -2009,6 +2009,8 @@ cselib_lookup_1 (rtx x, machine_mode mode, e = new_cselib_val (next_uid, GET_MODE (x), x); new_elt_loc_list (e, x); + + scalar_int_mode int_mode; if (REG_VALUES (i) == 0) { /* Maintain the invariant that the first entry of @@ -2018,27 +2020,27 @@ cselib_lookup_1 (rtx x, machine_mode mode, REG_VALUES (i) = new_elt_list (REG_VALUES (i), NULL); } else if (cselib_preserve_constants - && GET_MODE_CLASS (mode) == MODE_INT) + && is_int_mode (mode, &int_mode)) { /* During var-tracking, try harder to find equivalences for SUBREGs. If a setter sets say a DImode register and user uses that register only in SImode, add a lowpart subreg location. 
*/ struct elt_list *lwider = NULL; + scalar_int_mode lmode; l = REG_VALUES (i); if (l && l->elt == NULL) l = l->next; for (; l; l = l->next) - if (GET_MODE_CLASS (GET_MODE (l->elt->val_rtx)) == MODE_INT - && GET_MODE_SIZE (GET_MODE (l->elt->val_rtx)) - > GET_MODE_SIZE (mode) + if (is_int_mode (GET_MODE (l->elt->val_rtx), &lmode) + && GET_MODE_SIZE (lmode) > GET_MODE_SIZE (int_mode) && (lwider == NULL - || GET_MODE_SIZE (GET_MODE (l->elt->val_rtx)) + || GET_MODE_SIZE (lmode) < GET_MODE_SIZE (GET_MODE (lwider->elt->val_rtx)))) { struct elt_loc_list *el; if (i < FIRST_PSEUDO_REGISTER - && hard_regno_nregs[i][GET_MODE (l->elt->val_rtx)] != 1) + && hard_regno_nregs[i][lmode] != 1) continue; for (el = l->elt->locs; el; el = el->next) if (!REG_P (el->loc)) @@ -2048,7 +2050,7 @@ cselib_lookup_1 (rtx x, machine_mode mode, } if (lwider) { - rtx sub = lowpart_subreg (mode, lwider->elt->val_rtx, + rtx sub = lowpart_subreg (int_mode, lwider->elt->val_rtx, GET_MODE (lwider->elt->val_rtx)); if (sub) new_elt_loc_list (e, sub); diff --git a/gcc/dojump.c b/gcc/dojump.c index 34492f32e67..b0842535051 100644 --- a/gcc/dojump.c +++ b/gcc/dojump.c @@ -203,6 +203,7 @@ do_jump_1 (enum tree_code code, tree op0, tree op1, { machine_mode mode; rtx_code_label *drop_through_label = 0; + scalar_int_mode int_mode; switch (code) { @@ -218,8 +219,8 @@ do_jump_1 (enum tree_code code, tree op0, tree op1, if (integer_zerop (op1)) do_jump (op0, if_true_label, if_false_label, prob.invert ()); - else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT - && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump)) + else if (is_int_mode (TYPE_MODE (inner_type), &int_mode) + && !can_compare_p (EQ, int_mode, ccp_jump)) do_jump_by_parts_equality (op0, op1, if_false_label, if_true_label, prob); else @@ -239,8 +240,8 @@ do_jump_1 (enum tree_code code, tree op0, tree op1, if (integer_zerop (op1)) do_jump (op0, if_false_label, if_true_label, prob); - else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT - && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump)) + else if (is_int_mode (TYPE_MODE (inner_type), &int_mode) + && !can_compare_p (NE, int_mode, ccp_jump)) do_jump_by_parts_equality (op0, op1, if_true_label, if_false_label, prob.invert ()); else @@ -251,10 +252,10 @@ do_jump_1 (enum tree_code code, tree op0, tree op1, case LT_EXPR: mode = TYPE_MODE (TREE_TYPE (op0)); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! can_compare_p (LT, mode, ccp_jump)) - do_jump_by_parts_greater (op0, op1, 1, if_false_label, if_true_label, - prob); + if (is_int_mode (mode, &int_mode) + && ! can_compare_p (LT, int_mode, ccp_jump)) + do_jump_by_parts_greater (op0, op1, 1, if_false_label, + if_true_label, prob); else do_compare_and_jump (op0, op1, LT, LTU, if_false_label, if_true_label, prob); @@ -262,8 +263,8 @@ do_jump_1 (enum tree_code code, tree op0, tree op1, case LE_EXPR: mode = TYPE_MODE (TREE_TYPE (op0)); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! can_compare_p (LE, mode, ccp_jump)) + if (is_int_mode (mode, &int_mode) + && ! can_compare_p (LE, int_mode, ccp_jump)) do_jump_by_parts_greater (op0, op1, 0, if_true_label, if_false_label, prob.invert ()); else @@ -273,10 +274,10 @@ do_jump_1 (enum tree_code code, tree op0, tree op1, case GT_EXPR: mode = TYPE_MODE (TREE_TYPE (op0)); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! can_compare_p (GT, mode, ccp_jump)) - do_jump_by_parts_greater (op0, op1, 0, if_false_label, if_true_label, - prob); + if (is_int_mode (mode, &int_mode) + && ! 
can_compare_p (GT, int_mode, ccp_jump)) + do_jump_by_parts_greater (op0, op1, 0, if_false_label, + if_true_label, prob); else do_compare_and_jump (op0, op1, GT, GTU, if_false_label, if_true_label, prob); @@ -284,8 +285,8 @@ do_jump_1 (enum tree_code code, tree op0, tree op1, case GE_EXPR: mode = TYPE_MODE (TREE_TYPE (op0)); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! can_compare_p (GE, mode, ccp_jump)) + if (is_int_mode (mode, &int_mode) + && ! can_compare_p (GE, int_mode, ccp_jump)) do_jump_by_parts_greater (op0, op1, 1, if_true_label, if_false_label, prob.invert ()); else @@ -1024,62 +1025,63 @@ do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp, if (! if_true_label) dummy_label = if_true_label = gen_label_rtx (); - if (GET_MODE_CLASS (mode) == MODE_INT - && ! can_compare_p (code, mode, ccp_jump)) + scalar_int_mode int_mode; + if (is_int_mode (mode, &int_mode) + && ! can_compare_p (code, int_mode, ccp_jump)) { switch (code) { case LTU: - do_jump_by_parts_greater_rtx (mode, 1, op1, op0, + do_jump_by_parts_greater_rtx (int_mode, 1, op1, op0, if_false_label, if_true_label, prob); break; case LEU: - do_jump_by_parts_greater_rtx (mode, 1, op0, op1, + do_jump_by_parts_greater_rtx (int_mode, 1, op0, op1, if_true_label, if_false_label, prob.invert ()); break; case GTU: - do_jump_by_parts_greater_rtx (mode, 1, op0, op1, + do_jump_by_parts_greater_rtx (int_mode, 1, op0, op1, if_false_label, if_true_label, prob); break; case GEU: - do_jump_by_parts_greater_rtx (mode, 1, op1, op0, + do_jump_by_parts_greater_rtx (int_mode, 1, op1, op0, if_true_label, if_false_label, prob.invert ()); break; case LT: - do_jump_by_parts_greater_rtx (mode, 0, op1, op0, + do_jump_by_parts_greater_rtx (int_mode, 0, op1, op0, if_false_label, if_true_label, prob); break; case LE: - do_jump_by_parts_greater_rtx (mode, 0, op0, op1, + do_jump_by_parts_greater_rtx (int_mode, 0, op0, op1, if_true_label, if_false_label, prob.invert ()); break; case GT: - do_jump_by_parts_greater_rtx (mode, 0, op0, op1, + do_jump_by_parts_greater_rtx (int_mode, 0, op0, op1, if_false_label, if_true_label, prob); break; case GE: - do_jump_by_parts_greater_rtx (mode, 0, op1, op0, + do_jump_by_parts_greater_rtx (int_mode, 0, op1, op0, if_true_label, if_false_label, prob.invert ()); break; case EQ: - do_jump_by_parts_equality_rtx (mode, op0, op1, if_false_label, + do_jump_by_parts_equality_rtx (int_mode, op0, op1, if_false_label, if_true_label, prob); break; case NE: - do_jump_by_parts_equality_rtx (mode, op0, op1, if_true_label, + do_jump_by_parts_equality_rtx (int_mode, op0, op1, if_true_label, if_false_label, prob.invert ()); break; diff --git a/gcc/dse.c b/gcc/dse.c index e6643321887..65b4b868aec 100644 --- a/gcc/dse.c +++ b/gcc/dse.c @@ -2185,11 +2185,14 @@ get_call_args (rtx call_insn, tree fn, rtx *args, int nargs) arg != void_list_node && idx < nargs; arg = TREE_CHAIN (arg), idx++) { - machine_mode mode = TYPE_MODE (TREE_VALUE (arg)); + scalar_int_mode mode; rtx reg, link, tmp; + + if (!is_int_mode (TYPE_MODE (TREE_VALUE (arg)), &mode)) + return false; + reg = targetm.calls.function_arg (args_so_far, mode, NULL_TREE, true); - if (!reg || !REG_P (reg) || GET_MODE (reg) != mode - || GET_MODE_CLASS (mode) != MODE_INT) + if (!reg || !REG_P (reg) || GET_MODE (reg) != mode) return false; for (link = CALL_INSN_FUNCTION_USAGE (call_insn); @@ -2197,15 +2200,14 @@ get_call_args (rtx call_insn, tree fn, rtx *args, int nargs) link = XEXP (link, 1)) if (GET_CODE (XEXP (link, 0)) == USE) { + scalar_int_mode arg_mode; args[idx] = 
XEXP (XEXP (link, 0), 0); if (REG_P (args[idx]) && REGNO (args[idx]) == REGNO (reg) && (GET_MODE (args[idx]) == mode - || (GET_MODE_CLASS (GET_MODE (args[idx])) == MODE_INT - && (GET_MODE_SIZE (GET_MODE (args[idx])) - <= UNITS_PER_WORD) - && (GET_MODE_SIZE (GET_MODE (args[idx])) - > GET_MODE_SIZE (mode))))) + || (is_int_mode (GET_MODE (args[idx]), &arg_mode) + && (GET_MODE_SIZE (arg_mode) <= UNITS_PER_WORD) + && (GET_MODE_SIZE (arg_mode) > GET_MODE_SIZE (mode))))) break; } if (!link) diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index fdee0096dbb..f974f180d4f 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -18840,9 +18840,10 @@ rtl_for_decl_init (tree init, tree type) { tree enttype = TREE_TYPE (type); tree domain = TYPE_DOMAIN (type); - machine_mode mode = TYPE_MODE (enttype); + scalar_int_mode mode; - if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_SIZE (mode) == 1 + if (is_int_mode (TYPE_MODE (enttype), &mode) + && GET_MODE_SIZE (mode) == 1 && domain && integer_zerop (TYPE_MIN_VALUE (domain)) && compare_tree_int (TYPE_MAX_VALUE (domain), @@ -19323,9 +19324,10 @@ native_encode_initializer (tree init, unsigned char *array, int size) if (TREE_CODE (type) == ARRAY_TYPE) { tree enttype = TREE_TYPE (type); - machine_mode mode = TYPE_MODE (enttype); + scalar_int_mode mode; - if (GET_MODE_CLASS (mode) != MODE_INT || GET_MODE_SIZE (mode) != 1) + if (!is_int_mode (TYPE_MODE (enttype), &mode) + || GET_MODE_SIZE (mode) != 1) return false; if (int_size_in_bytes (type) != size) return false; diff --git a/gcc/expmed.c b/gcc/expmed.c index f7ac82145de..815c766cec0 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -5446,8 +5446,9 @@ emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1, /* If we are comparing a double-word integer with zero or -1, we can convert the comparison into one involving a single word. */ - if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2 - && GET_MODE_CLASS (mode) == MODE_INT + scalar_int_mode int_mode; + if (is_int_mode (mode, &int_mode) + && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0))) { rtx tem; @@ -5458,8 +5459,8 @@ emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1, /* Do a logical OR or AND of the two words and compare the result. */ - op00 = simplify_gen_subreg (word_mode, op0, mode, 0); - op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD); + op00 = simplify_gen_subreg (word_mode, op0, int_mode, 0); + op01 = simplify_gen_subreg (word_mode, op0, int_mode, UNITS_PER_WORD); tem = expand_binop (word_mode, op1 == const0_rtx ? ior_optab : and_optab, op00, op01, NULL_RTX, unsignedp, @@ -5474,9 +5475,9 @@ emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1, rtx op0h; /* If testing the sign bit, can just test on high word. */ - op0h = simplify_gen_subreg (word_mode, op0, mode, + op0h = simplify_gen_subreg (word_mode, op0, int_mode, subreg_highpart_offset (word_mode, - mode)); + int_mode)); tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode, unsignedp, normalizep); } @@ -5501,21 +5502,21 @@ emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1, /* If this is A < 0 or A >= 0, we can do this by taking the ones complement of A (for GE) and shifting the sign bit to the low bit. 
*/ if (op1 == const0_rtx && (code == LT || code == GE) - && GET_MODE_CLASS (mode) == MODE_INT + && is_int_mode (mode, &int_mode) && (normalizep || STORE_FLAG_VALUE == 1 - || val_signbit_p (mode, STORE_FLAG_VALUE))) + || val_signbit_p (int_mode, STORE_FLAG_VALUE))) { subtarget = target; if (!target) - target_mode = mode; + target_mode = int_mode; /* If the result is to be wider than OP0, it is best to convert it first. If it is to be narrower, it is *incorrect* to convert it first. */ - else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode)) + else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (int_mode)) { - op0 = convert_modes (target_mode, mode, op0, 0); + op0 = convert_modes (target_mode, int_mode, op0, 0); mode = target_mode; } @@ -5925,8 +5926,9 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1, /* The remaining tricks only apply to integer comparisons. */ - if (GET_MODE_CLASS (mode) == MODE_INT) - return emit_store_flag_int (target, subtarget, code, op0, op1, mode, + scalar_int_mode int_mode; + if (is_int_mode (mode, &int_mode)) + return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode, unsignedp, normalizep, trueval); return 0; diff --git a/gcc/expr.c b/gcc/expr.c index 776c7190495..225b8c2925e 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -630,6 +630,7 @@ rtx convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp) { rtx temp; + scalar_int_mode int_mode; /* If FROM is a SUBREG that indicates that we have already done at least the required extension, strip it. */ @@ -645,7 +646,8 @@ convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp) if (mode == oldmode) return x; - if (CONST_SCALAR_INT_P (x) && GET_MODE_CLASS (mode) == MODE_INT) + if (CONST_SCALAR_INT_P (x) + && is_int_mode (mode, &int_mode)) { /* If the caller did not tell us the old mode, then there is not much to do with respect to canonicalization. We have to @@ -653,24 +655,24 @@ convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp) if (GET_MODE_CLASS (oldmode) != MODE_INT) oldmode = MAX_MODE_INT; wide_int w = wide_int::from (rtx_mode_t (x, oldmode), - GET_MODE_PRECISION (mode), + GET_MODE_PRECISION (int_mode), unsignedp ? UNSIGNED : SIGNED); - return immed_wide_int_const (w, mode); + return immed_wide_int_const (w, int_mode); } /* We can do this with a gen_lowpart if both desired and current modes are integer, and this is either a constant integer, a register, or a non-volatile MEM. */ - if (GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_CLASS (oldmode) == MODE_INT - && GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode) - && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) mode]) + scalar_int_mode int_oldmode; + if (is_int_mode (mode, &int_mode) + && is_int_mode (oldmode, &int_oldmode) + && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode) + && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode]) || (REG_P (x) && (!HARD_REGISTER_P (x) - || HARD_REGNO_MODE_OK (REGNO (x), mode)) - && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))))) - - return gen_lowpart (mode, x); + || HARD_REGNO_MODE_OK (REGNO (x), int_mode)) + && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x))))) + return gen_lowpart (int_mode, x); /* Converting from integer constant into mode is always equivalent to an subreg operation. 
*/ @@ -6872,20 +6874,21 @@ store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, case, if it has reverse storage order, it needs to be accessed as a scalar field with reverse storage order and we must first put the value into target order. */ + scalar_int_mode temp_mode; if (AGGREGATE_TYPE_P (TREE_TYPE (exp)) - && GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT) + && is_int_mode (GET_MODE (temp), &temp_mode)) { - HOST_WIDE_INT size = GET_MODE_BITSIZE (GET_MODE (temp)); + HOST_WIDE_INT size = GET_MODE_BITSIZE (temp_mode); reverse = TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp)); if (reverse) - temp = flip_storage_order (GET_MODE (temp), temp); + temp = flip_storage_order (temp_mode, temp); if (bitsize < size && reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN && !(mode == BLKmode && bitsize > BITS_PER_WORD)) - temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp, + temp = expand_shift (RSHIFT_EXPR, temp_mode, temp, size - bitsize, NULL_RTX, 1); } @@ -9959,13 +9962,15 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode, || GET_MODE_CLASS (mode) == MODE_VECTOR_ACCUM || GET_MODE_CLASS (mode) == MODE_VECTOR_UACCUM) return const_vector_from_tree (exp); - if (GET_MODE_CLASS (mode) == MODE_INT) + scalar_int_mode int_mode; + if (is_int_mode (mode, &int_mode)) { if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp))) return const_scalar_mask_from_tree (exp); else { - tree type_for_mode = lang_hooks.types.type_for_mode (mode, 1); + tree type_for_mode + = lang_hooks.types.type_for_mode (int_mode, 1); if (type_for_mode) tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR, type_for_mode, exp); @@ -10360,9 +10365,9 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode, && compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0) { tree type = TREE_TYPE (TREE_TYPE (init)); - machine_mode mode = TYPE_MODE (type); + scalar_int_mode mode; - if (GET_MODE_CLASS (mode) == MODE_INT + if (is_int_mode (TYPE_MODE (type), &mode) && GET_MODE_SIZE (mode) == 1) return gen_int_mode (TREE_STRING_POINTER (init) [TREE_INT_CST_LOW (index1)], @@ -10380,6 +10385,7 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode, { unsigned HOST_WIDE_INT idx; tree field, value; + scalar_int_mode field_mode; FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0), idx, field, value) @@ -10392,8 +10398,8 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode, the bitfield does not meet either of those conditions, we can't do this optimization. */ && (! DECL_BIT_FIELD (field) - || ((GET_MODE_CLASS (DECL_MODE (field)) == MODE_INT) - && (GET_MODE_PRECISION (DECL_MODE (field)) + || (is_int_mode (DECL_MODE (field), &field_mode) + && (GET_MODE_PRECISION (field_mode) <= HOST_BITS_PER_WIDE_INT)))) { if (DECL_BIT_FIELD (field) @@ -10727,18 +10733,19 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode, and this is for big-endian data, we must put the field into the high-order bits. And we must also put it back into memory order if it has been previously reversed. */ + scalar_int_mode op0_mode; if (TREE_CODE (type) == RECORD_TYPE - && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT) + && is_int_mode (GET_MODE (op0), &op0_mode)) { - HOST_WIDE_INT size = GET_MODE_BITSIZE (GET_MODE (op0)); + HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode); if (bitsize < size && reversep ? 
!BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN) - op0 = expand_shift (LSHIFT_EXPR, GET_MODE (op0), op0, + op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0, size - bitsize, op0, 1); if (reversep) - op0 = flip_storage_order (GET_MODE (op0), op0); + op0 = flip_storage_order (op0_mode, op0); } /* If the result type is BLKmode, store the data into a temporary diff --git a/gcc/fold-const.c b/gcc/fold-const.c index 27f4f4eb59f..b9f2a39ee85 100644 --- a/gcc/fold-const.c +++ b/gcc/fold-const.c @@ -13652,14 +13652,15 @@ fold_read_from_constant_string (tree exp) string = exp1; } + scalar_int_mode char_mode; if (string && TYPE_MODE (TREE_TYPE (exp)) == TYPE_MODE (TREE_TYPE (TREE_TYPE (string))) && TREE_CODE (string) == STRING_CST && TREE_CODE (index) == INTEGER_CST && compare_tree_int (index, TREE_STRING_LENGTH (string)) < 0 - && (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))) - == MODE_INT) - && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))) == 1)) + && is_int_mode (TYPE_MODE (TREE_TYPE (TREE_TYPE (string))), + &char_mode) + && GET_MODE_SIZE (char_mode) == 1) return build_int_cst_type (TREE_TYPE (exp), (TREE_STRING_POINTER (string) [TREE_INT_CST_LOW (index)])); diff --git a/gcc/gimple-ssa-sprintf.c b/gcc/gimple-ssa-sprintf.c index 519e9966080..7899e09195f 100644 --- a/gcc/gimple-ssa-sprintf.c +++ b/gcc/gimple-ssa-sprintf.c @@ -528,8 +528,9 @@ get_format_string (tree format, location_t *ploc) tree type = TREE_TYPE (format); - if (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (type))) != MODE_INT - || GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type))) != 1) + scalar_int_mode char_mode; + if (!is_int_mode (TYPE_MODE (TREE_TYPE (type)), &char_mode) + || GET_MODE_SIZE (char_mode) != 1) { /* Wide format string. */ return NULL; diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog index f65a1d47ce0..ea47870590d 100644 --- a/gcc/go/ChangeLog +++ b/gcc/go/ChangeLog @@ -1,3 +1,9 @@ +2017-08-30 Richard Sandiford + Alan Hayward + David Sherwood + + * go-lang.c (go_langhook_type_for_mode): Use is_int_mode. + 2017-08-30 Richard Sandiford Alan Hayward David Sherwood diff --git a/gcc/go/go-lang.c b/gcc/go/go-lang.c index 614d8902953..d470d5ab20a 100644 --- a/gcc/go/go-lang.c +++ b/gcc/go/go-lang.c @@ -382,10 +382,11 @@ go_langhook_type_for_mode (machine_mode mode, int unsignedp) return NULL_TREE; } + scalar_int_mode imode; scalar_float_mode fmode; enum mode_class mc = GET_MODE_CLASS (mode); - if (mc == MODE_INT) - return go_langhook_type_for_size (GET_MODE_BITSIZE (mode), unsignedp); + if (is_int_mode (mode, &imode)) + return go_langhook_type_for_size (GET_MODE_BITSIZE (imode), unsignedp); else if (is_float_mode (mode, &fmode)) { switch (GET_MODE_BITSIZE (fmode)) diff --git a/gcc/machmode.h b/gcc/machmode.h index 6fd10b42a8f..497b0fbde54 100644 --- a/gcc/machmode.h +++ b/gcc/machmode.h @@ -695,6 +695,21 @@ struct int_n_data_t { extern bool int_n_enabled_p[NUM_INT_N_ENTS]; extern const int_n_data_t int_n_data[NUM_INT_N_ENTS]; +/* Return true if MODE has class MODE_INT, storing it as a scalar_int_mode + in *INT_MODE if so. */ + +template +inline bool +is_int_mode (machine_mode mode, T *int_mode) +{ + if (GET_MODE_CLASS (mode) == MODE_INT) + { + *int_mode = scalar_int_mode (scalar_int_mode::from_int (mode)); + return true; + } + return false; +} + /* Return true if MODE has class MODE_FLOAT, storing it as a scalar_float_mode in *FLOAT_MODE if so. 
*/ diff --git a/gcc/optabs-libfuncs.c b/gcc/optabs-libfuncs.c index 151a473a66b..5ad3c6d86f5 100644 --- a/gcc/optabs-libfuncs.c +++ b/gcc/optabs-libfuncs.c @@ -189,8 +189,9 @@ gen_int_libfunc (optab optable, const char *opname, char suffix, { int maxsize = 2 * BITS_PER_WORD; int minsize = BITS_PER_WORD; + scalar_int_mode int_mode; - if (GET_MODE_CLASS (mode) != MODE_INT) + if (!is_int_mode (mode, &int_mode)) return; if (maxsize < LONG_LONG_TYPE_SIZE) maxsize = LONG_LONG_TYPE_SIZE; @@ -198,10 +199,10 @@ gen_int_libfunc (optab optable, const char *opname, char suffix, && (trapv_binoptab_p (optable) || trapv_unoptab_p (optable))) minsize = INT_TYPE_SIZE; - if (GET_MODE_BITSIZE (mode) < minsize - || GET_MODE_BITSIZE (mode) > maxsize) + if (GET_MODE_BITSIZE (int_mode) < minsize + || GET_MODE_BITSIZE (int_mode) > maxsize) return; - gen_libfunc (optable, opname, suffix, mode); + gen_libfunc (optable, opname, suffix, int_mode); } /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */ diff --git a/gcc/optabs.c b/gcc/optabs.c index ee3b4e9cfaa..65a098eb90e 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -1113,6 +1113,7 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, ? OPTAB_WIDEN : methods); enum mode_class mclass; machine_mode wider_mode; + scalar_int_mode int_mode; rtx libfunc; rtx temp; rtx_insn *entry_last = get_last_insn (); @@ -1161,22 +1162,22 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, && optab_handler (rotr_optab, mode) != CODE_FOR_nothing) || (binoptab == rotr_optab && optab_handler (rotl_optab, mode) != CODE_FOR_nothing)) - && mclass == MODE_INT) + && is_int_mode (mode, &int_mode)) { optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab); rtx newop1; - unsigned int bits = GET_MODE_PRECISION (mode); + unsigned int bits = GET_MODE_PRECISION (int_mode); if (CONST_INT_P (op1)) newop1 = GEN_INT (bits - INTVAL (op1)); - else if (targetm.shift_truncation_mask (mode) == bits - 1) + else if (targetm.shift_truncation_mask (int_mode) == bits - 1) newop1 = negate_rtx (GET_MODE (op1), op1); else newop1 = expand_binop (GET_MODE (op1), sub_optab, gen_int_mode (bits, GET_MODE (op1)), op1, NULL_RTX, unsignedp, OPTAB_DIRECT); - temp = expand_binop_directly (mode, otheroptab, op0, newop1, + temp = expand_binop_directly (int_mode, otheroptab, op0, newop1, target, unsignedp, methods, last); if (temp) return temp; @@ -1320,8 +1321,8 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, /* These can be done a word at a time. */ if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab) - && mclass == MODE_INT - && GET_MODE_SIZE (mode) > UNITS_PER_WORD + && is_int_mode (mode, &int_mode) + && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD && optab_handler (binoptab, word_mode) != CODE_FOR_nothing) { int i; @@ -1333,17 +1334,17 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, || target == op0 || target == op1 || !valid_multiword_target_p (target)) - target = gen_reg_rtx (mode); + target = gen_reg_rtx (int_mode); start_sequence (); /* Do the actual arithmetic. 
*/ - for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++) + for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++) { - rtx target_piece = operand_subword (target, i, 1, mode); + rtx target_piece = operand_subword (target, i, 1, int_mode); rtx x = expand_binop (word_mode, binoptab, - operand_subword_force (op0, i, mode), - operand_subword_force (op1, i, mode), + operand_subword_force (op0, i, int_mode), + operand_subword_force (op1, i, int_mode), target_piece, unsignedp, next_methods); if (x == 0) @@ -1356,7 +1357,7 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, insns = get_insns (); end_sequence (); - if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD) + if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD) { emit_insn (insns); return target; @@ -1366,10 +1367,10 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, /* Synthesize double word shifts from single word shifts. */ if ((binoptab == lshr_optab || binoptab == ashl_optab || binoptab == ashr_optab) - && mclass == MODE_INT + && is_int_mode (mode, &int_mode) && (CONST_INT_P (op1) || optimize_insn_for_speed_p ()) - && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD - && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode) + && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD + && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode) && optab_handler (binoptab, word_mode) != CODE_FOR_nothing && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing) @@ -1377,7 +1378,7 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, unsigned HOST_WIDE_INT shift_mask, double_shift_mask; machine_mode op1_mode; - double_shift_mask = targetm.shift_truncation_mask (mode); + double_shift_mask = targetm.shift_truncation_mask (int_mode); shift_mask = targetm.shift_truncation_mask (word_mode); op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode; @@ -1405,7 +1406,7 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, || target == op0 || target == op1 || !valid_multiword_target_p (target)) - target = gen_reg_rtx (mode); + target = gen_reg_rtx (int_mode); start_sequence (); @@ -1417,11 +1418,11 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, left_shift = binoptab == ashl_optab; outof_word = left_shift ^ ! WORDS_BIG_ENDIAN; - outof_target = operand_subword (target, outof_word, 1, mode); - into_target = operand_subword (target, 1 - outof_word, 1, mode); + outof_target = operand_subword (target, outof_word, 1, int_mode); + into_target = operand_subword (target, 1 - outof_word, 1, int_mode); - outof_input = operand_subword_force (op0, outof_word, mode); - into_input = operand_subword_force (op0, 1 - outof_word, mode); + outof_input = operand_subword_force (op0, outof_word, int_mode); + into_input = operand_subword_force (op0, 1 - outof_word, int_mode); if (expand_doubleword_shift (op1_mode, binoptab, outof_input, into_input, op1, @@ -1440,9 +1441,9 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, /* Synthesize double word rotates from single word shifts. 
*/ if ((binoptab == rotl_optab || binoptab == rotr_optab) - && mclass == MODE_INT + && is_int_mode (mode, &int_mode) && CONST_INT_P (op1) - && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD + && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing) { @@ -1463,7 +1464,7 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, || target == op1 || !REG_P (target) || !valid_multiword_target_p (target)) - target = gen_reg_rtx (mode); + target = gen_reg_rtx (int_mode); start_sequence (); @@ -1477,11 +1478,11 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, left_shift = (binoptab == rotl_optab); outof_word = left_shift ^ ! WORDS_BIG_ENDIAN; - outof_target = operand_subword (target, outof_word, 1, mode); - into_target = operand_subword (target, 1 - outof_word, 1, mode); + outof_target = operand_subword (target, outof_word, 1, int_mode); + into_target = operand_subword (target, 1 - outof_word, 1, int_mode); - outof_input = operand_subword_force (op0, outof_word, mode); - into_input = operand_subword_force (op0, 1 - outof_word, mode); + outof_input = operand_subword_force (op0, outof_word, int_mode); + into_input = operand_subword_force (op0, 1 - outof_word, int_mode); if (shift_count == BITS_PER_WORD) { @@ -1557,13 +1558,13 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, /* These can be done a word at a time by propagating carries. */ if ((binoptab == add_optab || binoptab == sub_optab) - && mclass == MODE_INT - && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD + && is_int_mode (mode, &int_mode) + && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD && optab_handler (binoptab, word_mode) != CODE_FOR_nothing) { unsigned int i; optab otheroptab = binoptab == add_optab ? sub_optab : add_optab; - const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; + const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; rtx carry_in = NULL_RTX, carry_out = NULL_RTX; rtx xop0, xop1, xtarget; @@ -1577,10 +1578,10 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, #endif /* Prepare the operands. */ - xop0 = force_reg (mode, op0); - xop1 = force_reg (mode, op1); + xop0 = force_reg (int_mode, op0); + xop1 = force_reg (int_mode, op1); - xtarget = gen_reg_rtx (mode); + xtarget = gen_reg_rtx (int_mode); if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target)) target = xtarget; @@ -1593,9 +1594,9 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, for (i = 0; i < nwords; i++) { int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i); - rtx target_piece = operand_subword (xtarget, index, 1, mode); - rtx op0_piece = operand_subword_force (xop0, index, mode); - rtx op1_piece = operand_subword_force (xop1, index, mode); + rtx target_piece = operand_subword (xtarget, index, 1, int_mode); + rtx op0_piece = operand_subword_force (xop0, index, int_mode); + rtx op1_piece = operand_subword_force (xop1, index, int_mode); rtx x; /* Main add/subtract of the input operands. */ @@ -1654,16 +1655,16 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, carry_in = carry_out; } - if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD) + if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD) { - if (optab_handler (mov_optab, mode) != CODE_FOR_nothing + if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing || ! 
rtx_equal_p (target, xtarget)) { rtx_insn *temp = emit_move_insn (target, xtarget); set_dst_reg_note (temp, REG_EQUAL, gen_rtx_fmt_ee (optab_to_code (binoptab), - mode, copy_rtx (xop0), + int_mode, copy_rtx (xop0), copy_rtx (xop1)), target); } @@ -1683,26 +1684,26 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, try using a signed widening multiply. */ if (binoptab == smul_optab - && mclass == MODE_INT - && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD + && is_int_mode (mode, &int_mode) + && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing && optab_handler (add_optab, word_mode) != CODE_FOR_nothing) { rtx product = NULL_RTX; - if (widening_optab_handler (umul_widen_optab, mode, word_mode) - != CODE_FOR_nothing) + if (widening_optab_handler (umul_widen_optab, int_mode, word_mode) + != CODE_FOR_nothing) { - product = expand_doubleword_mult (mode, op0, op1, target, + product = expand_doubleword_mult (int_mode, op0, op1, target, true, methods); if (!product) delete_insns_since (last); } if (product == NULL_RTX - && widening_optab_handler (smul_widen_optab, mode, word_mode) - != CODE_FOR_nothing) + && (widening_optab_handler (smul_widen_optab, int_mode, word_mode) + != CODE_FOR_nothing)) { - product = expand_doubleword_mult (mode, op0, op1, target, + product = expand_doubleword_mult (int_mode, op0, op1, target, false, methods); if (!product) delete_insns_since (last); @@ -1710,13 +1711,13 @@ expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, if (product != NULL_RTX) { - if (optab_handler (mov_optab, mode) != CODE_FOR_nothing) + if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing) { rtx_insn *move = emit_move_insn (target ? target : product, product); set_dst_reg_note (move, REG_EQUAL, - gen_rtx_fmt_ee (MULT, mode, + gen_rtx_fmt_ee (MULT, int_mode, copy_rtx (op0), copy_rtx (op1)), target ? target : product); @@ -2696,6 +2697,7 @@ expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target, { enum mode_class mclass = GET_MODE_CLASS (mode); machine_mode wider_mode; + scalar_int_mode int_mode; scalar_float_mode float_mode; rtx temp; rtx libfunc; @@ -2853,24 +2855,24 @@ expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target, /* These can be done a word at a time. */ if (unoptab == one_cmpl_optab - && mclass == MODE_INT - && GET_MODE_SIZE (mode) > UNITS_PER_WORD + && is_int_mode (mode, &int_mode) + && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD && optab_handler (unoptab, word_mode) != CODE_FOR_nothing) { int i; rtx_insn *insns; if (target == 0 || target == op0 || !valid_multiword_target_p (target)) - target = gen_reg_rtx (mode); + target = gen_reg_rtx (int_mode); start_sequence (); /* Do the actual arithmetic. */ - for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++) + for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++) { - rtx target_piece = operand_subword (target, i, 1, mode); + rtx target_piece = operand_subword (target, i, 1, int_mode); rtx x = expand_unop (word_mode, unoptab, - operand_subword_force (op0, i, mode), + operand_subword_force (op0, i, int_mode), target_piece, unsignedp); if (target_piece != x) @@ -3116,18 +3118,20 @@ expand_abs_nojump (machine_mode mode, rtx op0, rtx target, value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)), where W is the width of MODE. 
*/ - if (GET_MODE_CLASS (mode) == MODE_INT + scalar_int_mode int_mode; + if (is_int_mode (mode, &int_mode) && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2) { - rtx extended = expand_shift (RSHIFT_EXPR, mode, op0, - GET_MODE_PRECISION (mode) - 1, + rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0, + GET_MODE_PRECISION (int_mode) - 1, NULL_RTX, 0); - temp = expand_binop (mode, xor_optab, extended, op0, target, 0, + temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0, OPTAB_LIB_WIDEN); if (temp != 0) - temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab, + temp = expand_binop (int_mode, + result_unsignedp ? sub_optab : subv_optab, temp, extended, target, 0, OPTAB_LIB_WIDEN); if (temp != 0) @@ -3220,15 +3224,16 @@ expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target) /* If this machine has expensive jumps, we can do one's complement absolute value of X as (((signed) x >> (W-1)) ^ x). */ - if (GET_MODE_CLASS (mode) == MODE_INT + scalar_int_mode int_mode; + if (is_int_mode (mode, &int_mode) && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2) { - rtx extended = expand_shift (RSHIFT_EXPR, mode, op0, - GET_MODE_PRECISION (mode) - 1, + rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0, + GET_MODE_PRECISION (int_mode) - 1, NULL_RTX, 0); - temp = expand_binop (mode, xor_optab, extended, op0, target, 0, + temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0, OPTAB_LIB_WIDEN); if (temp != 0) diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index 83e98b6c8d6..c4fd0e9f8f7 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -77,11 +77,12 @@ mode_signbit_p (machine_mode mode, const_rtx x) { unsigned HOST_WIDE_INT val; unsigned int width; + scalar_int_mode int_mode; - if (GET_MODE_CLASS (mode) != MODE_INT) + if (!is_int_mode (mode, &int_mode)) return false; - width = GET_MODE_PRECISION (mode); + width = GET_MODE_PRECISION (int_mode); if (width == 0) return false; @@ -129,15 +130,16 @@ bool val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val) { unsigned int width; + scalar_int_mode int_mode; - if (GET_MODE_CLASS (mode) != MODE_INT) + if (!is_int_mode (mode, &int_mode)) return false; - width = GET_MODE_PRECISION (mode); + width = GET_MODE_PRECISION (int_mode); if (width == 0 || width > HOST_BITS_PER_WIDE_INT) return false; - val &= GET_MODE_MASK (mode); + val &= GET_MODE_MASK (int_mode); return val == (HOST_WIDE_INT_1U << (width - 1)); } @@ -148,10 +150,11 @@ val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val) { unsigned int width; - if (GET_MODE_CLASS (mode) != MODE_INT) + scalar_int_mode int_mode; + if (!is_int_mode (mode, &int_mode)) return false; - width = GET_MODE_PRECISION (mode); + width = GET_MODE_PRECISION (int_mode); if (width == 0 || width > HOST_BITS_PER_WIDE_INT) return false; @@ -166,10 +169,11 @@ val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val) { unsigned int width; - if (GET_MODE_CLASS (mode) != MODE_INT) + scalar_int_mode int_mode; + if (!is_int_mode (mode, &int_mode)) return false; - width = GET_MODE_PRECISION (mode); + width = GET_MODE_PRECISION (int_mode); if (width == 0 || width > HOST_BITS_PER_WIDE_INT) return false; @@ -4828,18 +4832,19 @@ simplify_relational_operation_1 (enum rtx_code code, machine_mode mode, /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is the same as (zero_extract:SI FOO (const_int 1) BAR). 
*/ + scalar_int_mode int_mode; if (code == NE && op1 == const0_rtx - && GET_MODE_CLASS (mode) == MODE_INT + && is_int_mode (mode, &int_mode) && cmp_mode != VOIDmode /* ??? Work-around BImode bugs in the ia64 backend. */ - && mode != BImode + && int_mode != BImode && cmp_mode != BImode && nonzero_bits (op0, cmp_mode) == 1 && STORE_FLAG_VALUE == 1) - return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode) - ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode) - : lowpart_subreg (mode, op0, cmp_mode); + return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (cmp_mode) + ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, cmp_mode) + : lowpart_subreg (int_mode, op0, cmp_mode); /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */ if ((code == EQ || code == NE) diff --git a/gcc/tree.c b/gcc/tree.c index cbb770f6693..c36183487a5 100644 --- a/gcc/tree.c +++ b/gcc/tree.c @@ -12634,10 +12634,10 @@ vector_type_mode (const_tree t) && (!targetm.vector_mode_supported_p (mode) || !have_regs_of_mode[mode])) { - machine_mode innermode = TREE_TYPE (t)->type_common.mode; + scalar_int_mode innermode; /* For integers, try mapping it to a same-sized scalar mode. */ - if (GET_MODE_CLASS (innermode) == MODE_INT) + if (is_int_mode (TREE_TYPE (t)->type_common.mode, &innermode)) { unsigned int size = (TYPE_VECTOR_SUBPARTS (t) * GET_MODE_BITSIZE (innermode));
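
As a quick illustration of the conversion this patch performs, here is a minimal
before/after sketch, separate from the patch itself.  It assumes the GCC internal
definitions from machmode.h (machine_mode, scalar_int_mode, GET_MODE_CLASS,
GET_MODE_PRECISION and the new is_int_mode) are in scope; the helper names are
made up for the example and do not appear in the patch.

/* Old style: test the mode class directly and keep working with the
   general machine_mode.  */
static unsigned int
int_mode_precision_old (machine_mode mode)
{
  if (GET_MODE_CLASS (mode) == MODE_INT)
    return GET_MODE_PRECISION (mode);
  return 0;
}

/* New style: is_int_mode performs the same MODE_INT check but also hands
   back the mode as a scalar_int_mode, so the code that follows is
   statically known to be dealing with a scalar integer mode.  */
static unsigned int
int_mode_precision_new (machine_mode mode)
{
  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode))
    return GET_MODE_PRECISION (int_mode);
  return 0;
}

In the patch itself, callers follow the same shape: they declare a
scalar_int_mode variable and pass its address to is_int_mode, as in the
combine.c and cse.c hunks above.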