From 673bf5a6b65e51d177d2cf9fc4002171b1f467ab Mon Sep 17 00:00:00 2001 From: Richard Sandiford <richard.sandiford@linaro.org> Date: Wed, 30 Aug 2017 11:11:16 +0000 Subject: [PATCH] [23/77] Replace != VOIDmode checks with is_a <scalar_int_mode> This patch replaces some checks against VOIDmode with checks of is_a <scalar_int_mode>, in cases where scalar integer modes were the only useful alternatives left. gcc/ 2017-08-30 Richard Sandiford <richard.sandiford@linaro.org> Alan Hayward <alan.hayward@arm.com> David Sherwood <david.sherwood@arm.com> * cfgexpand.c (expand_debug_expr): Use is_a <scalar_int_mode> instead of != VOIDmode. * combine.c (if_then_else_cond): Likewise. (change_zero_ext): Likewise. * dwarf2out.c (mem_loc_descriptor): Likewise. (loc_descriptor): Likewise. * rtlanal.c (canonicalize_condition): Likewise. * simplify-rtx.c (simplify_relational_operation_1): Likewise. From-SVN: r251475 --- gcc/cfgexpand.c | 26 +++++++++++++------------- gcc/combine.c | 15 +++++++-------- gcc/dwarf2out.c | 10 +++++----- gcc/rtlanal.c | 21 ++++++++++----------- gcc/simplify-rtx.c | 14 +++++++------- 5 files changed, 42 insertions(+), 44 deletions(-) diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c index ec242bbb081..ad6f05606ab 100644 --- a/gcc/cfgexpand.c +++ b/gcc/cfgexpand.c @@ -4139,7 +4139,7 @@ expand_debug_expr (tree exp) machine_mode inner_mode = VOIDmode; int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp)); addr_space_t as; - scalar_int_mode op1_mode; + scalar_int_mode op0_mode, op1_mode; switch (TREE_CODE_CLASS (TREE_CODE (exp))) { @@ -4581,23 +4581,23 @@ expand_debug_expr (tree exp) size_t, we need to check for mis-matched modes and correct the addend. */ if (op0 && op1 - && GET_MODE (op0) != VOIDmode && GET_MODE (op1) != VOIDmode - && GET_MODE (op0) != GET_MODE (op1)) + && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode) + && is_a <scalar_int_mode> (GET_MODE (op1), &op1_mode) + && op0_mode != op1_mode) { - if (GET_MODE_BITSIZE (GET_MODE (op0)) < GET_MODE_BITSIZE (GET_MODE (op1)) /* If OP0 is a partial mode, then we must truncate, even if it has - the same bitsize as OP1 as GCC's representation of partial modes - is opaque. 
*/ - || (GET_MODE_CLASS (GET_MODE (op0)) == MODE_PARTIAL_INT - && GET_MODE_BITSIZE (GET_MODE (op0)) == GET_MODE_BITSIZE (GET_MODE (op1)))) - op1 = simplify_gen_unary (TRUNCATE, GET_MODE (op0), op1, - GET_MODE (op1)); + if (GET_MODE_BITSIZE (op0_mode) < GET_MODE_BITSIZE (op1_mode) + /* If OP0 is a partial mode, then we must truncate, even + if it has the same bitsize as OP1 as GCC's + representation of partial modes is opaque. */ + || (GET_MODE_CLASS (op0_mode) == MODE_PARTIAL_INT + && (GET_MODE_BITSIZE (op0_mode) + == GET_MODE_BITSIZE (op1_mode)))) + op1 = simplify_gen_unary (TRUNCATE, op0_mode, op1, op1_mode); else /* We always sign-extend, regardless of the signedness of the operand, because the operand is always unsigned here even if the original C expression is signed. */ - op1 = simplify_gen_unary (SIGN_EXTEND, GET_MODE (op0), op1, - GET_MODE (op1)); + op1 = simplify_gen_unary (SIGN_EXTEND, op0_mode, op1, op1_mode); } /* Fall through. */ case PLUS_EXPR: diff --git a/gcc/combine.c b/gcc/combine.c index b2f6c093231..7239ae3e74c 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -9019,6 +9019,7 @@ if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse) enum rtx_code code = GET_CODE (x); rtx cond0, cond1, true0, true1, false0, false1; unsigned HOST_WIDE_INT nz; + scalar_int_mode int_mode; /* If we are comparing a value against zero, we are done. */ if ((code == NE || code == EQ) @@ -9215,8 +9216,9 @@ if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse) /* If X is known to be either 0 or -1, those are the true and false values when testing X. 
*/ else if (x == constm1_rtx || x == const0_rtx - || (mode != VOIDmode && mode != BLKmode - && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode))) + || (is_a <scalar_int_mode> (mode, &int_mode) + && (num_sign_bit_copies (x, int_mode) + == GET_MODE_PRECISION (int_mode)))) { *ptrue = constm1_rtx, *pfalse = const0_rtx; return x; } @@ -11271,7 +11273,7 @@ change_zero_ext (rtx pat) FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST) { rtx x = **iter; - scalar_int_mode mode; + scalar_int_mode mode, inner_mode; if (!is_a <scalar_int_mode> (GET_MODE (x), &mode)) continue; int size; @@ -11279,12 +11281,9 @@ change_zero_ext (rtx pat) if (GET_CODE (x) == ZERO_EXTRACT && CONST_INT_P (XEXP (x, 1)) && CONST_INT_P (XEXP (x, 2)) - && GET_MODE (XEXP (x, 0)) != VOIDmode - && GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) <= GET_MODE_PRECISION (mode) + && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode) + && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode)) { - machine_mode inner_mode = GET_MODE (XEXP (x, 0)); - size = INTVAL (XEXP (x, 1)); int start = INTVAL (XEXP (x, 2)); diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index b24250ac3b1..7dd9f26be53 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -14689,7 +14689,7 @@ mem_loc_descriptor (rtx rtl, machine_mode mode, if (mode != GET_MODE (rtl) && GET_MODE (rtl) != VOIDmode) return NULL; - scalar_int_mode int_mode, inner_mode; + scalar_int_mode int_mode, inner_mode, op1_mode; switch (GET_CODE (rtl)) { case POST_INC: @@ -15127,9 +15127,8 @@ mem_loc_descriptor (rtx rtl, machine_mode mode, VAR_INIT_STATUS_INITIALIZED); { rtx rtlop1 = XEXP (rtl, 1); - if (GET_MODE (rtlop1) != VOIDmode - && GET_MODE_BITSIZE (GET_MODE (rtlop1)) < GET_MODE_BITSIZE (int_mode) + if (is_a <scalar_int_mode> (GET_MODE (rtlop1), &op1_mode) + && GET_MODE_BITSIZE (op1_mode) < GET_MODE_BITSIZE (int_mode)) rtlop1 = gen_rtx_ZERO_EXTEND (int_mode, rtlop1); op1 = mem_loc_descriptor (rtlop1, int_mode, mem_mode, VAR_INIT_STATUS_INITIALIZED); @@ -15960,7 +15959,8 @@ loc_descriptor (rtx rtl, machine_mode mode, 
break; /* FALLTHROUGH */ case LABEL_REF: - if (mode != VOIDmode && GET_MODE_SIZE (mode) == DWARF2_ADDR_SIZE + if (is_a <scalar_int_mode> (mode, &int_mode) + && GET_MODE_SIZE (int_mode) == DWARF2_ADDR_SIZE && (dwarf_version >= 4 || !dwarf_strict)) { loc_result = new_addr_loc_descr (rtl, dtprel_false); diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 157f592ee97..08c43a9322c 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -5559,40 +5559,39 @@ canonicalize_condition (rtx_insn *insn, rtx cond, int reverse, if we can do computations in the relevant mode and we do not overflow. */ - if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC - && CONST_INT_P (op1) - && GET_MODE (op0) != VOIDmode - && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT + scalar_int_mode op0_mode; + if (CONST_INT_P (op1) + && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode) + && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT) { HOST_WIDE_INT const_val = INTVAL (op1); unsigned HOST_WIDE_INT uconst_val = const_val; unsigned HOST_WIDE_INT max_val - = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0)); + = (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode); switch (code) { case LE: if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1) - code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0)); + code = LT, op1 = gen_int_mode (const_val + 1, op0_mode); break; /* When cross-compiling, const_val might be sign-extended from BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */ case GE: if ((const_val & max_val) - != (HOST_WIDE_INT_1U - << (GET_MODE_PRECISION (GET_MODE (op0)) - 1))) - code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0)); + != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1))) + code = GT, op1 = gen_int_mode (const_val - 1, op0_mode); break; case LEU: if (uconst_val < max_val) - code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0)); + code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode); break; case GEU: if (uconst_val != 0) - code = GTU, op1 = gen_int_mode (uconst_val - 1, 
GET_MODE (op0)); + code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode); break; default: diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index bf8abdb6578..0c91dbb830c 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -4829,19 +4829,19 @@ simplify_relational_operation_1 (enum rtx_code code, machine_mode mode, /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is the same as (zero_extract:SI FOO (const_int 1) BAR). */ - scalar_int_mode int_mode; + scalar_int_mode int_mode, int_cmp_mode; if (code == NE && op1 == const0_rtx && is_int_mode (mode, &int_mode) - && cmp_mode != VOIDmode + && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode) /* ??? Work-around BImode bugs in the ia64 backend. */ && int_mode != BImode - && cmp_mode != BImode - && nonzero_bits (op0, cmp_mode) == 1 + && int_cmp_mode != BImode + && nonzero_bits (op0, int_cmp_mode) == 1 && STORE_FLAG_VALUE == 1) - return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (cmp_mode) - ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, cmp_mode) - : lowpart_subreg (int_mode, op0, cmp_mode); + return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode) + ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode) + : lowpart_subreg (int_mode, op0, int_cmp_mode); /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */ if ((code == EQ || code == NE) -- 2.30.2