From: Kyrylo Tkachov Date: Thu, 30 Apr 2015 13:34:05 +0000 (+0000) Subject: [AArch64] Properly handle SHIFT ops and EXTEND in aarch64_rtx_mult_cost X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=0a78ebe4c7814b10f12e2d2ee67235fd83f94621;p=gcc.git [AArch64] Properly handle SHIFT ops and EXTEND in aarch64_rtx_mult_cost * config/aarch64/aarch64.c (aarch64_shift_p): New function. (aarch64_rtx_mult_cost): Update comment to reflect that it also handles combined arithmetic-shift ops. Properly handle all shift and extend operations that can occur in combination with PLUS/MINUS. Rename maybe_fma to compound_p. (aarch64_rtx_costs): Use aarch64_shift_p when costing compound arithmetic and shift operations. From-SVN: r222624 --- diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 263f6ac7032..4eacfc92d1a 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,13 @@ +2015-04-30 Kyrylo Tkachov + + * config/aarch64/aarch64.c (aarch64_shift_p): New function. + (aarch64_rtx_mult_cost): Update comment to reflect that it also handles + combined arithmetic-shift ops. Properly handle all shift and extend + operations that can occur in combination with PLUS/MINUS. + Rename maybe_fma to compound_p. + (aarch64_rtx_costs): Use aarch64_shift_p when costing compound + arithmetic and shift operations. + 2015-04-30 Kyrylo Tkachov * config/aarch64/aarch64.c (aarch64_rtx_costs): Use extend_arith diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index df76267a32b..595e185d44d 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -5158,9 +5158,17 @@ aarch64_strip_extend (rtx x) return x; } +/* Return true iff CODE is a shift supported in combination + with arithmetic instructions. */ +static bool +aarch64_shift_p (enum rtx_code code) +{ + return code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT; +} + /* Helper function for rtx cost calculation. Calculate the cost of - a MULT, which may be part of a multiply-accumulate rtx.
Return - the calculated cost of the expression, recursing manually in to + a MULT or ASHIFT, which may be part of a compound PLUS/MINUS rtx. + Return the calculated cost of the expression, recursing manually in to operands where needed. */ static int @@ -5170,7 +5178,7 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed) const struct cpu_cost_table *extra_cost = aarch64_tune_params->insn_extra_cost; int cost = 0; - bool maybe_fma = (outer == PLUS || outer == MINUS); + bool compound_p = (outer == PLUS || outer == MINUS); machine_mode mode = GET_MODE (x); gcc_checking_assert (code == MULT); @@ -5185,18 +5193,35 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed) if (GET_MODE_CLASS (mode) == MODE_INT) { /* The multiply will be canonicalized as a shift, cost it as such. */ - if (CONST_INT_P (op1) - && exact_log2 (INTVAL (op1)) > 0) + if (aarch64_shift_p (GET_CODE (x)) + || (CONST_INT_P (op1) + && exact_log2 (INTVAL (op1)) > 0)) { + bool is_extend = GET_CODE (op0) == ZERO_EXTEND + || GET_CODE (op0) == SIGN_EXTEND; if (speed) { - if (maybe_fma) - /* ADD (shifted register). */ - cost += extra_cost->alu.arith_shift; + if (compound_p) + { + if (REG_P (op1)) + /* ARITH + shift-by-register. */ + cost += extra_cost->alu.arith_shift_reg; + else if (is_extend) + /* ARITH + extended register. We don't have a cost field + for ARITH+EXTEND+SHIFT, so use extend_arith here. */ + cost += extra_cost->alu.extend_arith; + else + /* ARITH + shift-by-immediate. */ + cost += extra_cost->alu.arith_shift; + } else /* LSL (immediate). */ - cost += extra_cost->alu.shift; + cost += extra_cost->alu.shift; + } + /* Strip extends as we will have costed them in the case above. */ + if (is_extend) + op0 = aarch64_strip_extend (op0); cost += rtx_cost (op0, GET_CODE (op0), 0, speed); @@ -5214,7 +5239,7 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed) if (speed) { - if (maybe_fma) + if (compound_p) /* MADD/SMADDL/UMADDL.
*/ cost += extra_cost->mult[0].extend_add; else @@ -5232,7 +5257,7 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed) if (speed) { - if (maybe_fma) + if (compound_p) /* MADD. */ cost += extra_cost->mult[mode == DImode].add; else @@ -5253,7 +5278,7 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed) if (GET_CODE (op1) == NEG) op1 = XEXP (op1, 0); - if (maybe_fma) + if (compound_p) /* FMADD/FNMADD/FNMSUB/FMSUB. */ cost += extra_cost->fp[mode == DFmode].fma; else @@ -5831,7 +5856,7 @@ cost_minus: /* Cost this as an FMA-alike operation. */ if ((GET_CODE (new_op1) == MULT - || GET_CODE (new_op1) == ASHIFT) + || aarch64_shift_p (GET_CODE (new_op1))) && code != COMPARE) { *cost += aarch64_rtx_mult_cost (new_op1, MULT, @@ -5901,7 +5926,7 @@ cost_plus: new_op0 = aarch64_strip_extend (op0); if (GET_CODE (new_op0) == MULT - || GET_CODE (new_op0) == ASHIFT) + || aarch64_shift_p (GET_CODE (new_op0))) { *cost += aarch64_rtx_mult_cost (new_op0, MULT, PLUS, speed);