From: Andrew Stubbs
Date: Mon, 10 Feb 2020 13:23:29 +0000 (+0000)
Subject: amdgcn: Add fold_left_plus vector reductions
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=bf628a97efaf11204ab02527b30ca71d7759ca37;p=gcc.git

amdgcn: Add fold_left_plus vector reductions

These aren't real in-order instructions, because the ISA can't do that
quickly, but a means to allow regular out-of-order reductions when
that's good enough, but the middle-end doesn't know so.

gcc/
	* config/gcn/gcn-valu.md (fold_left_plus_<mode>): New.
---

diff --git a/gcc/config/gcn/gcn-valu.md b/gcc/config/gcn/gcn-valu.md
index 6d7fecaa12c..26559ff765e 100644
--- a/gcc/config/gcn/gcn-valu.md
+++ b/gcc/config/gcn/gcn-valu.md
@@ -3076,6 +3076,26 @@
     DONE;
   })
 
+;; Warning: This "-ffast-math" implementation converts in-order reductions
+;;          into associative reductions. It's also used where OpenMP or
+;;          OpenACC parallelization has already broken the in-order semantics.
+(define_expand "fold_left_plus_<mode>"
+  [(match_operand:<SCALAR_MODE> 0 "register_operand")
+   (match_operand:<SCALAR_MODE> 1 "gcn_alu_operand")
+   (match_operand:V_FP 2 "gcn_alu_operand")]
+   "can_create_pseudo_p ()
+    && (flag_openacc || flag_openmp
+        || flag_associative_math)"
+  {
+    rtx dest = operands[0];
+    rtx scalar = operands[1];
+    rtx vector = operands[2];
+    rtx tmp = gen_reg_rtx (<SCALAR_MODE>mode);
+
+    emit_insn (gen_reduc_plus_scal_<mode> (tmp, vector));
+    emit_insn (gen_add<scalar_mode>3 (dest, scalar, tmp));
+    DONE;
+  })
 
 (define_insn "*<reduc_op>_dpp_shr_<mode>"
   [(set (match_operand:V_1REG 0 "register_operand"   "=v")