From: Richard Sandiford
Date: Mon, 6 Nov 2017 20:02:35 +0000 (+0000)
Subject: [AArch64] Pass number of units to aarch64_simd_vect_par_cnst_half
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=f5cbabc1ccfe4ac8dc3226553cbc6a8fbe6286a3;p=gcc.git

[AArch64] Pass number of units to aarch64_simd_vect_par_cnst_half

This patch passes the number of units to aarch64_simd_vect_par_cnst_half,
which avoids a to_constant () once GET_MODE_NUNITS is variable.

2017-11-06  Richard Sandiford
	    Alan Hayward
	    David Sherwood

gcc/
	* config/aarch64/aarch64-protos.h (aarch64_simd_vect_par_cnst_half):
	Take the number of units too.
	* config/aarch64/aarch64.c (aarch64_simd_vect_par_cnst_half): Likewise.
	(aarch64_simd_check_vect_par_cnst_half): Update call accordingly,
	but check for a vector mode before rather than after the call.
	* config/aarch64/aarch64-simd.md (aarch64_split_simd_mov)
	(move_hi_quad_, vec_unpack_hi_)
	(vec_unpack_lo_mult_lo_)
	(vec_widen_mult_hi_, vec_unpacks_lo_)
	(vec_unpacks_hi_, aarch64_saddl2, aarch64_uaddl2)
	(aarch64_ssubl2, aarch64_usubl2, widen_ssum3)
	(widen_usum3, aarch64_saddw2, aarch64_uaddw2)
	(aarch64_ssubw2, aarch64_usubw2, aarch64_sqdmlal2)
	(aarch64_sqdmlsl2, aarch64_sqdmlal2_lane)
	(aarch64_sqdmlal2_laneq, aarch64_sqdmlsl2_lane)
	(aarch64_sqdmlsl2_laneq, aarch64_sqdmlal2_n)
	(aarch64_sqdmlsl2_n, aarch64_sqdmull2)
	(aarch64_sqdmull2_lane, aarch64_sqdmull2_laneq)
	(aarch64_sqdmull2_n): Update accordingly.

Reviewed-by: James Greenhalgh
Co-Authored-By: Alan Hayward
Co-Authored-By: David Sherwood

From-SVN: r254468
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index ad78a257288..5d11224126d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,27 @@
+2017-11-06  Richard Sandiford
+	    Alan Hayward
+	    David Sherwood
+
+	* config/aarch64/aarch64-protos.h (aarch64_simd_vect_par_cnst_half):
+	Take the number of units too.
+	* config/aarch64/aarch64.c (aarch64_simd_vect_par_cnst_half): Likewise.
+	(aarch64_simd_check_vect_par_cnst_half): Update call accordingly,
+	but check for a vector mode before rather than after the call.
+	* config/aarch64/aarch64-simd.md (aarch64_split_simd_mov)
+	(move_hi_quad_, vec_unpack_hi_)
+	(vec_unpack_lo_mult_lo_)
+	(vec_widen_mult_hi_, vec_unpacks_lo_)
+	(vec_unpacks_hi_, aarch64_saddl2, aarch64_uaddl2)
+	(aarch64_ssubl2, aarch64_usubl2, widen_ssum3)
+	(widen_usum3, aarch64_saddw2, aarch64_uaddw2)
+	(aarch64_ssubw2, aarch64_usubw2, aarch64_sqdmlal2)
+	(aarch64_sqdmlsl2, aarch64_sqdmlal2_lane)
+	(aarch64_sqdmlal2_laneq, aarch64_sqdmlsl2_lane)
+	(aarch64_sqdmlsl2_laneq, aarch64_sqdmlal2_n)
+	(aarch64_sqdmlsl2_n, aarch64_sqdmull2)
+	(aarch64_sqdmull2_lane, aarch64_sqdmull2_laneq)
+	(aarch64_sqdmull2_n): Update accordingly.
+
 2017-11-06  Richard Sandiford
 	    Alan Hayward
 	    David Sherwood

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 39691155aff..4fdded76dac 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -391,7 +391,7 @@ const char *aarch64_output_move_struct (rtx *operands);
 rtx aarch64_return_addr (int, rtx);
 rtx aarch64_simd_gen_const_vector_dup (machine_mode, HOST_WIDE_INT);
 bool aarch64_simd_mem_operand_p (rtx);
-rtx aarch64_simd_vect_par_cnst_half (machine_mode, bool);
+rtx aarch64_simd_vect_par_cnst_half (machine_mode, int, bool);
 rtx aarch64_tls_get_addr (void);
 tree aarch64_fold_builtin (tree, int, tree *, bool);
 unsigned aarch64_dbx_register_number (unsigned);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 642f4b1bfa3..0699e56e6e8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -253,8 +253,8 @@
 {
   rtx dst_low_part = gen_lowpart (mode, dst);
   rtx dst_high_part = gen_highpart (mode, dst);
-  rtx lo = aarch64_simd_vect_par_cnst_half (mode, false);
-  rtx hi = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx lo = aarch64_simd_vect_par_cnst_half (mode, , false);
+  rtx hi = aarch64_simd_vect_par_cnst_half (mode, , true);

   emit_insn
     (gen_aarch64_simd_mov_from_low (dst_low_part, src, lo));
@@ -1437,7 +1437,7 @@
    (match_operand: 1 "register_operand" "")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, false);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , false);
   if (BYTES_BIG_ENDIAN)
     emit_insn (gen_aarch64_simd_move_hi_quad_be_ (operands[0],
		    operands[1], p));
@@ -1521,7 +1521,7 @@
    (ANY_EXTEND: (match_operand:VQW 1 "register_operand"))]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_simd_vec_unpack_hi_ (operands[0],
					       operands[1], p));
   DONE;
@@ -1533,7 +1533,7 @@
    (ANY_EXTEND: (match_operand:VQW 1 "register_operand" ""))]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, false);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , false);
   emit_insn (gen_aarch64_simd_vec_unpack_lo_ (operands[0],
					       operands[1], p));
   DONE;
@@ -1653,7 +1653,7 @@
    (ANY_EXTEND: (match_operand:VQW 2 "register_operand" ""))]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, false);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , false);
   emit_insn (gen_aarch64_simd_vec_mult_lo_ (operands[0],
					     operands[1],
					     operands[2], p));
@@ -1680,7 +1680,7 @@
    (ANY_EXTEND: (match_operand:VQW 2 "register_operand" ""))]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_simd_vec_mult_hi_ (operands[0],
					     operands[1],
					     operands[2], p));
@@ -2084,7 +2084,7 @@
    (match_operand:VQ_HSF 1 "register_operand" "")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, false);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , false);
   emit_insn (gen_aarch64_simd_vec_unpacks_lo_ (operands[0],
						operands[1], p));
   DONE;
@@ -2107,7 +2107,7 @@
    (match_operand:VQ_HSF 1 "register_operand" "")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_simd_vec_unpacks_lo_ (operands[0],
						operands[1], p));
   DONE;
@@ -3028,7 +3028,7 @@
    (match_operand:VQW 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_saddl_hi_internal (operands[0], operands[1],
					     operands[2], p));
   DONE;
@@ -3040,7 +3040,7 @@
    (match_operand:VQW 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_uaddl_hi_internal (operands[0], operands[1],
					     operands[2], p));
   DONE;
@@ -3052,7 +3052,7 @@
    (match_operand:VQW 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_ssubl_hi_internal (operands[0], operands[1],
					     operands[2], p));
   DONE;
@@ -3064,7 +3064,7 @@
    (match_operand:VQW 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_usubl_hi_internal (operands[0], operands[1],
					     operands[2], p));
   DONE;
@@ -3090,7 +3090,7 @@
    (match_operand: 2 "register_operand" "")))]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, false);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , false);
   rtx temp = gen_reg_rtx (GET_MODE (operands[0]));

   emit_insn (gen_aarch64_saddw_internal (temp, operands[2],
@@ -3118,7 +3118,7 @@
    (match_operand: 2 "register_operand" "")))]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, false);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , false);
   rtx temp = gen_reg_rtx (GET_MODE (operands[0]));

   emit_insn (gen_aarch64_uaddw_internal (temp, operands[2],
@@ -3179,7 +3179,7 @@
    (match_operand:VQW 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_saddw2_internal (operands[0], operands[1],
					   operands[2], p));
   DONE;
@@ -3191,7 +3191,7 @@
    (match_operand:VQW 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_uaddw2_internal (operands[0], operands[1],
					   operands[2], p));
   DONE;
@@ -3204,7 +3204,7 @@
    (match_operand:VQW 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_ssubw2_internal (operands[0], operands[1],
					   operands[2], p));
   DONE;
@@ -3216,7 +3216,7 @@
    (match_operand:VQW 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_usubw2_internal (operands[0], operands[1],
					   operands[2], p));
   DONE;
@@ -3736,7 +3736,7 @@
    (match_operand:VQ_HSI 3 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmlal2_internal (operands[0], operands[1],
					     operands[2], operands[3], p));
   DONE;
@@ -3749,7 +3749,7 @@
    (match_operand:VQ_HSI 3 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmlsl2_internal (operands[0], operands[1],
					     operands[2], operands[3], p));
   DONE;
@@ -3817,7 +3817,7 @@
    (match_operand:SI 4 "immediate_operand" "i")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmlal2_lane_internal (operands[0], operands[1],
						  operands[2], operands[3],
						  operands[4], p));
@@ -3832,7 +3832,7 @@
    (match_operand:SI 4 "immediate_operand" "i")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmlal2_laneq_internal (operands[0], operands[1],
						   operands[2], operands[3],
						   operands[4], p));
@@ -3847,7 +3847,7 @@
    (match_operand:SI 4 "immediate_operand" "i")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmlsl2_lane_internal (operands[0], operands[1],
						  operands[2], operands[3],
						  operands[4], p));
@@ -3862,7 +3862,7 @@
    (match_operand:SI 4 "immediate_operand" "i")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmlsl2_laneq_internal (operands[0], operands[1],
						   operands[2], operands[3],
						   operands[4], p));
@@ -3895,7 +3895,7 @@
    (match_operand: 3 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmlal2_n_internal (operands[0], operands[1],
					       operands[2], operands[3],
					       p));
@@ -3909,7 +3909,7 @@
    (match_operand: 3 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmlsl2_n_internal (operands[0], operands[1],
					       operands[2], operands[3],
					       p));
@@ -4063,7 +4063,7 @@
    (match_operand:VQ_HSI 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmull2_internal (operands[0], operands[1],
					     operands[2], p));
   DONE;
@@ -4124,7 +4124,7 @@
    (match_operand:SI 3 "immediate_operand" "i")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmull2_lane_internal (operands[0], operands[1],
						  operands[2], operands[3],
						  p));
@@ -4138,7 +4138,7 @@
    (match_operand:SI 3 "immediate_operand" "i")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmull2_laneq_internal (operands[0], operands[1],
						   operands[2], operands[3],
						   p));
@@ -4171,7 +4171,7 @@
    (match_operand: 2 "register_operand" "w")]
   "TARGET_SIMD"
 {
-  rtx p = aarch64_simd_vect_par_cnst_half (mode, true);
+  rtx p = aarch64_simd_vect_par_cnst_half (mode, , true);
   emit_insn (gen_aarch64_sqdmull2_n_internal (operands[0], operands[1],
					       operands[2], p));
   DONE;
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index b3ce7f6d271..d209f816663 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -11757,12 +11757,12 @@
    Architecture      3 2 1 0     3 2 1 0
 Low Mask:         { 2, 3 }   { 0, 1 }
 High Mask:        { 0, 1 }   { 2, 3 }
-*/
+
+   MODE Is the mode of the vector and NUNITS is the number of units in it.  */
 rtx
-aarch64_simd_vect_par_cnst_half (machine_mode mode, bool high)
+aarch64_simd_vect_par_cnst_half (machine_mode mode, int nunits, bool high)
 {
-  int nunits = GET_MODE_NUNITS (mode);
   rtvec v = rtvec_alloc (nunits / 2);
   int high_base = nunits / 2;
   int low_base = 0;
@@ -11791,14 +11791,15 @@
 bool
 aarch64_simd_check_vect_par_cnst_half (rtx op, machine_mode mode,
				       bool high)
 {
-  rtx ideal = aarch64_simd_vect_par_cnst_half (mode, high);
+  if (!VECTOR_MODE_P (mode))
+    return false;
+
+  rtx ideal = aarch64_simd_vect_par_cnst_half (mode, GET_MODE_NUNITS (mode),
+					       high);
   HOST_WIDE_INT count_op = XVECLEN (op, 0);
   HOST_WIDE_INT count_ideal = XVECLEN (ideal, 0);
   int i = 0;

-  if (!VECTOR_MODE_P (mode))
-    return false;
-
   if (count_op != count_ideal)
     return false;
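
The lane selection that the helper performs is easier to see outside of RTL.
The sketch below is a minimal, hypothetical stand-alone C model of it: a plain
int array stands in for the PARALLEL rtx, a big_endian parameter stands in for
BYTES_BIG_ENDIAN, and the unit count is supplied by the caller, as in the new
interface.  It is an illustration only, not GCC code.

/* Stand-alone model of the lane selection done by
   aarch64_simd_vect_par_cnst_half.  GCC builds a PARALLEL rtx of lane
   numbers; here the lane numbers are returned as an int array and
   "big_endian" stands in for BYTES_BIG_ENDIAN.  */

#include <stdio.h>
#include <stdlib.h>

static int *
vect_par_cnst_half (int nunits, int high, int big_endian)
{
  int *lanes = malloc (sizeof (int) * (nunits / 2));
  int high_base = nunits / 2;
  int low_base = 0;
  int base;

  /* On big-endian targets the architecturally "high" half sits in the
     low-numbered lanes of the register, so the two bases swap.  */
  if (big_endian)
    base = high ? low_base : high_base;
  else
    base = high ? high_base : low_base;

  for (int i = 0; i < nunits / 2; i++)
    lanes[i] = base + i;
  return lanes;
}

int
main (void)
{
  /* A V4SI-like vector has 4 units; the caller passes the count directly,
     mirroring the new interface, instead of the helper querying the mode.  */
  int nunits = 4;
  int *lo = vect_par_cnst_half (nunits, /*high=*/0, /*big_endian=*/0);
  int *hi = vect_par_cnst_half (nunits, /*high=*/1, /*big_endian=*/0);

  printf ("low mask:  { %d, %d }\n", lo[0], lo[1]);  /* { 0, 1 } */
  printf ("high mask: { %d, %d }\n", hi[0], hi[1]);  /* { 2, 3 } */

  free (lo);
  free (hi);
  return 0;
}

On a little-endian target this prints low mask { 0, 1 } and high mask
{ 2, 3 }, matching the table in the function comment above; with big_endian
set, the two halves swap.  Passing nunits from the caller matters for the
reason the commit message gives: once GET_MODE_NUNITS becomes variable (a
poly_int, in preparation for SVE), code that needs a plain integer would have
to call to_constant (), whereas the Advanced SIMD expanders already know the
constant unit count for their fixed-width modes.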