From: Richard Sandiford
Date: Sat, 25 Jan 2020 12:43:28 +0000 (+0000)
Subject: aarch64: Add vector/vector vec_extract patterns [PR92822]
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=c15893df6eafc32efd6184379dd7f02c36da7d12;p=gcc.git

aarch64: Add vector/vector vec_extract patterns [PR92822]

Part of the problem in this PR is that we don't provide patterns to
extract a 64-bit vector from one half of a 128-bit vector.  Adding
them fixes:

FAIL: gcc.target/aarch64/fmul_intrinsic_1.c scan-assembler-times fmul\\td[0-9]+, d[0-9]+, d[0-9]+ 1
FAIL: gcc.target/aarch64/fmul_intrinsic_1.c scan-assembler-times fmul\\tv[0-9]+.2d, v[0-9]+.2d, v[0-9]+.d\\[[0-9]+\\] 3

The 2s failures need target-independent changes, after which they
rely on these patterns too.
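
For illustration, a minimal sketch of the kind of source involved (the
function below is invented for this note, not copied from the testsuite;
the intrinsics are the standard arm_neon.h ones):

    #include <arm_neon.h>

    /* Multiply the low and high halves of X as 64-bit vectors.  */
    float64x1_t
    mul_halves (float64x2_t x)
    {
      return vmul_f64 (vget_low_f64 (x), vget_high_f64 (x));
    }

Each vget_*_f64 call extracts a 64-bit vector from one half of a 128-bit
vector.  Once that extraction has a pattern, the high half can be taken
with a "dup" that stays in the FP/SIMD registers and the multiply can use
the d-register form of fmul, instead of bouncing values through the
general registers with "umov".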
"register_operand" "=r") + [(set (match_operand: 0 "register_operand" "=w,?r") (vec_select: - (match_operand:VQMOV 1 "register_operand" "w") - (match_operand:VQMOV 2 "vect_par_cnst_hi_half" "")))] - "TARGET_SIMD && reload_completed" - "umov\t%0, %1.d[1]" - [(set_attr "type" "neon_to_gp") - (set_attr "length" "4") - ]) + (match_operand:VQMOV_NO2E 1 "register_operand" "w,w") + (match_operand:VQMOV_NO2E 2 "vect_par_cnst_hi_half" "")))] + "TARGET_SIMD" + "@ + dup\\t%d0, %1.d[1] + umov\t%0, %1.d[1]" + [(set_attr "type" "neon_dup,neon_to_gp") + (set_attr "length" "4")] +) (define_insn "orn3" [(set (match_operand:VDQ_I 0 "register_operand" "=w") @@ -6140,6 +6154,35 @@ DONE; }) +;; Extract a 64-bit vector from one half of a 128-bit vector. +(define_expand "vec_extract" + [(match_operand: 0 "register_operand") + (match_operand:VQMOV_NO2E 1 "register_operand") + (match_operand 2 "immediate_operand")] + "TARGET_SIMD" +{ + int start = INTVAL (operands[2]); + if (start != 0 && start != / 2) + FAIL; + rtx sel = aarch64_gen_stepped_int_parallel ( / 2, start, 1); + emit_insn (gen_aarch64_get_half (operands[0], operands[1], sel)); + DONE; +}) + +;; Extract a single-element 64-bit vector from one half of a 128-bit vector. +(define_expand "vec_extractv2dfv1df" + [(match_operand:V1DF 0 "register_operand") + (match_operand:V2DF 1 "register_operand") + (match_operand 2 "immediate_operand")] + "TARGET_SIMD" +{ + /* V1DF is rarely used by other patterns, so it should be better to hide + it in a subreg destination of a normal DF op. */ + rtx scalar0 = gen_lowpart (DFmode, operands[0]); + emit_insn (gen_vec_extractv2dfdf (scalar0, operands[1], operands[2])); + DONE; +}) + ;; aes (define_insn "aarch64_crypto_aesv16qi"