From: Richard Sandiford Date: Tue, 30 Jan 2018 09:48:24 +0000 (+0000) Subject: [AArch64] Fix sve/extract_[12].c for big-endian SVE X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=8711e791deaf97590d68ee82ff7a0b81d54e944d;p=gcc.git [AArch64] Fix sve/extract_[12].c for big-endian SVE sve/extract_[12].c were relying on the target-independent optimisation that removes a redundant vec_select, so that we don't end up with things like: dup v0.4s, v0.4s[0] ...use s0... But that optimisation rightly doesn't trigger for big-endian targets, because GCC expects lane 0 to be in the high part of the register rather than the low part. SVE breaks this assumption -- see the comment at the head of aarch64-sve.md for details -- so the optimisation is valid for both endiannesses. Long term, we probably need some kind of target hook to make GCC aware of this. But there's another problem with the current extract pattern: it doesn't tell the register allocator how cheap an extraction of lane 0 is with tied registers. It seems better to split the lane 0 case out into its own pattern and use tied operands for the FPR<-SIMD case, so that using different registers has the cost of an extra reload. I think we want this for both endiannesses, regardless of the hook described above. Also, the gen_lowpart in this pattern fails for aarch64_be due to TARGET_CAN_CHANGE_MODE_CLASS restrictions, so the patch uses gen_rtx_REG instead. We're only creating this rtl in order to print it, so there's no need for anything fancier. 2018-01-30 Richard Sandiford gcc/ * config/aarch64/aarch64-sve.md (*vec_extract<mode><Vel>_0): New pattern. (*vec_extract<mode><Vel>_v128): Require a nonzero lane number. Use gen_rtx_REG rather than gen_lowpart. 
Reviewed-by: James Greenhalgh From-SVN: r257178 --- diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 771aa014ebd..119be1b6c01 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,10 @@ +2018-01-30 Richard Sandiford + + * config/aarch64/aarch64-sve.md (*vec_extract<mode><Vel>_0): New + pattern. + (*vec_extract<mode><Vel>_v128): Require a nonzero lane number. + Use gen_rtx_REG rather than gen_lowpart. + 2018-01-30 Richard Sandiford * lra-constraints.c (match_reload): Use subreg_lowpart_offset diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md index 8da10c158a7..ee942dfad78 100644 --- a/gcc/config/aarch64/aarch64-sve.md +++ b/gcc/config/aarch64/aarch64-sve.md @@ -484,18 +484,52 @@ } ) +;; Extract element zero. This is a special case because we want to force +;; the registers to be the same for the second alternative, and then +;; split the instruction into nothing after RA. +(define_insn_and_split "*vec_extract<mode><Vel>_0" + [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv") + (vec_select:<VEL> + (match_operand:SVE_ALL 1 "register_operand" "w, 0, w") + (parallel [(const_int 0)])))] + "TARGET_SVE" + { + operands[1] = gen_rtx_REG (<V128>mode, REGNO (operands[1])); + switch (which_alternative) + { + case 0: + return "umov\\t%<vwcore>0, %1.<Vetype>[0]"; + case 1: + return "#"; + case 2: + return "st1\\t{%1.<Vetype>}[0], %0"; + default: + gcc_unreachable (); + } + } + "&& reload_completed + && REG_P (operands[0]) + && REGNO (operands[0]) == REGNO (operands[1])" + [(const_int 0)] + { + emit_note (NOTE_INSN_DELETED); + DONE; + } + [(set_attr "type" "neon_to_gp_q, untyped, neon_store1_one_lane_q")] +) + ;; Extract an element from the Advanced SIMD portion of the register. ;; We don't just reuse the aarch64-simd.md pattern because we don't -;; want any chnage in lane number on big-endian targets. +;; want any change in lane number on big-endian targets. 
(define_insn "*vec_extract<mode><Vel>_v128" [(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv") (vec_select:<VEL> (match_operand:SVE_ALL 1 "register_operand" "w, w, w") (parallel [(match_operand:SI 2 "const_int_operand")])))] "TARGET_SVE - && IN_RANGE (INTVAL (operands[2]) * GET_MODE_SIZE (<VEL>mode), 0, 15)" + && IN_RANGE (INTVAL (operands[2]) * GET_MODE_SIZE (<VEL>mode), 1, 15)" { - operands[1] = gen_lowpart (<V128>mode, operands[1]); + operands[1] = gen_rtx_REG (<V128>mode, REGNO (operands[1])); switch (which_alternative) { case 0: