+2019-08-13 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/iterators.md (perm_insn): Include the "1"/"2" suffix.
+ (perm_hilo): Remove UNSPEC_ZIP*, UNSEPC_TRN* and UNSPEC_UZP*.
+ * config/aarch64/aarch64-simd.md
+ (aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>): Rename to...
+ (aarch64_<PERMUTE:perm_insn><mode>): ...this and remove perm_hilo
+ from the asm template.
+ * config/aarch64/aarch64-sve.md
+ (aarch64_<perm_insn><perm_hilo><PRED_ALL:mode>): Rename to...
+ (aarch64_<perm_insn><PRED_ALL:mode>): ...this and remove perm_hilo
+ from the asm template.
+ (aarch64_<perm_insn><perm_hilo><SVE_ALL:mode>): Rename to...
+ (aarch64_<perm_insn><SVE_ALL:mode>): ...this and remove perm_hilo
+ from the asm template.
+ * config/aarch64/aarch64-simd-builtins.def: Update comment.
+
2019-08-13 Martin Liska <mliska@suse.cz>
* value-prof.c (gimple_ic_transform): Add new line.
BUILTIN_VB (UNOP, rbit, 0)
/* Implemented by
- aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>. */
+ aarch64_<PERMUTE:perm_insn><mode>. */
BUILTIN_VALL (BINOP, zip1, 0)
BUILTIN_VALL (BINOP, zip2, 0)
BUILTIN_VALL (BINOP, uzp1, 0)
;; This instruction's pattern is generated directly by
;; aarch64_expand_vec_perm_const, so any changes to the pattern would
;; need corresponding changes there.
-(define_insn "aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>"
+(define_insn "aarch64_<PERMUTE:perm_insn><mode>"
[(set (match_operand:VALL_F16 0 "register_operand" "=w")
(unspec:VALL_F16 [(match_operand:VALL_F16 1 "register_operand" "w")
(match_operand:VALL_F16 2 "register_operand" "w")]
PERMUTE))]
"TARGET_SIMD"
- "<PERMUTE:perm_insn><PERMUTE:perm_hilo>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+ "<PERMUTE:perm_insn>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
[(set_attr "type" "neon_permute<q>")]
)
;; Permutes that take half the elements from one vector and half the
;; elements from the other.
-(define_insn "aarch64_sve_<perm_insn><perm_hilo><mode>"
+(define_insn "aarch64_sve_<perm_insn><mode>"
[(set (match_operand:SVE_ALL 0 "register_operand" "=w")
(unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w")
(match_operand:SVE_ALL 2 "register_operand" "w")]
PERMUTE))]
"TARGET_SVE"
- "<perm_insn><perm_hilo>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+ "<perm_insn>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
)
;; Concatenate two vectors and extract a subvector. Note that the
;; Permutes that take half the elements from one vector and half the
;; elements from the other.
-(define_insn "*aarch64_sve_<perm_insn><perm_hilo><mode>"
+(define_insn "*aarch64_sve_<perm_insn><mode>"
[(set (match_operand:PRED_ALL 0 "register_operand" "=Upa")
(unspec:PRED_ALL [(match_operand:PRED_ALL 1 "register_operand" "Upa")
(match_operand:PRED_ALL 2 "register_operand" "Upa")]
PERMUTE))]
"TARGET_SVE"
- "<perm_insn><perm_hilo>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
+ "<perm_insn>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
)
;; =========================================================================
(UNSPEC_AUTIA1716 "12")
(UNSPEC_AUTIB1716 "14")])
-(define_int_attr perm_insn [(UNSPEC_ZIP1 "zip") (UNSPEC_ZIP2 "zip")
- (UNSPEC_TRN1 "trn") (UNSPEC_TRN2 "trn")
- (UNSPEC_UZP1 "uzp") (UNSPEC_UZP2 "uzp")])
+(define_int_attr perm_insn [(UNSPEC_ZIP1 "zip1") (UNSPEC_ZIP2 "zip2")
+ (UNSPEC_TRN1 "trn1") (UNSPEC_TRN2 "trn2")
+ (UNSPEC_UZP1 "uzp1") (UNSPEC_UZP2 "uzp2")])
; op code for REV instructions (size within which elements are reversed).
(define_int_attr rev_op [(UNSPEC_REV64 "64") (UNSPEC_REV32 "32")
(UNSPEC_REV16 "16")])
-(define_int_attr perm_hilo [(UNSPEC_ZIP1 "1") (UNSPEC_ZIP2 "2")
- (UNSPEC_TRN1 "1") (UNSPEC_TRN2 "2")
- (UNSPEC_UZP1 "1") (UNSPEC_UZP2 "2")
- (UNSPEC_UNPACKSHI "hi") (UNSPEC_UNPACKUHI "hi")
+(define_int_attr perm_hilo [(UNSPEC_UNPACKSHI "hi") (UNSPEC_UNPACKUHI "hi")
(UNSPEC_UNPACKSLO "lo") (UNSPEC_UNPACKULO "lo")])
;; Return true if the associated optab refers to the high-numbered lanes,