;; in combination with a separate predicate operand, e.g.
;;
;; (unspec [(match_operand:<VPRED> 1 "register_operand" "Upl")
-;; (sqrt:SVE_F 2 "register_operand" "w")]
+;; (sqrt:SVE_FULL_F 2 "register_operand" "w")]
;; ....)
;;
;; because (sqrt ...) can raise an exception for any lane, including
;; -------------------------------------------------------------------------
(define_expand "mov<mode>"
- [(set (match_operand:SVE_ALL 0 "nonimmediate_operand")
- (match_operand:SVE_ALL 1 "general_operand"))]
+ [(set (match_operand:SVE_FULL 0 "nonimmediate_operand")
+ (match_operand:SVE_FULL 1 "general_operand"))]
"TARGET_SVE"
{
/* Use the predicated load and store patterns where possible.
)
(define_expand "movmisalign<mode>"
- [(set (match_operand:SVE_ALL 0 "nonimmediate_operand")
- (match_operand:SVE_ALL 1 "general_operand"))]
+ [(set (match_operand:SVE_FULL 0 "nonimmediate_operand")
+ (match_operand:SVE_FULL 1 "general_operand"))]
"TARGET_SVE"
{
    /* Equivalent to a normal move for our purposes. */
;; during and after RA; before RA we want the predicated load and store
;; patterns to be used instead.
(define_insn "*aarch64_sve_mov<mode>_le"
- [(set (match_operand:SVE_ALL 0 "aarch64_sve_nonimmediate_operand" "=w, Utr, w, w")
- (match_operand:SVE_ALL 1 "aarch64_sve_general_operand" "Utr, w, w, Dn"))]
+ [(set (match_operand:SVE_FULL 0 "aarch64_sve_nonimmediate_operand" "=w, Utr, w, w")
+ (match_operand:SVE_FULL 1 "aarch64_sve_general_operand" "Utr, w, w, Dn"))]
"TARGET_SVE
&& (<MODE>mode == VNx16QImode || !BYTES_BIG_ENDIAN)
&& ((lra_in_progress || reload_completed)
;; Unpredicated moves (non-byte big-endian). Memory accesses require secondary
;; reloads.
(define_insn "*aarch64_sve_mov<mode>_be"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w")
- (match_operand:SVE_ALL 1 "aarch64_nonmemory_operand" "w, Dn"))]
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w, w")
+ (match_operand:SVE_FULL 1 "aarch64_nonmemory_operand" "w, Dn"))]
"TARGET_SVE && BYTES_BIG_ENDIAN && <MODE>mode != VNx16QImode"
"@
mov\t%0.d, %1.d
;; Note that this pattern is generated directly by aarch64_emit_sve_pred_move,
;; so changes to this pattern will need changes there as well.
(define_insn_and_split "@aarch64_pred_mov<mode>"
- [(set (match_operand:SVE_ALL 0 "nonimmediate_operand" "=w, w, m")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "nonimmediate_operand" "=w, w, m")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (match_operand:SVE_ALL 2 "nonimmediate_operand" "w, m, w")]
+ (match_operand:SVE_FULL 2 "nonimmediate_operand" "w, m, w")]
UNSPEC_PRED_X))]
"TARGET_SVE
&& (register_operand (operands[0], <MODE>mode)
;; for details. We use a special predicate for operand 2 to reduce
;; the number of patterns.
(define_insn_and_split "*aarch64_sve_mov<mode>_subreg_be"
- [(set (match_operand:SVE_ALL 0 "aarch64_sve_nonimmediate_operand" "=w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "aarch64_sve_nonimmediate_operand" "=w")
+ (unspec:SVE_FULL
[(match_operand:VNx16BI 1 "register_operand" "Upl")
(match_operand 2 "aarch64_any_register_operand" "w")]
UNSPEC_REV_SUBREG))]
;; This is equivalent to a subreg on little-endian targets but not for
;; big-endian; see the comment at the head of the file for details.
(define_expand "@aarch64_sve_reinterpret<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (unspec:SVE_ALL [(match_operand 1 "aarch64_any_register_operand")]
- UNSPEC_REINTERPRET))]
+ [(set (match_operand:SVE_FULL 0 "register_operand")
+ (unspec:SVE_FULL
+ [(match_operand 1 "aarch64_any_register_operand")]
+ UNSPEC_REINTERPRET))]
"TARGET_SVE"
{
if (!BYTES_BIG_ENDIAN)
;; A pattern for handling type punning on big-endian targets. We use a
;; special predicate for operand 1 to reduce the number of patterns.
(define_insn_and_split "*aarch64_sve_reinterpret<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL [(match_operand 1 "aarch64_any_register_operand" "w")]
- UNSPEC_REINTERPRET))]
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(match_operand 1 "aarch64_any_register_operand" "w")]
+ UNSPEC_REINTERPRET))]
"TARGET_SVE"
"#"
"&& reload_completed"
;; Predicated LD1.
(define_insn "maskload<mode><vpred>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:SVE_ALL 1 "memory_operand" "m")]
+ (match_operand:SVE_FULL 1 "memory_operand" "m")]
UNSPEC_LD1_SVE))]
"TARGET_SVE"
"ld1<Vesize>\t%0.<Vetype>, %2/z, %1"
;; Contiguous non-extending first-faulting or non-faulting loads.
(define_insn "@aarch64_ld<fn>f1<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:SVE_ALL 1 "aarch64_sve_ld<fn>f1_operand" "Ut<fn>")
+ (match_operand:SVE_FULL 1 "aarch64_sve_ld<fn>f1_operand" "Ut<fn>")
(reg:VNx16BI FFRT_REGNUM)]
SVE_LDFF1_LDNF1))]
"TARGET_SVE"
;; Predicated contiguous non-temporal load.
(define_insn "@aarch64_ldnt1<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:SVE_ALL 1 "memory_operand" "m")]
+ (match_operand:SVE_FULL 1 "memory_operand" "m")]
UNSPEC_LDNT1_SVE))]
"TARGET_SVE"
"ldnt1<Vesize>\t%0.<Vetype>, %2/z, %1"
;; Unpredicated gather loads.
(define_expand "gather_load<mode><v_int_equiv>"
- [(set (match_operand:SVE_SD 0 "register_operand")
- (unspec:SVE_SD
+ [(set (match_operand:SVE_FULL_SD 0 "register_operand")
+ (unspec:SVE_FULL_SD
[(match_dup 5)
(match_operand:DI 1 "aarch64_sve_gather_offset_<Vesize>")
(match_operand:<V_INT_EQUIV> 2 "register_operand")
;; Predicated gather loads for 32-bit elements. Operand 3 is true for
;; unsigned extension and false for signed extension.
(define_insn "mask_gather_load<mode><v_int_equiv>"
- [(set (match_operand:SVE_S 0 "register_operand" "=w, w, w, w, w, w")
- (unspec:SVE_S
+ [(set (match_operand:SVE_FULL_S 0 "register_operand" "=w, w, w, w, w, w")
+ (unspec:SVE_FULL_S
[(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_w" "Z, vgw, rk, rk, rk, rk")
(match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w")
;; Predicated gather loads for 64-bit elements. The value of operand 3
;; doesn't matter in this case.
(define_insn "mask_gather_load<mode><v_int_equiv>"
- [(set (match_operand:SVE_D 0 "register_operand" "=w, w, w, w")
- (unspec:SVE_D
+ [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w, w, w")
+ (unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_d" "Z, vgd, rk, rk")
(match_operand:VNx2DI 2 "register_operand" "w, w, w, w")
;; Likewise, but with the offset being sign-extended from 32 bits.
(define_insn "*mask_gather_load<mode><v_int_equiv>_sxtw"
- [(set (match_operand:SVE_D 0 "register_operand" "=w, w")
- (unspec:SVE_D
+ [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w")
+ (unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(unspec:VNx2DI
;; Likewise, but with the offset being zero-extended from 32 bits.
(define_insn "*mask_gather_load<mode><v_int_equiv>_uxtw"
- [(set (match_operand:SVE_D 0 "register_operand" "=w, w")
- (unspec:SVE_D
+ [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w")
+ (unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(and:VNx2DI
;; Predicated first-faulting gather loads for 32-bit elements. Operand
;; 3 is true for unsigned extension and false for signed extension.
(define_insn "@aarch64_ldff1_gather<mode>"
- [(set (match_operand:SVE_S 0 "register_operand" "=w, w, w, w, w, w")
- (unspec:SVE_S
+ [(set (match_operand:SVE_FULL_S 0 "register_operand" "=w, w, w, w, w, w")
+ (unspec:SVE_FULL_S
[(match_operand:VNx4BI 5 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_w" "Z, vgw, rk, rk, rk, rk")
(match_operand:VNx4SI 2 "register_operand" "w, w, w, w, w, w")
;; Predicated first-faulting gather loads for 64-bit elements. The value
;; of operand 3 doesn't matter in this case.
(define_insn "@aarch64_ldff1_gather<mode>"
- [(set (match_operand:SVE_D 0 "register_operand" "=w, w, w, w")
- (unspec:SVE_D
+ [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w, w, w")
+ (unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl, Upl, Upl")
(match_operand:DI 1 "aarch64_sve_gather_offset_d" "Z, vgd, rk, rk")
(match_operand:VNx2DI 2 "register_operand" "w, w, w, w")
;; Likewise, but with the offset being sign-extended from 32 bits.
(define_insn_and_rewrite "*aarch64_ldff1_gather<mode>_sxtw"
- [(set (match_operand:SVE_D 0 "register_operand" "=w, w")
- (unspec:SVE_D
+ [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w")
+ (unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(unspec:VNx2DI
;; Likewise, but with the offset being zero-extended from 32 bits.
(define_insn "*aarch64_ldff1_gather<mode>_uxtw"
- [(set (match_operand:SVE_D 0 "register_operand" "=w, w")
- (unspec:SVE_D
+ [(set (match_operand:SVE_FULL_D 0 "register_operand" "=w, w")
+ (unspec:SVE_FULL_D
[(match_operand:VNx2BI 5 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(and:VNx2DI
(define_insn "@aarch64_sve_prefetch<mode>"
[(prefetch (unspec:DI
[(match_operand:<VPRED> 0 "register_operand" "Upl")
- (match_operand:SVE_I 1 "aarch64_sve_prefetch_operand" "UP<Vesize>")
+ (match_operand:SVE_FULL_I 1 "aarch64_sve_prefetch_operand" "UP<Vesize>")
(match_operand:DI 2 "const_int_operand")]
UNSPEC_SVE_PREFETCH)
(match_operand:DI 3 "const_int_operand")
;; 6: the prefetch operator (an svprfop)
;; 7: the normal RTL prefetch rw flag
;; 8: the normal RTL prefetch locality value
-(define_insn "@aarch64_sve_gather_prefetch<SVE_I:mode><VNx4SI_ONLY:mode>"
+(define_insn "@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx4SI_ONLY:mode>"
[(prefetch (unspec:DI
[(match_operand:VNx4BI 0 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_I:Vesize>" "Z, vg<SVE_I:Vesize>, rk, rk, rk, rk")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_FULL_I:Vesize>" "Z, vg<SVE_FULL_I:Vesize>, rk, rk, rk, rk")
(match_operand:VNx4SI_ONLY 2 "register_operand" "w, w, w, w, w, w")
(match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_I:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
- (match_operand:SVE_I 5 "aarch64_simd_imm_zero")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_FULL_I:Vesize>" "Ui1, Ui1, Ui1, Ui1, i, i")
+ (match_operand:SVE_FULL_I 5 "aarch64_simd_imm_zero")
(match_operand:DI 6 "const_int_operand")]
UNSPEC_SVE_PREFETCH_GATHER)
(match_operand:DI 7 "const_int_operand")
"TARGET_SVE"
{
static const char *const insns[][2] = {
- "prf<SVE_I:Vesize>", "%0, [%2.s]",
- "prf<SVE_I:Vesize>", "%0, [%2.s, #%1]",
+ "prf<SVE_FULL_I:Vesize>", "%0, [%2.s]",
+ "prf<SVE_FULL_I:Vesize>", "%0, [%2.s, #%1]",
"prfb", "%0, [%1, %2.s, sxtw]",
"prfb", "%0, [%1, %2.s, uxtw]",
- "prf<SVE_I:Vesize>", "%0, [%1, %2.s, sxtw %p4]",
- "prf<SVE_I:Vesize>", "%0, [%1, %2.s, uxtw %p4]"
+ "prf<SVE_FULL_I:Vesize>", "%0, [%1, %2.s, sxtw %p4]",
+ "prf<SVE_FULL_I:Vesize>", "%0, [%1, %2.s, uxtw %p4]"
};
const char *const *parts = insns[which_alternative];
return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
;; Predicated gather prefetches for 64-bit elements. The value of operand 3
;; doesn't matter in this case.
-(define_insn "@aarch64_sve_gather_prefetch<SVE_I:mode><VNx2DI_ONLY:mode>"
+(define_insn "@aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>"
[(prefetch (unspec:DI
[(match_operand:VNx2BI 0 "register_operand" "Upl, Upl, Upl, Upl")
- (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_I:Vesize>" "Z, vg<SVE_I:Vesize>, rk, rk")
+ (match_operand:DI 1 "aarch64_sve_gather_offset_<SVE_FULL_I:Vesize>" "Z, vg<SVE_FULL_I:Vesize>, rk, rk")
(match_operand:VNx2DI_ONLY 2 "register_operand" "w, w, w, w")
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_I:Vesize>" "Ui1, Ui1, Ui1, i")
- (match_operand:SVE_I 5 "aarch64_simd_imm_zero")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_FULL_I:Vesize>" "Ui1, Ui1, Ui1, i")
+ (match_operand:SVE_FULL_I 5 "aarch64_simd_imm_zero")
(match_operand:DI 6 "const_int_operand")]
UNSPEC_SVE_PREFETCH_GATHER)
(match_operand:DI 7 "const_int_operand")
"TARGET_SVE"
{
static const char *const insns[][2] = {
- "prf<SVE_I:Vesize>", "%0, [%2.d]",
- "prf<SVE_I:Vesize>", "%0, [%2.d, #%1]",
+ "prf<SVE_FULL_I:Vesize>", "%0, [%2.d]",
+ "prf<SVE_FULL_I:Vesize>", "%0, [%2.d, #%1]",
"prfb", "%0, [%1, %2.d]",
- "prf<SVE_I:Vesize>", "%0, [%1, %2.d, lsl %p4]"
+ "prf<SVE_FULL_I:Vesize>", "%0, [%1, %2.d, lsl %p4]"
};
const char *const *parts = insns[which_alternative];
return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
)
;; Likewise, but with the offset being sign-extended from 32 bits.
-(define_insn_and_rewrite "*aarch64_sve_gather_prefetch<SVE_I:mode><VNx2DI_ONLY:mode>_sxtw"
+(define_insn_and_rewrite "*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_sxtw"
[(prefetch (unspec:DI
[(match_operand:VNx2BI 0 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(match_operand:VNx2DI 2 "register_operand" "w, w")))]
UNSPEC_PRED_X)
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_I:Vesize>" "Ui1, i")
- (match_operand:SVE_I 5 "aarch64_simd_imm_zero")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_FULL_I:Vesize>" "Ui1, i")
+ (match_operand:SVE_FULL_I 5 "aarch64_simd_imm_zero")
(match_operand:DI 6 "const_int_operand")]
UNSPEC_SVE_PREFETCH_GATHER)
(match_operand:DI 7 "const_int_operand")
{
static const char *const insns[][2] = {
"prfb", "%0, [%1, %2.d, sxtw]",
- "prf<SVE_I:Vesize>", "%0, [%1, %2.d, sxtw %p4]"
+ "prf<SVE_FULL_I:Vesize>", "%0, [%1, %2.d, sxtw %p4]"
};
const char *const *parts = insns[which_alternative];
return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
)
;; Likewise, but with the offset being zero-extended from 32 bits.
-(define_insn "*aarch64_sve_gather_prefetch<SVE_I:mode><VNx2DI_ONLY:mode>_uxtw"
+(define_insn "*aarch64_sve_gather_prefetch<SVE_FULL_I:mode><VNx2DI_ONLY:mode>_uxtw"
[(prefetch (unspec:DI
[(match_operand:VNx2BI 0 "register_operand" "Upl, Upl")
(match_operand:DI 1 "register_operand" "rk, rk")
(match_operand:VNx2DI 2 "register_operand" "w, w")
(match_operand:VNx2DI 9 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 3 "const_int_operand")
- (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_I:Vesize>" "Ui1, i")
- (match_operand:SVE_I 5 "aarch64_simd_imm_zero")
+ (match_operand:DI 4 "aarch64_gather_scale_operand_<SVE_FULL_I:Vesize>" "Ui1, i")
+ (match_operand:SVE_FULL_I 5 "aarch64_simd_imm_zero")
(match_operand:DI 6 "const_int_operand")]
UNSPEC_SVE_PREFETCH_GATHER)
(match_operand:DI 7 "const_int_operand")
{
static const char *const insns[][2] = {
"prfb", "%0, [%1, %2.d, uxtw]",
- "prf<SVE_I:Vesize>", "%0, [%1, %2.d, uxtw %p4]"
+ "prf<SVE_FULL_I:Vesize>", "%0, [%1, %2.d, uxtw %p4]"
};
const char *const *parts = insns[which_alternative];
return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
;; Predicated ST1.
(define_insn "maskstore<mode><vpred>"
- [(set (match_operand:SVE_ALL 0 "memory_operand" "+m")
- (unspec:SVE_ALL [(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:SVE_ALL 1 "register_operand" "w")
- (match_dup 0)]
- UNSPEC_ST1_SVE))]
+ [(set (match_operand:SVE_FULL 0 "memory_operand" "+m")
+ (unspec:SVE_FULL
+ [(match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:SVE_FULL 1 "register_operand" "w")
+ (match_dup 0)]
+ UNSPEC_ST1_SVE))]
"TARGET_SVE"
"st1<Vesize>\t%1.<Vetype>, %2, %0"
)
;; -------------------------------------------------------------------------
(define_insn "@aarch64_stnt1<mode>"
- [(set (match_operand:SVE_ALL 0 "memory_operand" "+m")
- (unspec:SVE_ALL [(match_operand:<VPRED> 2 "register_operand" "Upl")
- (match_operand:SVE_ALL 1 "register_operand" "w")
- (match_dup 0)]
- UNSPEC_STNT1_SVE))]
+ [(set (match_operand:SVE_FULL 0 "memory_operand" "+m")
+ (unspec:SVE_FULL
+ [(match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:SVE_FULL 1 "register_operand" "w")
+ (match_dup 0)]
+ UNSPEC_STNT1_SVE))]
"TARGET_SVE"
"stnt1<Vesize>\t%1.<Vetype>, %2, %0"
)
(match_operand:<V_INT_EQUIV> 1 "register_operand")
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_<Vesize>")
- (match_operand:SVE_SD 4 "register_operand")]
+ (match_operand:SVE_FULL_SD 4 "register_operand")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
{
(match_operand:VNx4SI 1 "register_operand" "w, w, w, w, w, w")
(match_operand:DI 2 "const_int_operand" "Ui1, Ui1, Z, Ui1, Z, Ui1")
(match_operand:DI 3 "aarch64_gather_scale_operand_w" "Ui1, Ui1, Ui1, Ui1, i, i")
- (match_operand:SVE_S 4 "register_operand" "w, w, w, w, w, w")]
+ (match_operand:SVE_FULL_S 4 "register_operand" "w, w, w, w, w, w")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
"@
(match_operand:VNx2DI 1 "register_operand" "w, w, w, w")
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_d" "Ui1, Ui1, Ui1, i")
- (match_operand:SVE_D 4 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_D 4 "register_operand" "w, w, w, w")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
"@
UNSPEC_PRED_X)
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_d" "Ui1, i")
- (match_operand:SVE_D 4 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_D 4 "register_operand" "w, w")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
"@
(match_operand:VNx2DI 6 "aarch64_sve_uxtw_immediate"))
(match_operand:DI 2 "const_int_operand")
(match_operand:DI 3 "aarch64_gather_scale_operand_d" "Ui1, i")
- (match_operand:SVE_D 4 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_D 4 "register_operand" "w, w")]
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
"@
(define_expand "vec_duplicate<mode>"
[(parallel
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (vec_duplicate:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand")
+ (vec_duplicate:SVE_FULL
(match_operand:<VEL> 1 "aarch64_sve_dup_operand")))
(clobber (scratch:VNx16BI))])]
"TARGET_SVE"
;; the load at the first opportunity in order to allow the PTRUE to be
;; optimized with surrounding code.
(define_insn_and_split "*vec_duplicate<mode>_reg"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w")
- (vec_duplicate:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w, w, w")
+ (vec_duplicate:SVE_FULL
(match_operand:<VEL> 1 "aarch64_sve_dup_operand" "r, w, Uty")))
(clobber (match_scratch:VNx16BI 2 "=X, X, Upl"))]
"TARGET_SVE"
;; Duplicate an Advanced SIMD vector to fill an SVE vector (LE version).
(define_insn "@aarch64_vec_duplicate_vq<mode>_le"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (vec_duplicate:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (vec_duplicate:SVE_FULL
(match_operand:<V128> 1 "register_operand" "w")))]
"TARGET_SVE && !BYTES_BIG_ENDIAN"
{
;; lsb into the register lsb. We therefore have to describe this in rtl
;; terms as a reverse of the V128 vector followed by a duplicate.
(define_insn "@aarch64_vec_duplicate_vq<mode>_be"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (vec_duplicate:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (vec_duplicate:SVE_FULL
(vec_select:<V128>
(match_operand:<V128> 1 "register_operand" "w")
(match_operand 2 "descending_int_parallel"))))]
;; be used by combine to optimize selects of a vec_duplicate<mode>
;; with zero.
(define_insn "sve_ld1r<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 1 "register_operand" "Upl")
- (vec_duplicate:SVE_ALL
+ (vec_duplicate:SVE_FULL
(match_operand:<VEL> 2 "aarch64_sve_ld1r_operand" "Uty"))
- (match_operand:SVE_ALL 3 "aarch64_simd_imm_zero")]
+ (match_operand:SVE_FULL 3 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
"ld1r<Vesize>\t%0.<Vetype>, %1/z, %2"
;; Load 128 bits from memory under predicate control and duplicate to
;; fill a vector.
(define_insn "@aarch64_sve_ld1rq<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 2 "register_operand" "Upl")
(match_operand:<V128> 1 "aarch64_sve_ld1rq_operand" "UtQ")]
UNSPEC_LD1RQ))]
;; -------------------------------------------------------------------------
(define_expand "vec_init<mode><Vel>"
- [(match_operand:SVE_ALL 0 "register_operand")
+ [(match_operand:SVE_FULL 0 "register_operand")
(match_operand 1 "")]
"TARGET_SVE"
{
;; Shift an SVE vector left and insert a scalar into element 0.
(define_insn "vec_shl_insert_<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=?w, w, ??&w, ?&w")
- (unspec:SVE_ALL
- [(match_operand:SVE_ALL 1 "register_operand" "0, 0, w, w")
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=?w, w, ??&w, ?&w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "0, 0, w, w")
(match_operand:<VEL> 2 "aarch64_reg_or_zero" "rZ, w, rZ, w")]
UNSPEC_INSR))]
"TARGET_SVE"
;; -------------------------------------------------------------------------
(define_insn "vec_series<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w")
- (vec_series:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, w")
+ (vec_series:SVE_FULL_I
(match_operand:<VEL> 1 "aarch64_sve_index_operand" "Usi, r, r")
(match_operand:<VEL> 2 "aarch64_sve_index_operand" "r, Usi, r")))]
"TARGET_SVE"
;; Optimize {x, x, x, x, ...} + {0, n, 2*n, 3*n, ...} if n is in range
;; of an INDEX instruction.
(define_insn "*vec_series<mode>_plus"
- [(set (match_operand:SVE_I 0 "register_operand" "=w")
- (plus:SVE_I
- (vec_duplicate:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (plus:SVE_FULL_I
+ (vec_duplicate:SVE_FULL_I
(match_operand:<VEL> 1 "register_operand" "r"))
- (match_operand:SVE_I 2 "immediate_operand")))]
+ (match_operand:SVE_FULL_I 2 "immediate_operand")))]
"TARGET_SVE && aarch64_check_zero_based_sve_index_immediate (operands[2])"
{
operands[2] = aarch64_check_zero_based_sve_index_immediate (operands[2]);
(define_expand "vec_extract<mode><Vel>"
[(set (match_operand:<VEL> 0 "register_operand")
(vec_select:<VEL>
- (match_operand:SVE_ALL 1 "register_operand")
+ (match_operand:SVE_FULL 1 "register_operand")
(parallel [(match_operand:SI 2 "nonmemory_operand")])))]
"TARGET_SVE"
{
(define_insn_and_split "*vec_extract<mode><Vel>_0"
[(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv")
(vec_select:<VEL>
- (match_operand:SVE_ALL 1 "register_operand" "w, 0, w")
+ (match_operand:SVE_FULL 1 "register_operand" "w, 0, w")
(parallel [(const_int 0)])))]
"TARGET_SVE"
{
(define_insn "*vec_extract<mode><Vel>_v128"
[(set (match_operand:<VEL> 0 "aarch64_simd_nonimmediate_operand" "=r, w, Utv")
(vec_select:<VEL>
- (match_operand:SVE_ALL 1 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL 1 "register_operand" "w, w, w")
(parallel [(match_operand:SI 2 "const_int_operand")])))]
"TARGET_SVE
&& IN_RANGE (INTVAL (operands[2]) * GET_MODE_SIZE (<VEL>mode), 1, 15)"
(define_insn "*vec_extract<mode><Vel>_dup"
[(set (match_operand:<VEL> 0 "register_operand" "=w")
(vec_select:<VEL>
- (match_operand:SVE_ALL 1 "register_operand" "w")
+ (match_operand:SVE_FULL 1 "register_operand" "w")
(parallel [(match_operand:SI 2 "const_int_operand")])))]
"TARGET_SVE
&& IN_RANGE (INTVAL (operands[2]) * GET_MODE_SIZE (<VEL>mode), 16, 63)"
(define_insn "*vec_extract<mode><Vel>_ext"
[(set (match_operand:<VEL> 0 "register_operand" "=w, ?&w")
(vec_select:<VEL>
- (match_operand:SVE_ALL 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL 1 "register_operand" "0, w")
(parallel [(match_operand:SI 2 "const_int_operand")])))]
"TARGET_SVE && INTVAL (operands[2]) * GET_MODE_SIZE (<VEL>mode) >= 64"
{
[(set (match_operand:<VEL> 0 "register_operand" "=?r, w")
(unspec:<VEL>
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:SVE_ALL 2 "register_operand" "w, w")]
+ (match_operand:SVE_FULL 2 "register_operand" "w, w")]
LAST))]
"TARGET_SVE"
"@
(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 2 "nonmemory_operand")
;; Dummy operand to which we can attach the iterator.
- (reg:SVE_I V0_REGNUM)]
+ (reg:SVE_FULL_I V0_REGNUM)]
"TARGET_SVE"
{
rtx tmp = gen_reg_rtx (<MODE>mode);
;; Unpredicated integer unary arithmetic.
(define_expand "<optab><mode>2"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_dup 2)
- (SVE_INT_UNARY:SVE_I (match_operand:SVE_I 1 "register_operand"))]
+ (SVE_INT_UNARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
{
;; Integer unary arithmetic predicated with a PTRUE.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl")
- (SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w"))]
+ (SVE_INT_UNARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
"<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
;; Predicated integer unary arithmetic with merging.
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand")
- (SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand"))
- (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero")]
+ (SVE_INT_UNARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand"))
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
)
;; Predicated integer unary arithmetic, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w"))
+ (SVE_INT_UNARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, ?&w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (SVE_INT_UNARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w"))
- (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (SVE_INT_UNARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w, w"))
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[3])"
"@
;; Predicated integer unary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl")
- (unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "w")]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand" "w")]
SVE_INT_UNARY)]
UNSPEC_PRED_X))]
"TARGET_SVE && <elem_bits> >= <min_elem_bits>"
;; Predicated integer unary operations with merging.
(define_insn "@cond_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "w, w, w")]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand" "w, w, w")]
SVE_INT_UNARY)
- (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
"TARGET_SVE && <elem_bits> >= <min_elem_bits>"
"@
;; -------------------------------------------------------------------------
;; Predicated SXT[BHW].
-(define_insn "@aarch64_pred_sxt<SVE_HSDI:mode><SVE_PARTIAL:mode>"
- [(set (match_operand:SVE_HSDI 0 "register_operand" "=w")
- (unspec:SVE_HSDI
+(define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL:mode>"
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w")
+ (unspec:SVE_FULL_HSDI
[(match_operand:<VPRED> 1 "register_operand" "Upl")
- (sign_extend:SVE_HSDI
+ (sign_extend:SVE_FULL_HSDI
(truncate:SVE_PARTIAL
- (match_operand:SVE_HSDI 2 "register_operand" "w")))]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand" "w")))]
UNSPEC_PRED_X))]
"TARGET_SVE && (~<narrower_mask> & <self_mask>) == 0"
- "sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>"
+ "sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
)
;; Predicated SXT[BHW] with merging.
-(define_insn "@aarch64_cond_sxt<SVE_HSDI:mode><SVE_PARTIAL:mode>"
- [(set (match_operand:SVE_HSDI 0 "register_operand" "=w, ?&w, ?&w")
- (unspec:SVE_HSDI
+(define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL:mode>"
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w, ?&w")
+ (unspec:SVE_FULL_HSDI
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (sign_extend:SVE_HSDI
+ (sign_extend:SVE_FULL_HSDI
(truncate:SVE_PARTIAL
- (match_operand:SVE_HSDI 2 "register_operand" "w, w, w")))
- (match_operand:SVE_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")))
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
"TARGET_SVE && (~<narrower_mask> & <self_mask>) == 0"
"@
- sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
- movprfx\t%0.<SVE_HSDI:Vetype>, %1/z, %2.<SVE_HSDI:Vetype>\;sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
- movprfx\t%0, %3\;sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>"
+ sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ movprfx\t%0, %3\;sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
[(set_attr "movprfx" "*,yes,yes")]
)
;; Match UXT[BHW] as a conditional AND of a constant, merging with the
;; first input.
(define_insn "*cond_uxt<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (and:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
+ (and:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_uxt_immediate"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn "*cond_uxt<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, ?&w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (and:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
- (match_operand:SVE_I 3 "aarch64_sve_uxt_immediate"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (and:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_uxt_immediate"))
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
"@
;; Predicated logical inverse.
(define_expand "@aarch64_pred_cnot<mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(unspec:<VPRED>
[(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
(eq:<VPRED>
- (match_operand:SVE_I 3 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")
(match_dup 4))]
UNSPEC_PRED_Z)
(match_dup 5)
)
(define_insn "*cnot<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (unspec:SVE_FULL_I
[(unspec:<VPRED>
[(match_operand:<VPRED> 1 "register_operand" "Upl")
(match_operand:SI 5 "aarch64_sve_ptrue_flag")
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w")
- (match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
+ (match_operand:SVE_FULL_I 2 "register_operand" "w")
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
- (match_operand:SVE_I 4 "aarch64_simd_imm_one")
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_one")
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated logical inverse with merging.
(define_expand "@cond_cnot<mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_I
+ (unspec:SVE_FULL_I
[(unspec:<VPRED>
[(match_dup 4)
(const_int SVE_KNOWN_PTRUE)
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand")
(match_dup 5))]
UNSPEC_PRED_Z)
(match_dup 6)
(match_dup 5)]
UNSPEC_SEL)
- (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
{
;; Predicated logical inverse, merging with the first input.
(define_insn_and_rewrite "*cond_cnot<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
;; Logical inverse of operand 2 (as above).
- (unspec:SVE_I
+ (unspec:SVE_FULL_I
[(unspec:<VPRED>
[(match_operand 5)
(const_int SVE_KNOWN_PTRUE)
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
- (match_operand:SVE_I 4 "aarch64_simd_imm_one")
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_one")
(match_dup 3)]
UNSPEC_SEL)
(match_dup 2)]
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn_and_rewrite "*cond_cnot<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, ?&w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, ?&w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
;; Logical inverse of operand 2 (as above).
- (unspec:SVE_I
+ (unspec:SVE_FULL_I
[(unspec:<VPRED>
[(match_operand 5)
(const_int SVE_KNOWN_PTRUE)
(eq:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
- (match_operand:SVE_I 3 "aarch64_simd_imm_zero"))]
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_imm_zero"))]
UNSPEC_PRED_Z)
- (match_operand:SVE_I 4 "aarch64_simd_imm_one")
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_one")
(match_dup 3)]
UNSPEC_SEL)
- (match_operand:SVE_I 6 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_I 6 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[6])"
"@
;; Unpredicated unary operations that take an integer and return a float.
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (unspec:SVE_F [(match_operand:<V_INT_EQUIV> 1 "register_operand" "w")]
- SVE_FP_UNARY_INT))]
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (unspec:SVE_FULL_F
+ [(match_operand:<V_INT_EQUIV> 1 "register_operand" "w")]
+ SVE_FP_UNARY_INT))]
"TARGET_SVE"
"<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>"
)
;; Unpredicated floating-point unary operations.
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (unspec:SVE_F [(match_operand:SVE_F 1 "register_operand" "w")]
- SVE_FP_UNARY))]
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 1 "register_operand" "w")]
+ SVE_FP_UNARY))]
"TARGET_SVE"
"<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>"
)
;; Unpredicated floating-point unary operations.
(define_expand "<optab><mode>2"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_dup 2)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_F 1 "register_operand")]
+ (match_operand:SVE_FULL_F 1 "register_operand")]
SVE_COND_FP_UNARY))]
"TARGET_SVE"
{
;; Predicated floating-point unary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")]
SVE_COND_FP_UNARY))]
"TARGET_SVE"
"<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
;; Predicated floating-point unary arithmetic with merging.
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_F 2 "register_operand")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FP_UNARY)
- (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
)
;; Predicated floating-point unary arithmetic, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 3)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")]
SVE_COND_FP_UNARY)
(match_dup 2)]
UNSPEC_SEL))]
;; as earlyclobber helps to make the instruction more regular to the
;; register allocator.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_F 0 "register_operand" "=&w, ?&w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, ?&w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
SVE_COND_FP_UNARY)
- (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[3])
;; Unpredicated integer binary operations that have an immediate form.
(define_expand "<optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_dup 3)
- (SVE_INT_BINARY_IMM:SVE_I
- (match_operand:SVE_I 1 "register_operand")
- (match_operand:SVE_I 2 "aarch64_sve_<sve_imm_con>_operand"))]
+ (SVE_INT_BINARY_IMM:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_<sve_imm_con>_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
{
;; and would make the instruction seem less uniform to the register
;; allocator.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
- (SVE_INT_BINARY_IMM:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, 0, w, w")
- (match_operand:SVE_I 3 "aarch64_sve_<sve_imm_con>_operand" "<sve_imm_con>, w, <sve_imm_con>, w"))]
+ (SVE_INT_BINARY_IMM:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "%0, 0, w, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_<sve_imm_con>_operand" "<sve_imm_con>, w, <sve_imm_con>, w"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
"@
; the unnecessary PTRUE.
"&& reload_completed
&& !register_operand (operands[3], <MODE>mode)"
- [(set (match_dup 0) (SVE_INT_BINARY_IMM:SVE_I (match_dup 2) (match_dup 3)))]
+ [(set (match_dup 0)
+ (SVE_INT_BINARY_IMM:SVE_FULL_I (match_dup 2) (match_dup 3)))]
""
[(set_attr "movprfx" "*,*,yes,yes")]
)
;; These are generated by splitting a predicated instruction whose
;; predicate is unused.
(define_insn "*post_ra_<optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (SVE_INT_BINARY_IMM:SVE_I
- (match_operand:SVE_I 1 "register_operand" "0, w")
- (match_operand:SVE_I 2 "aarch64_sve_<sve_imm_con>_immediate")))]
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (SVE_INT_BINARY_IMM:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_<sve_imm_con>_immediate")))]
"TARGET_SVE && reload_completed"
"@
<sve_int_op>\t%0.<Vetype>, %0.<Vetype>, #%<sve_imm_prefix>2
;; Predicated integer operations with merging.
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand")
- (SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "<sve_pred_int_rhs2_operand>"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
+ (SVE_INT_BINARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "<sve_pred_int_rhs2_operand>"))
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
)
;; Predicated integer operations, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
+ (SVE_INT_BINARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer operations, merging with the second input.
(define_insn "*cond_<optab><mode>_3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "0, w"))
+ (SVE_INT_BINARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "0, w"))
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer operations, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
- (SVE_INT_BINARY:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (SVE_INT_BINARY:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w, w, w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, 0, w, w, w"))
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; -------------------------------------------------------------------------
(define_insn "add<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?w, ?w, w")
- (plus:SVE_I
- (match_operand:SVE_I 1 "register_operand" "%0, 0, 0, w, w, w")
- (match_operand:SVE_I 2 "aarch64_sve_add_operand" "vsa, vsn, vsi, vsa, vsn, w")))]
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, w, ?w, ?w, w")
+ (plus:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand" "%0, 0, 0, w, w, w")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_add_operand" "vsa, vsn, vsi, vsa, vsn, w")))]
"TARGET_SVE"
"@
add\t%0.<Vetype>, %0.<Vetype>, #%D2
;; -------------------------------------------------------------------------
(define_insn "sub<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
- (minus:SVE_I
- (match_operand:SVE_I 1 "aarch64_sve_arith_operand" "w, vsa, vsa")
- (match_operand:SVE_I 2 "register_operand" "w, 0, w")))]
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w")
+ (minus:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "aarch64_sve_arith_operand" "w, vsa, vsa")
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, 0, w")))]
"TARGET_SVE"
"@
sub\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>
;; An unshifted and unscaled ADR. This is functionally equivalent to an ADD,
;; but the svadrb intrinsics should preserve the user's choice.
(define_insn "@aarch64_adr<mode>"
- [(set (match_operand:SVE_SDI 0 "register_operand" "=w")
- (unspec:SVE_SDI
- [(match_operand:SVE_SDI 1 "register_operand" "w")
- (match_operand:SVE_SDI 2 "register_operand" "w")]
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w")
+ (unspec:SVE_FULL_SDI
+ [(match_operand:SVE_FULL_SDI 1 "register_operand" "w")
+ (match_operand:SVE_FULL_SDI 2 "register_operand" "w")]
UNSPEC_ADR))]
"TARGET_SVE"
"adr\t%0.<Vetype>, [%1.<Vetype>, %2.<Vetype>]"
;; ADR with a nonzero shift.
(define_expand "@aarch64_adr<mode>_shift"
- [(set (match_operand:SVE_SDI 0 "register_operand")
- (plus:SVE_SDI
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
+ (plus:SVE_FULL_SDI
+ (unspec:SVE_FULL_SDI
[(match_dup 4)
- (ashift:SVE_SDI
- (match_operand:SVE_SDI 2 "register_operand")
- (match_operand:SVE_SDI 3 "const_1_to_3_operand"))]
+ (ashift:SVE_FULL_SDI
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "const_1_to_3_operand"))]
UNSPEC_PRED_X)
- (match_operand:SVE_SDI 1 "register_operand")))]
+ (match_operand:SVE_FULL_SDI 1 "register_operand")))]
"TARGET_SVE"
{
operands[4] = CONSTM1_RTX (<VPRED>mode);
)
(define_insn_and_rewrite "*aarch64_adr<mode>_shift"
- [(set (match_operand:SVE_SDI 0 "register_operand" "=w")
- (plus:SVE_SDI
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w")
+ (plus:SVE_FULL_SDI
+ (unspec:SVE_FULL_SDI
[(match_operand 4)
- (ashift:SVE_SDI
- (match_operand:SVE_SDI 2 "register_operand" "w")
- (match_operand:SVE_SDI 3 "const_1_to_3_operand"))]
+ (ashift:SVE_FULL_SDI
+ (match_operand:SVE_FULL_SDI 2 "register_operand" "w")
+ (match_operand:SVE_FULL_SDI 3 "const_1_to_3_operand"))]
UNSPEC_PRED_X)
- (match_operand:SVE_SDI 1 "register_operand" "w")))]
+ (match_operand:SVE_FULL_SDI 1 "register_operand" "w")))]
"TARGET_SVE"
"adr\t%0.<Vetype>, [%1.<Vetype>, %2.<Vetype>, lsl %3]"
"&& !CONSTANT_P (operands[4])"
;; Unpredicated integer absolute difference.
(define_expand "<su>abd<mode>_3"
- [(use (match_operand:SVE_I 0 "register_operand"))
- (USMAX:SVE_I (match_operand:SVE_I 1 "register_operand")
- (match_operand:SVE_I 2 "register_operand"))]
+ [(use (match_operand:SVE_FULL_I 0 "register_operand"))
+ (USMAX:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand"))]
"TARGET_SVE"
{
rtx pred = aarch64_ptrue_reg (<VPRED>mode);
;; Predicated integer absolute difference.
(define_insn "@aarch64_pred_<su>abd<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (minus:SVE_I
- (USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "%0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (<max_opp>:SVE_I
+ (minus:SVE_FULL_I
+ (USMAX:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "%0, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+ (<max_opp>:SVE_FULL_I
(match_dup 2)
(match_dup 3)))]
UNSPEC_PRED_X))]
)
(define_expand "@aarch64_cond_<su>abd<mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand")
- (minus:SVE_I
- (unspec:SVE_I
+ (minus:SVE_FULL_I
+ (unspec:SVE_FULL_I
[(match_dup 1)
- (USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "register_operand"))]
+ (USMAX:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand"))]
UNSPEC_PRED_X)
- (unspec:SVE_I
+ (unspec:SVE_FULL_I
[(match_dup 1)
- (<max_opp>:SVE_I
+ (<max_opp>:SVE_FULL_I
(match_dup 2)
(match_dup 3))]
UNSPEC_PRED_X))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
{
;; Predicated integer absolute difference, merging with the first input.
(define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (minus:SVE_I
- (unspec:SVE_I
+ (minus:SVE_FULL_I
+ (unspec:SVE_FULL_I
[(match_operand 4)
- (USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))]
+ (USMAX:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))]
UNSPEC_PRED_X)
- (unspec:SVE_I
+ (unspec:SVE_FULL_I
[(match_operand 5)
- (<max_opp>:SVE_I
+ (<max_opp>:SVE_FULL_I
(match_dup 2)
(match_dup 3))]
UNSPEC_PRED_X))
;; Predicated integer absolute difference, merging with an independent value.
(define_insn_and_rewrite "*aarch64_cond_<su>abd<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
- (minus:SVE_I
- (unspec:SVE_I
+ (minus:SVE_FULL_I
+ (unspec:SVE_FULL_I
[(match_operand 5)
- (USMAX:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, 0, w, w, w"))]
+ (USMAX:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w, w, w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, 0, w, w, w"))]
UNSPEC_PRED_X)
- (unspec:SVE_I
+ (unspec:SVE_FULL_I
[(match_operand 6)
- (<max_opp>:SVE_I
+ (<max_opp>:SVE_FULL_I
(match_dup 2)
(match_dup 3))]
UNSPEC_PRED_X))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; Unpredicated saturating signed addition and subtraction.
(define_insn "@aarch64_<su_optab><optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w, ?&w, w")
- (SBINQOPS:SVE_I
- (match_operand:SVE_I 1 "register_operand" "0, 0, w, w, w")
- (match_operand:SVE_I 2 "aarch64_sve_sqadd_operand" "vsQ, vsS, vsQ, vsS, w")))]
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w, w")
+ (SBINQOPS:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand" "0, 0, w, w, w")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_sqadd_operand" "vsQ, vsS, vsQ, vsS, w")))]
"TARGET_SVE"
"@
<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
;; Unpredicated saturating unsigned addition and subtraction.
(define_insn "@aarch64_<su_optab><optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w, w")
- (UBINQOPS:SVE_I
- (match_operand:SVE_I 1 "register_operand" "0, w, w")
- (match_operand:SVE_I 2 "aarch64_sve_arith_operand" "vsa, vsa, w")))]
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, w")
+ (UBINQOPS:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand" "0, w, w")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_arith_operand" "vsa, vsa, w")))]
"TARGET_SVE"
"@
<binqops_op>\t%0.<Vetype>, %0.<Vetype>, #%D2
;; Unpredicated highpart multiplication.
(define_expand "<su>mul<mode>3_highpart"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_dup 3)
- (unspec:SVE_I [(match_operand:SVE_I 1 "register_operand")
- (match_operand:SVE_I 2 "register_operand")]
- MUL_HIGHPART)]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "register_operand")]
+ MUL_HIGHPART)]
UNSPEC_PRED_X))]
"TARGET_SVE"
{
;; Predicated highpart multiplication.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_I [(match_operand:SVE_I 2 "register_operand" "%0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")]
- MUL_HIGHPART)]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand" "%0, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
+ MUL_HIGHPART)]
UNSPEC_PRED_X))]
"TARGET_SVE"
"@
;; Predicated highpart multiplications with merging.
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "register_operand")]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "register_operand")]
MUL_HIGHPART)
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
{
;; Predicated highpart multiplications, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
MUL_HIGHPART)
(match_dup 2)]
UNSPEC_SEL))]
;; Predicated highpart multiplications, merging with zero.
(define_insn "*cond_<optab><mode>_z"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "%0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand" "%0, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w")]
MUL_HIGHPART)
- (match_operand:SVE_I 4 "aarch64_simd_imm_zero")]
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
"@
;; Unpredicated integer division.
(define_expand "<optab><mode>3"
- [(set (match_operand:SVE_SDI 0 "register_operand")
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
+ (unspec:SVE_FULL_SDI
[(match_dup 3)
- (SVE_INT_BINARY_SD:SVE_SDI
- (match_operand:SVE_SDI 1 "register_operand")
- (match_operand:SVE_SDI 2 "register_operand"))]
+ (SVE_INT_BINARY_SD:SVE_FULL_SDI
+ (match_operand:SVE_FULL_SDI 1 "register_operand")
+ (match_operand:SVE_FULL_SDI 2 "register_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
{
;; Integer division predicated with a PTRUE.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_SDI 0 "register_operand" "=w, w, ?&w")
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, w, ?&w")
+ (unspec:SVE_FULL_SDI
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (SVE_INT_BINARY_SD:SVE_SDI
- (match_operand:SVE_SDI 2 "register_operand" "0, w, w")
- (match_operand:SVE_SDI 3 "register_operand" "w, 0, w"))]
+ (SVE_INT_BINARY_SD:SVE_FULL_SDI
+ (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w, w")
+ (match_operand:SVE_FULL_SDI 3 "register_operand" "w, 0, w"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
"@
;; Predicated integer division with merging.
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_SDI 0 "register_operand")
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand")
+ (unspec:SVE_FULL_SDI
[(match_operand:<VPRED> 1 "register_operand")
- (SVE_INT_BINARY_SD:SVE_SDI
- (match_operand:SVE_SDI 2 "register_operand")
- (match_operand:SVE_SDI 3 "register_operand"))
- (match_operand:SVE_SDI 4 "aarch64_simd_reg_or_zero")]
+ (SVE_INT_BINARY_SD:SVE_FULL_SDI
+ (match_operand:SVE_FULL_SDI 2 "register_operand")
+ (match_operand:SVE_FULL_SDI 3 "register_operand"))
+ (match_operand:SVE_FULL_SDI 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
)
;; Predicated integer division, merging with the first input.
(define_insn "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w")
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_SDI
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (SVE_INT_BINARY_SD:SVE_SDI
- (match_operand:SVE_SDI 2 "register_operand" "0, w")
- (match_operand:SVE_SDI 3 "register_operand" "w, w"))
+ (SVE_INT_BINARY_SD:SVE_FULL_SDI
+ (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_SDI 3 "register_operand" "w, w"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer division, merging with the second input.
(define_insn "*cond_<optab><mode>_3"
- [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w")
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_SDI
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (SVE_INT_BINARY_SD:SVE_SDI
- (match_operand:SVE_SDI 2 "register_operand" "w, w")
- (match_operand:SVE_SDI 3 "register_operand" "0, w"))
+ (SVE_INT_BINARY_SD:SVE_FULL_SDI
+ (match_operand:SVE_FULL_SDI 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_SDI 3 "register_operand" "0, w"))
(match_dup 3)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer division, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_SDI 0 "register_operand" "=&w, &w, &w, &w, ?&w")
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ (unspec:SVE_FULL_SDI
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
- (SVE_INT_BINARY_SD:SVE_SDI
- (match_operand:SVE_SDI 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_SDI 3 "register_operand" "w, 0, w, w, w"))
- (match_operand:SVE_SDI 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (SVE_INT_BINARY_SD:SVE_FULL_SDI
+ (match_operand:SVE_FULL_SDI 2 "register_operand" "0, w, w, w, w")
+ (match_operand:SVE_FULL_SDI 3 "register_operand" "w, 0, w, w, w"))
+ (match_operand:SVE_FULL_SDI 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; Unpredicated integer binary logical operations.
(define_insn "<optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?w, w")
- (LOGICAL:SVE_I
- (match_operand:SVE_I 1 "register_operand" "%0, w, w")
- (match_operand:SVE_I 2 "aarch64_sve_logical_operand" "vsl, vsl, w")))]
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?w, w")
+ (LOGICAL:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand" "%0, w, w")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_logical_operand" "vsl, vsl, w")))]
"TARGET_SVE"
"@
<logical>\t%0.<Vetype>, %0.<Vetype>, #%C2
;; Unpredicated BIC.
(define_expand "@aarch64_bic<mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (and:SVE_I
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (and:SVE_FULL_I
+ (unspec:SVE_FULL_I
[(match_dup 3)
- (not:SVE_I (match_operand:SVE_I 2 "register_operand"))]
+ (not:SVE_FULL_I (match_operand:SVE_FULL_I 2 "register_operand"))]
UNSPEC_PRED_X)
- (match_operand:SVE_I 1 "register_operand")))]
+ (match_operand:SVE_FULL_I 1 "register_operand")))]
"TARGET_SVE"
{
operands[3] = CONSTM1_RTX (<VPRED>mode);
;; Predicated BIC.
(define_insn_and_rewrite "*bic<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w")
- (and:SVE_I
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (and:SVE_FULL_I
+ (unspec:SVE_FULL_I
[(match_operand 3)
- (not:SVE_I (match_operand:SVE_I 2 "register_operand" "w"))]
+ (not:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w"))]
UNSPEC_PRED_X)
- (match_operand:SVE_I 1 "register_operand" "w")))]
+ (match_operand:SVE_FULL_I 1 "register_operand" "w")))]
"TARGET_SVE"
"bic\t%0.d, %1.d, %2.d"
"&& !CONSTANT_P (operands[3])"
;; Predicated BIC with merging.
+;; Operand 1 is the governing predicate.  Active lanes compute
+;; operand 2 AND NOT operand 3; inactive lanes take the value of
+;; operand 4 (a register or zero), selected via UNSPEC_SEL.
(define_expand "@cond_bic<mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
 [(match_operand:<VPRED> 1 "register_operand")
- (and:SVE_I
- (not:SVE_I (match_operand:SVE_I 3 "register_operand"))
- (match_operand:SVE_I 2 "register_operand"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
+ (and:SVE_FULL_I
+ (not:SVE_FULL_I (match_operand:SVE_FULL_I 3 "register_operand"))
+ (match_operand:SVE_FULL_I 2 "register_operand"))
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
 UNSPEC_SEL))]
 "TARGET_SVE"
)
;; Predicated integer BIC, merging with the first input.
(define_insn "*cond_bic<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (and:SVE_I
- (not:SVE_I (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 2 "register_operand" "0, w"))
+ (and:SVE_FULL_I
+ (not:SVE_FULL_I
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer BIC, merging with an independent value.
(define_insn_and_rewrite "*cond_bic<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
- (and:SVE_I
- (not:SVE_I (match_operand:SVE_I 3 "register_operand" "w, w, w, w"))
- (match_operand:SVE_I 2 "register_operand" "0, w, w, w"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (and:SVE_FULL_I
+ (not:SVE_FULL_I
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w, w, w"))
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w, w, w"))
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
"@
;; Unpredicated shift by a scalar, which expands into one of the vector
;; shifts below.
(define_expand "<ASHIFT:optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand")
- (ASHIFT:SVE_I (match_operand:SVE_I 1 "register_operand")
- (match_operand:<VEL> 2 "general_operand")))]
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (ASHIFT:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:<VEL> 2 "general_operand")))]
"TARGET_SVE"
{
rtx amount;
;; Unpredicated shift by a vector.
(define_expand "v<optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_dup 3)
- (ASHIFT:SVE_I
- (match_operand:SVE_I 1 "register_operand")
- (match_operand:SVE_I 2 "aarch64_sve_<lr>shift_operand"))]
+ (ASHIFT:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "aarch64_sve_<lr>shift_operand"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
{
;; likely to gain much and would make the instruction seem less uniform
;; to the register allocator.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
- (ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, 0, w, w")
- (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, w"))]
+ (ASHIFT:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, 0, w, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, w"))]
UNSPEC_PRED_X))]
"TARGET_SVE"
"@
movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
"&& reload_completed
&& !register_operand (operands[3], <MODE>mode)"
- [(set (match_dup 0) (ASHIFT:SVE_I (match_dup 2) (match_dup 3)))]
+ [(set (match_dup 0) (ASHIFT:SVE_FULL_I (match_dup 2) (match_dup 3)))]
""
[(set_attr "movprfx" "*,*,*,yes")]
)
;; These are generated by splitting a predicated instruction whose
;; predicate is unused.
+;; The shift amount (operand 2) is restricted to an immediate
+;; (aarch64_simd_<lr>shift_imm), so no governing predicate is needed;
+;; the pattern is only valid after register allocation
+;; (reload_completed).
(define_insn "*post_ra_v<optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w")
- (ASHIFT:SVE_I
- (match_operand:SVE_I 1 "register_operand" "w")
- (match_operand:SVE_I 2 "aarch64_simd_<lr>shift_imm")))]
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (ASHIFT:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand" "w")
+ (match_operand:SVE_FULL_I 2 "aarch64_simd_<lr>shift_imm")))]
 "TARGET_SVE && reload_completed"
 "<shift>\t%0.<Vetype>, %1.<Vetype>, #%2"
)
;; Predicated integer shift, merging with the first input.
(define_insn "*cond_<optab><mode>_2_const"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
+ (ASHIFT:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_<lr>shift_imm"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer shift, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any_const"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, &w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, w, w")
- (match_operand:SVE_I 3 "aarch64_simd_<lr>shift_imm"))
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (ASHIFT:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_<lr>shift_imm"))
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
"@
;; Unpredicated shifts of narrow elements by 64-bit amounts.
(define_insn "@aarch64_sve_<sve_int_op><mode>"
- [(set (match_operand:SVE_BHSI 0 "register_operand" "=w")
- (unspec:SVE_BHSI
- [(match_operand:SVE_BHSI 1 "register_operand" "w")
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=w")
+ (unspec:SVE_FULL_BHSI
+ [(match_operand:SVE_FULL_BHSI 1 "register_operand" "w")
(match_operand:VNx2DI 2 "register_operand" "w")]
SVE_SHIFT_WIDE))]
"TARGET_SVE"
;; Merging predicated shifts of narrow elements by 64-bit amounts.
+;; The shift amount (operand 3) is always a VNx2DI vector of 64-bit
+;; elements, regardless of the (narrower) element size being shifted.
+;; Inactive lanes, per predicate operand 1, take the value of
+;; operand 4 (a register or zero).
(define_expand "@cond_<sve_int_op><mode>"
- [(set (match_operand:SVE_BHSI 0 "register_operand")
- (unspec:SVE_BHSI
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand")
+ (unspec:SVE_FULL_BHSI
 [(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_BHSI
- [(match_operand:SVE_BHSI 2 "register_operand")
+ (unspec:SVE_FULL_BHSI
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand")
 (match_operand:VNx2DI 3 "register_operand")]
 SVE_SHIFT_WIDE)
- (match_operand:SVE_BHSI 4 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_BHSI 4 "aarch64_simd_reg_or_zero")]
 UNSPEC_SEL))]
 "TARGET_SVE"
)
;; Predicated shifts of narrow elements by 64-bit amounts, merging with
;; the first input.
(define_insn "*cond_<sve_int_op><mode>_m"
- [(set (match_operand:SVE_BHSI 0 "register_operand" "=w, ?&w")
- (unspec:SVE_BHSI
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_BHSI
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_BHSI
- [(match_operand:SVE_BHSI 2 "register_operand" "0, w")
+ (unspec:SVE_FULL_BHSI
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand" "0, w")
(match_operand:VNx2DI 3 "register_operand" "w, w")]
SVE_SHIFT_WIDE)
(match_dup 2)]
;; Predicated shifts of narrow elements by 64-bit amounts, merging with zero.
(define_insn "*cond_<sve_int_op><mode>_z"
- [(set (match_operand:SVE_BHSI 0 "register_operand" "=&w, &w")
- (unspec:SVE_BHSI
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=&w, &w")
+ (unspec:SVE_FULL_BHSI
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_BHSI
- [(match_operand:SVE_BHSI 2 "register_operand" "0, w")
+ (unspec:SVE_FULL_BHSI
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand" "0, w")
(match_operand:VNx2DI 3 "register_operand" "w, w")]
SVE_SHIFT_WIDE)
- (match_operand:SVE_BHSI 4 "aarch64_simd_imm_zero")]
+ (match_operand:SVE_FULL_BHSI 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
"@
;; Unpredicated ASRD.
(define_expand "sdiv_pow2<mode>3"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_dup 3)
- (unspec:SVE_I
- [(match_operand:SVE_I 1 "register_operand")
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 1 "register_operand")
(match_operand 2 "aarch64_simd_rshift_imm")]
UNSPEC_ASRD)
(match_dup 1)]
;; Predicated ASRD with merging.
+;; ASRD is an arithmetic shift right with rounding, as used for signed
+;; division by a power of two.  Operand 3 is the immediate shift
+;; amount (aarch64_simd_rshift_imm); inactive lanes, per predicate
+;; operand 1, take the value of operand 4 (a register or zero).
(define_expand "@cond_asrd<mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
 [(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "aarch64_simd_rshift_imm")]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm")]
 UNSPEC_ASRD)
- (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_reg_or_zero")]
 UNSPEC_SEL))]
 "TARGET_SVE"
)
;; Predicated ASRD, merging with the first input.
(define_insn "*cond_asrd<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "aarch64_simd_rshift_imm")]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm")]
UNSPEC_ASRD)
(match_dup 2)]
UNSPEC_SEL))]
;; Predicated ASRD, merging with zero.
(define_insn "*cond_asrd<mode>_z"
- [(set (match_operand:SVE_I 0 "register_operand" "=w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl")
- (unspec:SVE_I
- [(match_operand:SVE_I 2 "register_operand" "w")
- (match_operand:SVE_I 3 "aarch64_simd_rshift_imm")]
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 2 "register_operand" "w")
+ (match_operand:SVE_FULL_I 3 "aarch64_simd_rshift_imm")]
UNSPEC_ASRD)
- (match_operand:SVE_I 4 "aarch64_simd_imm_zero")]
+ (match_operand:SVE_FULL_I 4 "aarch64_simd_imm_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
"movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;asrd\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3"
;; Unpredicated floating-point binary operations that take an integer as
;; their second operand.
+;; Operand 2 uses <V_INT_EQUIV>, the integer vector mode with the same
+;; element size as the floating-point mode of operands 0 and 1.
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (unspec:SVE_F [(match_operand:SVE_F 1 "register_operand" "w")
- (match_operand:<V_INT_EQUIV> 2 "register_operand" "w")]
- SVE_FP_BINARY_INT))]
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 1 "register_operand" "w")
+ (match_operand:<V_INT_EQUIV> 2 "register_operand" "w")]
+ SVE_FP_BINARY_INT))]
 "TARGET_SVE"
 "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
)
;; Predicated floating-point binary operations that take an integer
;; as their second operand.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
(match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
SVE_COND_FP_BINARY_INT))]
"TARGET_SVE"
;; Predicated floating-point binary operations with merging, taking an
;; integer as their second operand.
+;; The governing predicate (operand 1) is reused, via match_dup, as a
+;; strict (SVE_STRICT_GP) predicate for the inner FP operation, so the
+;; operation may only be performed on active lanes.  Inactive lanes
+;; take the value of operand 4 (a register or zero).
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
 [(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
 [(match_dup 1)
 (const_int SVE_STRICT_GP)
- (match_operand:SVE_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")
 (match_operand:<V_INT_EQUIV> 3 "register_operand")]
 SVE_COND_FP_BINARY_INT)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
 UNSPEC_SEL))]
 "TARGET_SVE"
)
;; Predicated floating-point binary operations that take an integer as their
;; second operand, with inactive lanes coming from the first operand.
(define_insn_and_rewrite "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
(match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w")]
SVE_COND_FP_BINARY_INT)
(match_dup 2)]
;; their second operand, with the values of inactive lanes being distinct
;; from the other inputs.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, &w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w, w, w")
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w")
(match_operand:<V_INT_EQUIV> 3 "register_operand" "w, w, w, w")]
SVE_COND_FP_BINARY_INT)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; These are generated by splitting a predicated instruction whose
;; predicate is unused.
+;; The operation is written as plain (unpredicated) RTL using the
+;; SVE_UNPRED_FP_BINARY code iterator; only valid after register
+;; allocation (reload_completed).
(define_insn "*post_ra_<sve_fp_op><mode>3"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (SVE_UNPRED_FP_BINARY:SVE_F
- (match_operand:SVE_F 1 "register_operand" "w")
- (match_operand:SVE_F 2 "register_operand" "w")))]
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (SVE_UNPRED_FP_BINARY:SVE_FULL_F
+ (match_operand:SVE_FULL_F 1 "register_operand" "w")
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")))]
 "TARGET_SVE && reload_completed"
 "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>")
;; Unpredicated floating-point binary operations.
+;; Covers the SVE_FP_BINARY unspecs, which take both FP inputs as
+;; registers and emit a single three-operand instruction.
(define_insn "@aarch64_sve_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (unspec:SVE_F [(match_operand:SVE_F 1 "register_operand" "w")
- (match_operand:SVE_F 2 "register_operand" "w")]
- SVE_FP_BINARY))]
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 1 "register_operand" "w")
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")]
+ SVE_FP_BINARY))]
 "TARGET_SVE"
 "<sve_fp_op>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
)
;; Unpredicated floating-point binary operations that need to be predicated
;; for SVE.
(define_expand "<optab><mode>3"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_dup 3)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_F 1 "<sve_pred_fp_rhs1_operand>")
- (match_operand:SVE_F 2 "<sve_pred_fp_rhs2_operand>")]
+ (match_operand:SVE_FULL_F 1 "<sve_pred_fp_rhs1_operand>")
+ (match_operand:SVE_FULL_F 2 "<sve_pred_fp_rhs2_operand>")]
SVE_COND_FP_BINARY))]
"TARGET_SVE"
{
;; Predicated floating-point binary operations that have no immediate forms.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w, w")
- (match_operand:SVE_F 3 "register_operand" "w, 0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w")]
SVE_COND_FP_BINARY_REG))]
"TARGET_SVE"
"@
;; Predicated floating-point operations with merging.
+;; The governing predicate (operand 1) is reused, via match_dup, as a
+;; strict (SVE_STRICT_GP) predicate for the inner SVE_COND_FP_BINARY
+;; operation.  Inactive lanes take the value of operand 4 (a register
+;; or zero).
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
 [(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
 [(match_dup 1)
 (const_int SVE_STRICT_GP)
- (match_operand:SVE_F 2 "<sve_pred_fp_rhs1_operand>")
- (match_operand:SVE_F 3 "<sve_pred_fp_rhs2_operand>")]
+ (match_operand:SVE_FULL_F 2 "<sve_pred_fp_rhs1_operand>")
+ (match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_operand>")]
 SVE_COND_FP_BINARY)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
 UNSPEC_SEL))]
 "TARGET_SVE"
)
;; Predicated floating-point operations, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
SVE_COND_FP_BINARY)
(match_dup 2)]
UNSPEC_SEL))]
;; Same for operations that take a 1-bit constant.
(define_insn_and_rewrite "*cond_<optab><mode>_2_const"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")
- (match_operand:SVE_F 3 "<sve_pred_fp_rhs2_immediate>")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
(match_dup 2)]
UNSPEC_SEL))]
;; Predicated floating-point operations, merging with the second input.
(define_insn_and_rewrite "*cond_<optab><mode>_3"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w")
- (match_operand:SVE_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
SVE_COND_FP_BINARY)
(match_dup 3)]
UNSPEC_SEL))]
;; Predicated floating-point operations, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
SVE_COND_FP_BINARY)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; Same for operations that take a 1-bit constant.
(define_insn_and_rewrite "*cond_<optab><mode>_any_const"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w, w")
- (match_operand:SVE_F 3 "<sve_pred_fp_rhs2_immediate>")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_F 3 "<sve_pred_fp_rhs2_immediate>")]
SVE_COND_FP_BINARY_I1)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; Predicated floating-point addition.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl, Upl")
(match_operand:SI 4 "aarch64_sve_gp_strictness" "i, i, Z, Ui1, i, i, Ui1")
- (match_operand:SVE_F 2 "register_operand" "%0, 0, w, 0, w, w, w")
- (match_operand:SVE_F 3 "aarch64_sve_float_arith_with_sub_operand" "vsA, vsN, w, w, vsA, vsN, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "%0, 0, w, 0, w, w, w")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_operand" "vsA, vsN, w, w, vsA, vsN, w")]
SVE_COND_FP_ADD))]
"TARGET_SVE"
"@
"&& reload_completed
&& register_operand (operands[3], <MODE>mode)
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
- [(set (match_dup 0) (plus:SVE_F (match_dup 2) (match_dup 3)))]
+ [(set (match_dup 0) (plus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
[(set_attr "movprfx" "*,*,*,*,yes,yes,yes")]
)
;; Predicated floating-point addition of a constant, merging with the
;; first input.
(define_insn_and_rewrite "*cond_add<mode>_2_const"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?w, ?w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w, ?w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, 0, w, w")
- (match_operand:SVE_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, 0, w, w")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN")]
UNSPEC_COND_FADD)
(match_dup 2)]
UNSPEC_SEL))]
;; Predicated floating-point addition of a constant, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_add<mode>_any_const"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, w, w, ?w, ?w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?w, ?w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w, w, w, w, w")
- (match_operand:SVE_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN, vsA, vsN")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w, w, w")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_arith_with_sub_immediate" "vsA, vsN, vsA, vsN, vsA, vsN")]
UNSPEC_COND_FADD)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, 0, w, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, 0, w, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; Predicated FCADD.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
SVE_COND_FCADD))]
"TARGET_SVE"
"@
;; Predicated FCADD with merging.
+;; FCADD is the SVE complex addition; the rotation is implied by the
+;; particular unspec in SVE_COND_FCADD rather than by an operand.
+;; The governing predicate (operand 1) is reused as a strict
+;; (SVE_STRICT_GP) predicate for the inner operation; inactive lanes
+;; take the value of operand 4 (a register or zero).
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
 [(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
 [(match_dup 1)
 (const_int SVE_STRICT_GP)
- (match_operand:SVE_F 2 "register_operand")
- (match_operand:SVE_F 3 "register_operand")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
 SVE_COND_FCADD)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
 UNSPEC_SEL))]
 "TARGET_SVE"
)
;; Predicated FCADD, merging with the first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
SVE_COND_FCADD)
(match_dup 2)]
UNSPEC_SEL))]
;; Predicated FCADD, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, &w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, 0, w, w")
- (match_operand:SVE_F 3 "register_operand" "w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, 0, w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")]
SVE_COND_FCADD)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; Predicated floating-point subtraction.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, w, ?&w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
(match_operand:SI 4 "aarch64_sve_gp_strictness" "i, Z, Ui1, Ui1, i, Ui1")
- (match_operand:SVE_F 2 "aarch64_sve_float_arith_operand" "vsA, w, 0, w, vsA, w")
- (match_operand:SVE_F 3 "register_operand" "0, w, w, 0, w, w")]
+ (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_operand" "vsA, w, 0, w, vsA, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "0, w, w, 0, w, w")]
SVE_COND_FP_SUB))]
"TARGET_SVE"
"@
"&& reload_completed
&& register_operand (operands[2], <MODE>mode)
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
- [(set (match_dup 0) (minus:SVE_F (match_dup 2) (match_dup 3)))]
+ [(set (match_dup 0) (minus:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
[(set_attr "movprfx" "*,*,*,*,yes,yes")]
)
;; Predicated floating-point subtraction from a constant, merging with the
;; second input.
(define_insn_and_rewrite "*cond_sub<mode>_3_const"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
+ (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
UNSPEC_COND_FSUB)
(match_dup 3)]
UNSPEC_SEL))]
;; Predicated floating-point subtraction from a constant, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_sub<mode>_any_const"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "aarch64_sve_float_arith_immediate")
- (match_operand:SVE_F 3 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "aarch64_sve_float_arith_immediate")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")]
UNSPEC_COND_FSUB)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[3], operands[4])
;; Predicated floating-point absolute difference.
(define_expand "@aarch64_pred_abd<mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_dup 1)
(match_dup 4)
- (match_operand:SVE_F 2 "register_operand")
- (match_operand:SVE_F 3 "register_operand")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS))]
"TARGET_SVE"
;; Predicated floating-point absolute difference.
(define_insn_and_rewrite "*aarch64_pred_abd<mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "%0, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "%0, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS))]
"TARGET_SVE && aarch64_sve_pred_dominates_p (&operands[5], operands[1])"
)
(define_expand "@aarch64_cond_abd<mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_F 2 "register_operand")
- (match_operand:SVE_F 3 "register_operand")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
{
;; Predicated floating-point absolute difference, merging with the first
;; input.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_2"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 2)]
;; Predicated floating-point absolute difference, merging with the second
;; input.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_3"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w")
- (match_operand:SVE_F 3 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "0, w")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
(match_dup 3)]
;; Predicated floating-point absolute difference, merging with an
;; independent value.
(define_insn_and_rewrite "*aarch64_cond_abd<mode>_any"
- [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 7)
(match_operand:SI 8 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w, w, w, w")
- (match_operand:SVE_F 3 "register_operand" "w, 0, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w, w, w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, 0, w, w, w")]
UNSPEC_COND_FSUB)]
UNSPEC_COND_FABS)
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[4])
;; Predicated floating-point multiplication.
(define_insn_and_split "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, w, ?&w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, w, ?&w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl")
(match_operand:SI 4 "aarch64_sve_gp_strictness" "i, Z, Ui1, i, Ui1")
- (match_operand:SVE_F 2 "register_operand" "%0, w, 0, w, w")
- (match_operand:SVE_F 3 "aarch64_sve_float_mul_operand" "vsM, w, w, vsM, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "%0, w, 0, w, w")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_mul_operand" "vsM, w, w, vsM, w")]
SVE_COND_FP_MUL))]
"TARGET_SVE"
"@
"&& reload_completed
&& register_operand (operands[3], <MODE>mode)
&& INTVAL (operands[4]) == SVE_RELAXED_GP"
- [(set (match_dup 0) (mult:SVE_F (match_dup 2) (match_dup 3)))]
+ [(set (match_dup 0) (mult:SVE_FULL_F (match_dup 2) (match_dup 3)))]
""
[(set_attr "movprfx" "*,*,*,yes,yes")]
)
;; Unpredicated multiplication by selected lanes.
(define_insn "@aarch64_mul_lane_<mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (mult:SVE_F
- (unspec:SVE_F
- [(match_operand:SVE_F 2 "register_operand" "<sve_lane_con>")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (mult:SVE_FULL_F
+ (unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 2 "register_operand" "<sve_lane_con>")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_F 1 "register_operand" "w")))]
+ (match_operand:SVE_FULL_F 1 "register_operand" "w")))]
"TARGET_SVE"
"fmul\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>[%3]"
)
;; by providing this, but we need to use UNSPECs since rtx logical ops
;; aren't defined for floating-point modes.
(define_insn "*<optab><mode>3"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (unspec:SVE_F [(match_operand:SVE_F 1 "register_operand" "w")
- (match_operand:SVE_F 2 "register_operand" "w")]
- LOGICALF))]
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 1 "register_operand" "w")
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")]
+ LOGICALF))]
"TARGET_SVE"
"<logicalf_op>\t%0.d, %1.d, %2.d"
)
;; -------------------------------------------------------------------------
(define_expand "copysign<mode>3"
- [(match_operand:SVE_F 0 "register_operand")
- (match_operand:SVE_F 1 "register_operand")
- (match_operand:SVE_F 2 "register_operand")]
+ [(match_operand:SVE_FULL_F 0 "register_operand")
+ (match_operand:SVE_FULL_F 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")]
"TARGET_SVE"
{
rtx sign = gen_reg_rtx (<V_INT_EQUIV>mode);
)
(define_expand "xorsign<mode>3"
- [(match_operand:SVE_F 0 "register_operand")
- (match_operand:SVE_F 1 "register_operand")
- (match_operand:SVE_F 2 "register_operand")]
+ [(match_operand:SVE_FULL_F 0 "register_operand")
+ (match_operand:SVE_FULL_F 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")]
"TARGET_SVE"
{
rtx sign = gen_reg_rtx (<V_INT_EQUIV>mode);
;; Unpredicated fmax/fmin (the libm functions). The optabs for the
;; smin/smax rtx codes are handled in the generic section above.
(define_expand "<maxmin_uns><mode>3"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_dup 3)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_F 1 "register_operand")
- (match_operand:SVE_F 2 "aarch64_sve_float_maxmin_operand")]
+ (match_operand:SVE_FULL_F 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "aarch64_sve_float_maxmin_operand")]
SVE_COND_FP_MAXMIN_PUBLIC))]
"TARGET_SVE"
{
;; Predicated floating-point maximum/minimum.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "%0, 0, w, w")
- (match_operand:SVE_F 3 "aarch64_sve_float_maxmin_operand" "vsB, w, vsB, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "%0, 0, w, w")
+ (match_operand:SVE_FULL_F 3 "aarch64_sve_float_maxmin_operand" "vsB, w, vsB, w")]
SVE_COND_FP_MAXMIN))]
"TARGET_SVE"
"@
;; Unpredicated integer addition of product.
(define_expand "fma<mode>4"
- [(set (match_operand:SVE_I 0 "register_operand")
- (plus:SVE_I
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (plus:SVE_FULL_I
+ (unspec:SVE_FULL_I
[(match_dup 4)
- (mult:SVE_I (match_operand:SVE_I 1 "register_operand")
- (match_operand:SVE_I 2 "nonmemory_operand"))]
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "nonmemory_operand"))]
UNSPEC_PRED_X)
- (match_operand:SVE_I 3 "register_operand")))]
+ (match_operand:SVE_FULL_I 3 "register_operand")))]
"TARGET_SVE"
{
if (aarch64_prepare_sve_int_fma (operands, PLUS))
;; Predicated integer addition of product.
(define_insn "@aarch64_pred_fma<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
- (plus:SVE_I
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w")
+ (plus:SVE_FULL_I
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w"))]
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "%0, w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w, w"))]
UNSPEC_PRED_X)
- (match_operand:SVE_I 4 "register_operand" "w, 0, w")))]
+ (match_operand:SVE_FULL_I 4 "register_operand" "w, 0, w")))]
"TARGET_SVE"
"@
mad\t%0.<Vetype>, %1/m, %3.<Vetype>, %4.<Vetype>
;; Predicated integer addition of product with merging.
(define_expand "cond_fma<mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand")
- (plus:SVE_I
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "general_operand"))
- (match_operand:SVE_I 4 "register_operand"))
- (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero")]
+ (plus:SVE_FULL_I
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "general_operand"))
+ (match_operand:SVE_FULL_I 4 "register_operand"))
+ (match_operand:SVE_FULL_I 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
{
;; Predicated integer addition of product, merging with the first input.
(define_insn "*cond_fma<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (plus:SVE_I
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 4 "register_operand" "w, w"))
+ (plus:SVE_FULL_I
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_I 4 "register_operand" "w, w"))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer addition of product, merging with the third input.
(define_insn "*cond_fma<mode>_4"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (plus:SVE_I
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w"))
- (match_operand:SVE_I 4 "register_operand" "0, w"))
+ (plus:SVE_FULL_I
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w"))
+ (match_operand:SVE_FULL_I 4 "register_operand" "0, w"))
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer addition of product, merging with an independent value.
(define_insn_and_rewrite "*cond_fma<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (plus:SVE_I
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w, 0, w, w"))
- (match_operand:SVE_I 4 "register_operand" "w, 0, w, w, w, w"))
- (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (plus:SVE_FULL_I
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w, 0, w, w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w, w, 0, w, w"))
+ (match_operand:SVE_FULL_I 4 "register_operand" "w, 0, w, w, w, w"))
+ (match_operand:SVE_FULL_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
;; Unpredicated integer subtraction of product.
(define_expand "fnma<mode>4"
- [(set (match_operand:SVE_I 0 "register_operand")
- (minus:SVE_I
- (match_operand:SVE_I 3 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (minus:SVE_FULL_I
+ (match_operand:SVE_FULL_I 3 "register_operand")
+ (unspec:SVE_FULL_I
[(match_dup 4)
- (mult:SVE_I (match_operand:SVE_I 1 "register_operand")
- (match_operand:SVE_I 2 "general_operand"))]
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 1 "register_operand")
+ (match_operand:SVE_FULL_I 2 "general_operand"))]
UNSPEC_PRED_X)))]
"TARGET_SVE"
{
;; Predicated integer subtraction of product.
(define_insn "@aarch64_pred_fnma<mode>"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
- (minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, 0, w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w")
+ (minus:SVE_FULL_I
+ (match_operand:SVE_FULL_I 4 "register_operand" "w, 0, w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "%0, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w"))]
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "%0, w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w, w"))]
UNSPEC_PRED_X)))]
"TARGET_SVE"
"@
;; Predicated integer subtraction of product with merging.
(define_expand "cond_fnma<mode>"
- [(set (match_operand:SVE_I 0 "register_operand")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand")
- (minus:SVE_I
- (match_operand:SVE_I 4 "register_operand")
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "general_operand")))
- (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero")]
+ (minus:SVE_FULL_I
+ (match_operand:SVE_FULL_I 4 "register_operand")
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "general_operand")))
+ (match_operand:SVE_FULL_I 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
{
;; Predicated integer subtraction of product, merging with the first input.
(define_insn "*cond_fnma<mode>_2"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, w")
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "0, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")))
+ (minus:SVE_FULL_I
+ (match_operand:SVE_FULL_I 4 "register_operand" "w, w")
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w")))
(match_dup 2)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer subtraction of product, merging with the third input.
(define_insn "*cond_fnma<mode>_4"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "0, w")
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w")))
+ (minus:SVE_FULL_I
+ (match_operand:SVE_FULL_I 4 "register_operand" "0, w")
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w")))
(match_dup 4)]
UNSPEC_SEL))]
"TARGET_SVE"
;; Predicated integer subtraction of product, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_fnma<mode>_any"
- [(set (match_operand:SVE_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
- (unspec:SVE_I
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ (unspec:SVE_FULL_I
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (minus:SVE_I
- (match_operand:SVE_I 4 "register_operand" "w, 0, w, w, w, w")
- (mult:SVE_I (match_operand:SVE_I 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_I 3 "register_operand" "w, w, w, 0, w, w")))
- (match_operand:SVE_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (minus:SVE_FULL_I
+ (match_operand:SVE_FULL_I 4 "register_operand" "w, 0, w, w, w, w")
+ (mult:SVE_FULL_I
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w, 0, w, w, w")
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w, w, 0, w, w")))
+ (match_operand:SVE_FULL_I 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
;; Four-element integer dot-product with accumulation.
(define_insn "<sur>dot_prod<vsi2qi>"
- [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w")
- (plus:SVE_SDI
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ (plus:SVE_FULL_SDI
+ (unspec:SVE_FULL_SDI
[(match_operand:<VSI2QI> 1 "register_operand" "w, w")
(match_operand:<VSI2QI> 2 "register_operand" "w, w")]
DOTPROD)
- (match_operand:SVE_SDI 3 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_SDI 3 "register_operand" "0, w")))]
"TARGET_SVE"
"@
<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>
;; Four-element integer dot-product by selected lanes with accumulation.
(define_insn "@aarch64_<sur>dot_prod_lane<vsi2qi>"
- [(set (match_operand:SVE_SDI 0 "register_operand" "=w, ?&w")
- (plus:SVE_SDI
- (unspec:SVE_SDI
+ [(set (match_operand:SVE_FULL_SDI 0 "register_operand" "=w, ?&w")
+ (plus:SVE_FULL_SDI
+ (unspec:SVE_FULL_SDI
[(match_operand:<VSI2QI> 1 "register_operand" "w, w")
(unspec:<VSI2QI>
[(match_operand:<VSI2QI> 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)]
DOTPROD)
- (match_operand:SVE_SDI 4 "register_operand" "0, w")))]
+ (match_operand:SVE_FULL_SDI 4 "register_operand" "0, w")))]
"TARGET_SVE"
"@
<sur>dot\\t%0.<Vetype>, %1.<Vetype_fourth>, %2.<Vetype_fourth>[%3]
;; MOVPRFX op0, op3 // If necessary
;; UDOT op0.s, diff.b, ones.b
(define_expand "<sur>sad<vsi2qi>"
- [(use (match_operand:SVE_SDI 0 "register_operand"))
+ [(use (match_operand:SVE_FULL_SDI 0 "register_operand"))
(unspec:<VSI2QI> [(use (match_operand:<VSI2QI> 1 "register_operand"))
(use (match_operand:<VSI2QI> 2 "register_operand"))] ABAL)
- (use (match_operand:SVE_SDI 3 "register_operand"))]
+ (use (match_operand:SVE_FULL_SDI 3 "register_operand"))]
"TARGET_SVE"
{
rtx ones = force_reg (<VSI2QI>mode, CONST1_RTX (<VSI2QI>mode));
;; Unpredicated floating-point ternary operations.
(define_expand "<optab><mode>4"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_dup 4)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_F 1 "register_operand")
- (match_operand:SVE_F 2 "register_operand")
- (match_operand:SVE_F 3 "register_operand")]
+ (match_operand:SVE_FULL_F 1 "register_operand")
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")]
SVE_COND_FP_TERNARY))]
"TARGET_SVE"
{
;; Predicated floating-point ternary operations.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "%w, 0, w")
- (match_operand:SVE_F 3 "register_operand" "w, w, w")
- (match_operand:SVE_F 4 "register_operand" "0, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "%w, 0, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w")
+ (match_operand:SVE_FULL_F 4 "register_operand" "0, w, w")]
SVE_COND_FP_TERNARY))]
"TARGET_SVE"
"@
;; Predicated floating-point ternary operations with merging.
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_F 2 "register_operand")
- (match_operand:SVE_F 3 "register_operand")
- (match_operand:SVE_F 4 "register_operand")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FP_TERNARY)
- (match_operand:SVE_F 5 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
{
;; Predicated floating-point ternary operations, merging with the
;; first input.
(define_insn_and_rewrite "*cond_<optab><mode>_2"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "0, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")
- (match_operand:SVE_F 4 "register_operand" "w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 4 "register_operand" "w, w")]
SVE_COND_FP_TERNARY)
(match_dup 2)]
UNSPEC_SEL))]
;; Predicated floating-point ternary operations, merging with the
;; third input.
(define_insn_and_rewrite "*cond_<optab><mode>_4"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")
- (match_operand:SVE_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
SVE_COND_FP_TERNARY)
(match_dup 4)]
UNSPEC_SEL))]
;; Predicated floating-point ternary operations, merging with an
;; independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, &w, &w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w, 0, w, w, w")
- (match_operand:SVE_F 3 "register_operand" "w, w, w, 0, w, w")
- (match_operand:SVE_F 4 "register_operand" "w, 0, w, w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w, 0, w, w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, 0, w, w")
+ (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w, w, w")]
SVE_COND_FP_TERNARY)
- (match_operand:SVE_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[2], operands[5])
;; Unpredicated FMLA and FMLS by selected lanes. It doesn't seem worth using
;; (fma ...) since target-independent code won't understand the indexing.
(define_insn "@aarch64_<optab>_lane_<mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
- [(match_operand:SVE_F 1 "register_operand" "w, w")
- (unspec:SVE_F
- [(match_operand:SVE_F 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 1 "register_operand" "w, w")
+ (unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 2 "register_operand" "<sve_lane_con>, <sve_lane_con>")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
SVE_FP_TERNARY_LANE))]
"TARGET_SVE"
"@
;; Predicated FCMLA.
(define_insn "@aarch64_pred_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")
- (match_operand:SVE_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
SVE_COND_FCMLA))]
"TARGET_SVE"
"@
;; Predicated FCMLA with merging.
(define_expand "@cond_<optab><mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_F 2 "register_operand")
- (match_operand:SVE_F 3 "register_operand")
- (match_operand:SVE_F 4 "register_operand")]
+ (match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "register_operand")
+ (match_operand:SVE_FULL_F 4 "register_operand")]
SVE_COND_FCMLA)
- (match_operand:SVE_F 5 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
)
;; Predicated FCMLA, merging with the third input.
(define_insn_and_rewrite "*cond_<optab><mode>_4"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w")
- (match_operand:SVE_F 3 "register_operand" "w, w")
- (match_operand:SVE_F 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 4 "register_operand" "0, w")]
SVE_COND_FCMLA)
(match_dup 4)]
UNSPEC_SEL))]
;; Predicated FCMLA, merging with an independent value.
(define_insn_and_rewrite "*cond_<optab><mode>_any"
- [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, &w, ?&w")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, &w, ?&w")
+ (unspec:SVE_FULL_F
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w, w, w")
- (match_operand:SVE_F 3 "register_operand" "w, w, w, w")
- (match_operand:SVE_F 4 "register_operand" "w, 0, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w, w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w, w, w")
+ (match_operand:SVE_FULL_F 4 "register_operand" "w, 0, w, w")]
SVE_COND_FCMLA)
- (match_operand:SVE_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
+ (match_operand:SVE_FULL_F 5 "aarch64_simd_reg_or_zero" "Dz, Dz, 0, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& !rtx_equal_p (operands[4], operands[5])
;; Unpredicated FCMLA with indexing.
(define_insn "@aarch64_<optab>_lane_<mode>"
- [(set (match_operand:SVE_HSF 0 "register_operand" "=w, ?&w")
- (unspec:SVE_HSF
- [(match_operand:SVE_HSF 1 "register_operand" "w, w")
- (unspec:SVE_HSF
- [(match_operand:SVE_HSF 2 "register_operand" "<sve_lane_pair_con>, <sve_lane_pair_con>")
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_HSF
+ [(match_operand:SVE_FULL_HSF 1 "register_operand" "w, w")
+ (unspec:SVE_FULL_HSF
+ [(match_operand:SVE_FULL_HSF 2 "register_operand" "<sve_lane_pair_con>, <sve_lane_pair_con>")
(match_operand:SI 3 "const_int_operand")]
UNSPEC_SVE_LANE_SELECT)
- (match_operand:SVE_HSF 4 "register_operand" "0, w")]
+ (match_operand:SVE_FULL_HSF 4 "register_operand" "0, w")]
FCMLA))]
"TARGET_SVE"
"@
;; -------------------------------------------------------------------------
(define_insn "@aarch64_sve_tmad<mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
- (unspec:SVE_F [(match_operand:SVE_F 1 "register_operand" "0, w")
- (match_operand:SVE_F 2 "register_operand" "w, w")
- (match_operand:DI 3 "const_int_operand")]
- UNSPEC_FTMAD))]
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL_F
+ [(match_operand:SVE_FULL_F 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ (match_operand:DI 3 "const_int_operand")]
+ UNSPEC_FTMAD))]
"TARGET_SVE"
"@
ftmad\t%0.<Vetype>, %0.<Vetype>, %2.<Vetype>, #%3
;; UNSPEC_SEL operand order: mask, true, false (as for VEC_COND_EXPR)
;; SEL operand order: mask, true, false
(define_expand "@vcond_mask_<mode><vpred>"
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 3 "register_operand")
- (match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm")
- (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL 1 "aarch64_sve_reg_or_dup_imm")
+ (match_operand:SVE_FULL 2 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
"TARGET_SVE"
{
;; - a duplicated immediate and a register
;; - a duplicated immediate and zero
(define_insn "*vcond_mask_<mode><vpred>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, w, w, w, ?w, ?&w, ?&w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w, w, w, w, ?w, ?&w, ?&w")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 3 "register_operand" "Upa, Upa, Upa, Upa, Upl, Upl, Upl")
- (match_operand:SVE_ALL 1 "aarch64_sve_reg_or_dup_imm" "w, vss, vss, Ufc, Ufc, vss, Ufc")
- (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "w, 0, Dz, 0, Dz, w, w")]
+ (match_operand:SVE_FULL 1 "aarch64_sve_reg_or_dup_imm" "w, vss, vss, Ufc, Ufc, vss, Ufc")
+ (match_operand:SVE_FULL 2 "aarch64_simd_reg_or_zero" "w, 0, Dz, 0, Dz, w, w")]
UNSPEC_SEL))]
"TARGET_SVE
&& (!register_operand (operands[1], <MODE>mode)
;; of GPRs as being more expensive than duplicates of FPRs, since they
;; involve a cross-file move.
(define_insn "@aarch64_sel_dup<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=?w, w, ??w, ?&w, ??&w, ?&w")
- (unspec:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=?w, w, ??w, ?&w, ??&w, ?&w")
+ (unspec:SVE_FULL
[(match_operand:<VPRED> 3 "register_operand" "Upa, Upa, Upl, Upl, Upl, Upl")
- (vec_duplicate:SVE_ALL
+ (vec_duplicate:SVE_FULL
(match_operand:<VEL> 1 "register_operand" "r, w, r, w, r, w"))
- (match_operand:SVE_ALL 2 "aarch64_simd_reg_or_zero" "0, 0, Dz, Dz, w, w")]
+ (match_operand:SVE_FULL 2 "aarch64_simd_reg_or_zero" "0, 0, Dz, Dz, w, w")]
UNSPEC_SEL))]
"TARGET_SVE"
"@
;; Integer (signed) vcond. Don't enforce an immediate range here, since it
;; depends on the comparison; leave it to aarch64_expand_sve_vcond instead.
(define_expand "vcond<mode><v_int_equiv>"
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (if_then_else:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand")
+ (if_then_else:SVE_FULL
(match_operator 3 "comparison_operator"
[(match_operand:<V_INT_EQUIV> 4 "register_operand")
(match_operand:<V_INT_EQUIV> 5 "nonmemory_operand")])
- (match_operand:SVE_ALL 1 "nonmemory_operand")
- (match_operand:SVE_ALL 2 "nonmemory_operand")))]
+ (match_operand:SVE_FULL 1 "nonmemory_operand")
+ (match_operand:SVE_FULL 2 "nonmemory_operand")))]
"TARGET_SVE"
{
aarch64_expand_sve_vcond (<MODE>mode, <V_INT_EQUIV>mode, operands);
;; Integer vcondu. Don't enforce an immediate range here, since it
;; depends on the comparison; leave it to aarch64_expand_sve_vcond instead.
(define_expand "vcondu<mode><v_int_equiv>"
- [(set (match_operand:SVE_ALL 0 "register_operand")
- (if_then_else:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand")
+ (if_then_else:SVE_FULL
(match_operator 3 "comparison_operator"
[(match_operand:<V_INT_EQUIV> 4 "register_operand")
(match_operand:<V_INT_EQUIV> 5 "nonmemory_operand")])
- (match_operand:SVE_ALL 1 "nonmemory_operand")
- (match_operand:SVE_ALL 2 "nonmemory_operand")))]
+ (match_operand:SVE_FULL 1 "nonmemory_operand")
+ (match_operand:SVE_FULL 2 "nonmemory_operand")))]
"TARGET_SVE"
{
aarch64_expand_sve_vcond (<MODE>mode, <V_INT_EQUIV>mode, operands);
;; Floating-point vcond. All comparisons except FCMUO allow a zero operand;
;; aarch64_expand_sve_vcond handles the case of an FCMUO with zero.
(define_expand "vcond<mode><v_fp_equiv>"
- [(set (match_operand:SVE_HSD 0 "register_operand")
- (if_then_else:SVE_HSD
+ [(set (match_operand:SVE_FULL_HSD 0 "register_operand")
+ (if_then_else:SVE_FULL_HSD
(match_operator 3 "comparison_operator"
[(match_operand:<V_FP_EQUIV> 4 "register_operand")
(match_operand:<V_FP_EQUIV> 5 "aarch64_simd_reg_or_zero")])
- (match_operand:SVE_HSD 1 "nonmemory_operand")
- (match_operand:SVE_HSD 2 "nonmemory_operand")))]
+ (match_operand:SVE_FULL_HSD 1 "nonmemory_operand")
+ (match_operand:SVE_FULL_HSD 2 "nonmemory_operand")))]
"TARGET_SVE"
{
aarch64_expand_sve_vcond (<MODE>mode, <V_FP_EQUIV>mode, operands);
[(parallel
[(set (match_operand:<VPRED> 0 "register_operand")
(match_operator:<VPRED> 1 "comparison_operator"
- [(match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "nonmemory_operand")]))
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "nonmemory_operand")]))
(clobber (reg:CC_NZC CC_REGNUM))])]
"TARGET_SVE"
{
[(parallel
[(set (match_operand:<VPRED> 0 "register_operand")
(match_operator:<VPRED> 1 "comparison_operator"
- [(match_operand:SVE_I 2 "register_operand")
- (match_operand:SVE_I 3 "nonmemory_operand")]))
+ [(match_operand:SVE_FULL_I 2 "register_operand")
+ (match_operand:SVE_FULL_I 3 "nonmemory_operand")]))
(clobber (reg:CC_NZC CC_REGNUM))])]
"TARGET_SVE"
{
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 3 "register_operand" "w, w")
- (match_operand:SVE_I 4 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_FULL_I 3 "register_operand" "w, w")
+ (match_operand:SVE_FULL_I 4 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
UNSPEC_PRED_Z))
(clobber (reg:CC_NZC CC_REGNUM))]
"TARGET_SVE"
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
UNSPEC_PRED_Z)]
UNSPEC_PTEST))
(set (match_operand:<VPRED> 0 "register_operand" "=Upa, Upa")
[(match_operand 6)
(match_operand:SI 7 "aarch64_sve_ptrue_flag")
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
UNSPEC_PRED_Z)]
UNSPEC_PTEST))
(clobber (match_scratch:<VPRED> 0 "=Upa, Upa"))]
[(match_operand 4)
(const_int SVE_KNOWN_PTRUE)
(SVE_INT_CMP:<VPRED>
- (match_operand:SVE_I 2 "register_operand" "w, w")
- (match_operand:SVE_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
+ (match_operand:SVE_FULL_I 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_I 3 "aarch64_sve_cmp_<sve_imm_con>_operand" "<sve_imm_con>, w"))]
UNSPEC_PRED_Z)
(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")))
(clobber (reg:CC_NZC CC_REGNUM))]
[(match_operand:VNx16BI 1 "register_operand" "Upl")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
(unspec:<VPRED>
- [(match_operand:SVE_BHSI 3 "register_operand" "w")
+ [(match_operand:SVE_FULL_BHSI 3 "register_operand" "w")
(match_operand:VNx2DI 4 "register_operand" "w")]
SVE_COND_INT_CMP_WIDE)]
UNSPEC_PRED_Z))
[(match_operand:VNx16BI 6 "register_operand" "Upl")
(match_operand:SI 7 "aarch64_sve_ptrue_flag")
(unspec:<VPRED>
- [(match_operand:SVE_BHSI 2 "register_operand" "w")
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand" "w")
(match_operand:VNx2DI 3 "register_operand" "w")]
SVE_COND_INT_CMP_WIDE)]
UNSPEC_PRED_Z)]
[(match_operand:VNx16BI 6 "register_operand" "Upl")
(match_operand:SI 7 "aarch64_sve_ptrue_flag")
(unspec:<VPRED>
- [(match_operand:SVE_BHSI 2 "register_operand" "w")
+ [(match_operand:SVE_FULL_BHSI 2 "register_operand" "w")
(match_operand:VNx2DI 3 "register_operand" "w")]
SVE_COND_INT_CMP_WIDE)]
UNSPEC_PRED_Z)]
(define_expand "vec_cmp<mode><vpred>"
[(set (match_operand:<VPRED> 0 "register_operand")
(match_operator:<VPRED> 1 "comparison_operator"
- [(match_operand:SVE_F 2 "register_operand")
- (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero")]))]
+ [(match_operand:SVE_FULL_F 2 "register_operand")
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]))]
"TARGET_SVE"
{
aarch64_expand_sve_vec_cmp_float (operands[0], GET_CODE (operands[1]),
(unspec:<VPRED>
[(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
- (match_operand:SVE_F 3 "register_operand" "w, w")
- (match_operand:SVE_F 4 "aarch64_simd_reg_or_zero" "Dz, w")]
+ (match_operand:SVE_FULL_F 3 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero" "Dz, w")]
SVE_COND_FP_CMP_I0))]
"TARGET_SVE"
"@
(unspec:<VPRED>
[(match_operand:<VPRED> 1 "register_operand" "Upl")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
- (match_operand:SVE_F 3 "register_operand" "w")
- (match_operand:SVE_F 4 "register_operand" "w")]
+ (match_operand:SVE_FULL_F 3 "register_operand" "w")
+ (match_operand:SVE_FULL_F 4 "register_operand" "w")]
UNSPEC_COND_FCMUO))]
"TARGET_SVE"
"fcmuo\t%0.<Vetype>, %1/z, %3.<Vetype>, %4.<Vetype>"
(unspec:<VPRED>
[(match_operand:<VPRED> 1)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:SVE_F 2 "register_operand" "w, w")
- (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero" "Dz, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w")
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "Dz, w")]
SVE_COND_FP_CMP_I0)
(match_operand:<VPRED> 4 "register_operand" "Upl, Upl")))]
"TARGET_SVE"
(unspec:<VPRED>
[(match_operand:<VPRED> 1)
(const_int SVE_KNOWN_PTRUE)
- (match_operand:SVE_F 2 "register_operand" "w")
- (match_operand:SVE_F 3 "register_operand" "w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")
+ (match_operand:SVE_FULL_F 3 "register_operand" "w")]
UNSPEC_COND_FCMUO)
(match_operand:<VPRED> 4 "register_operand" "Upl")))]
"TARGET_SVE"
(unspec:<VPRED>
[(match_operand:<VPRED> 1 "register_operand")
(match_operand:SI 2 "aarch64_sve_ptrue_flag")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_dup 1)
(match_dup 2)
- (match_operand:SVE_F 3 "register_operand")]
+ (match_operand:SVE_FULL_F 3 "register_operand")]
UNSPEC_COND_FABS)
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_dup 1)
(match_dup 2)
- (match_operand:SVE_F 4 "register_operand")]
+ (match_operand:SVE_FULL_F 4 "register_operand")]
UNSPEC_COND_FABS)]
SVE_COND_FP_ABS_CMP))]
"TARGET_SVE"
(unspec:<VPRED>
[(match_operand:<VPRED> 1 "register_operand" "Upl")
(match_operand:SI 4 "aarch64_sve_ptrue_flag")
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 5)
(match_operand:SI 6 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")]
UNSPEC_COND_FABS)
- (unspec:SVE_F
+ (unspec:SVE_FULL_F
[(match_operand 7)
(match_operand:SI 8 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 3 "register_operand" "w")]
+ (match_operand:SVE_FULL_F 3 "register_operand" "w")]
UNSPEC_COND_FABS)]
SVE_COND_FP_ABS_CMP))]
"TARGET_SVE
(unspec:<VEL>
[(match_operand:<VEL> 1 "register_operand" "0, 0")
(match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
- (match_operand:SVE_ALL 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL 3 "register_operand" "w, w")]
CLAST))]
"TARGET_SVE"
"@
)
(define_insn "@aarch64_fold_extract_vector_<last_op>_<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, ?&w")
- (unspec:SVE_ALL
- [(match_operand:SVE_ALL 1 "register_operand" "0, w")
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "0, w")
(match_operand:<VPRED> 2 "register_operand" "Upl, Upl")
- (match_operand:SVE_ALL 3 "register_operand" "w, w")]
+ (match_operand:SVE_FULL 3 "register_operand" "w, w")]
CLAST))]
"TARGET_SVE"
"@
;; Unpredicated integer add reduction.
(define_expand "reduc_plus_scal_<mode>"
[(match_operand:<VEL> 0 "register_operand")
- (match_operand:SVE_I 1 "register_operand")]
+ (match_operand:SVE_FULL_I 1 "register_operand")]
"TARGET_SVE"
{
rtx pred = aarch64_ptrue_reg (<VPRED>mode);
(define_insn "@aarch64_pred_reduc_<optab>_<mode>"
[(set (match_operand:DI 0 "register_operand" "=w")
(unspec:DI [(match_operand:<VPRED> 1 "register_operand" "Upl")
- (match_operand:SVE_I 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_I 2 "register_operand" "w")]
SVE_INT_ADDV))]
"TARGET_SVE && <max_elem_bits> >= <elem_bits>"
"<su>addv\t%d0, %1, %2.<Vetype>"
(define_expand "reduc_<optab>_scal_<mode>"
[(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL> [(match_dup 2)
- (match_operand:SVE_I 1 "register_operand")]
+ (match_operand:SVE_FULL_I 1 "register_operand")]
SVE_INT_REDUCTION))]
"TARGET_SVE"
{
(define_insn "@aarch64_pred_reduc_<optab>_<mode>"
[(set (match_operand:<VEL> 0 "register_operand" "=w")
(unspec:<VEL> [(match_operand:<VPRED> 1 "register_operand" "Upl")
- (match_operand:SVE_I 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_I 2 "register_operand" "w")]
SVE_INT_REDUCTION))]
"TARGET_SVE"
"<sve_int_op>\t%<Vetype>0, %1, %2.<Vetype>"
(define_expand "reduc_<optab>_scal_<mode>"
[(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL> [(match_dup 2)
- (match_operand:SVE_F 1 "register_operand")]
+ (match_operand:SVE_FULL_F 1 "register_operand")]
SVE_FP_REDUCTION))]
"TARGET_SVE"
{
(define_insn "@aarch64_pred_reduc_<optab>_<mode>"
[(set (match_operand:<VEL> 0 "register_operand" "=w")
(unspec:<VEL> [(match_operand:<VPRED> 1 "register_operand" "Upl")
- (match_operand:SVE_F 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")]
SVE_FP_REDUCTION))]
"TARGET_SVE"
"<sve_fp_op>\t%<Vetype>0, %1, %2.<Vetype>"
[(set (match_operand:<VEL> 0 "register_operand")
(unspec:<VEL> [(match_dup 3)
(match_operand:<VEL> 1 "register_operand")
- (match_operand:SVE_F 2 "register_operand")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
UNSPEC_FADDA))]
"TARGET_SVE"
{
[(set (match_operand:<VEL> 0 "register_operand" "=w")
(unspec:<VEL> [(match_operand:<VPRED> 3 "register_operand" "Upl")
(match_operand:<VEL> 1 "register_operand" "0")
- (match_operand:SVE_F 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")]
UNSPEC_FADDA))]
"TARGET_SVE"
"fadda\t%<Vetype>0, %3, %<Vetype>0, %2.<Vetype>"
;; -------------------------------------------------------------------------
(define_expand "vec_perm<mode>"
- [(match_operand:SVE_ALL 0 "register_operand")
- (match_operand:SVE_ALL 1 "register_operand")
- (match_operand:SVE_ALL 2 "register_operand")
+ [(match_operand:SVE_FULL 0 "register_operand")
+ (match_operand:SVE_FULL 1 "register_operand")
+ (match_operand:SVE_FULL 2 "register_operand")
(match_operand:<V_INT_EQUIV> 3 "aarch64_sve_vec_perm_operand")]
"TARGET_SVE && GET_MODE_NUNITS (<MODE>mode).is_constant ()"
{
)
(define_insn "@aarch64_sve_tbl<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL
- [(match_operand:SVE_ALL 1 "register_operand" "w")
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "w")
(match_operand:<V_INT_EQUIV> 2 "register_operand" "w")]
UNSPEC_TBL))]
"TARGET_SVE"
;; Compact active elements and pad with zeros.
;; COMPACT copies the lanes of operand 2 that are active in predicate
;; operand 1 down to the low-numbered lanes of operand 0, zero-filling
;; the remainder (see comment above).  The instruction only exists for
;; .S and .D element sizes, hence the SVE_FULL_SD iterator.
(define_insn "@aarch64_sve_compact<mode>"
- [(set (match_operand:SVE_SD 0 "register_operand" "=w")
- (unspec:SVE_SD [(match_operand:<VPRED> 1 "register_operand" "Upl")
- (match_operand:SVE_SD 2 "register_operand" "w")]
- UNSPEC_SVE_COMPACT))]
+ [(set (match_operand:SVE_FULL_SD 0 "register_operand" "=w")
+ (unspec:SVE_FULL_SD
+ [(match_operand:<VPRED> 1 "register_operand" "Upl")
+ (match_operand:SVE_FULL_SD 2 "register_operand" "w")]
+ UNSPEC_SVE_COMPACT))]
"TARGET_SVE"
"compact\t%0.<Vetype>, %1, %2.<Vetype>"
)
;; Duplicate one element of a vector.
(define_insn "@aarch64_sve_dup_lane<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (vec_duplicate:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (vec_duplicate:SVE_FULL
(vec_select:<VEL>
- (match_operand:SVE_ALL 1 "register_operand" "w")
+ (match_operand:SVE_FULL 1 "register_operand" "w")
(parallel [(match_operand:SI 2 "const_int_operand")]))))]
"TARGET_SVE
&& IN_RANGE (INTVAL (operands[2]) * GET_MODE_SIZE (<VEL>mode), 0, 63)"
;; and architectural register lane numbering for op1 or op0, since the
;; two numbering schemes are the same for SVE.)
;;
-;; The vec_duplicate:SVE_ALL then copies memory lane number N of the
+;; The vec_duplicate:SVE_FULL then copies memory lane number N of the
;; V128 (and thus lane number op2 + N of op1) to lane numbers N + I * STEP
;; of op0. We therefore get the correct result for both endiannesses.
;;
;; for big-endian targets. In this fused pattern the two reverses cancel
;; each other out.
(define_insn "@aarch64_sve_dupq_lane<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (vec_duplicate:SVE_ALL
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (vec_duplicate:SVE_FULL
(vec_select:<V128>
- (match_operand:SVE_ALL 1 "register_operand" "w")
+ (match_operand:SVE_FULL 1 "register_operand" "w")
(match_operand 2 "ascending_int_parallel"))))]
"TARGET_SVE
&& (INTVAL (XVECEXP (operands[2], 0, 0))
;; Reverse the order of elements within a full vector.
;; REV reverses the lane order of operand 1 into operand 0.  The
;; operation is unpredicated and acts on the whole register, so it is
;; restricted to fully-occupied vectors (SVE_FULL).
(define_insn "@aarch64_sve_rev<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w")]
- UNSPEC_REV))]
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "w")]
+ UNSPEC_REV))]
"TARGET_SVE"
"rev\t%0.<Vetype>, %1.<Vetype>")
;; Like EXT, but start at the first active element.
(define_insn "@aarch64_sve_splice<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, ?&w")
- (unspec:SVE_ALL [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
- (match_operand:SVE_ALL 2 "register_operand" "0, w")
- (match_operand:SVE_ALL 3 "register_operand" "w, w")]
- UNSPEC_SVE_SPLICE))]
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL
+ [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
+ (match_operand:SVE_FULL 2 "register_operand" "0, w")
+ (match_operand:SVE_FULL 3 "register_operand" "w, w")]
+ UNSPEC_SVE_SPLICE))]
"TARGET_SVE"
"@
splice\t%0.<Vetype>, %1, %0.<Vetype>, %3.<Vetype>
;; Permutes that take half the elements from one vector and half the
;; elements from the other.
;; Two-input permute: <perm_insn> is the mnemonic selected by the
;; PERMUTE iterator (presumably the ZIP/UZP/TRN family, per the comment
;; above — confirm against the iterator definition in iterators.md).
;; Operands 1 and 2 supply the two source halves; result in operand 0.
(define_insn "@aarch64_sve_<perm_insn><mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w")
- (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "w")
- (match_operand:SVE_ALL 2 "register_operand" "w")]
- PERMUTE))]
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "w")
+ (match_operand:SVE_FULL 2 "register_operand" "w")]
+ PERMUTE))]
"TARGET_SVE"
"<perm_insn>\t%0.<Vetype>, %1.<Vetype>, %2.<Vetype>"
)
;; Concatenate two vectors and extract a subvector. Note that the
;; immediate (third) operand is the lane index not the byte index.
(define_insn "@aarch64_sve_ext<mode>"
- [(set (match_operand:SVE_ALL 0 "register_operand" "=w, ?&w")
- (unspec:SVE_ALL [(match_operand:SVE_ALL 1 "register_operand" "0, w")
- (match_operand:SVE_ALL 2 "register_operand" "w, w")
- (match_operand:SI 3 "const_int_operand")]
- UNSPEC_EXT))]
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL 2 "register_operand" "w, w")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_EXT))]
"TARGET_SVE
&& IN_RANGE (INTVAL (operands[3]) * GET_MODE_SIZE (<VEL>mode), 0, 255)"
{
;; Integer pack. Use UZP1 on the narrower type, which discards
;; the high part of each wide element.
(define_insn "vec_pack_trunc_<Vwide>"
- [(set (match_operand:SVE_BHSI 0 "register_operand" "=w")
- (unspec:SVE_BHSI
+ [(set (match_operand:SVE_FULL_BHSI 0 "register_operand" "=w")
+ (unspec:SVE_FULL_BHSI
[(match_operand:<VWIDE> 1 "register_operand" "w")
(match_operand:<VWIDE> 2 "register_operand" "w")]
UNSPEC_PACK))]
;; Unpack the low or high half of a vector, where "high" refers to
;; the low-numbered lanes for big-endian and the high-numbered lanes
;; for little-endian.
;; Expander only: choose between the UNPKHI and UNPKLO instruction at
;; expand time, depending on which register half holds the optab's
;; "high" lanes for this endianness (<hi_lanes_optab>), then emit that
;; insn directly and finish via DONE.
-(define_expand "vec_unpack<su>_<perm_hilo>_<SVE_BHSI:mode>"
+(define_expand "vec_unpack<su>_<perm_hilo>_<SVE_FULL_BHSI:mode>"
[(match_operand:<VWIDE> 0 "register_operand")
- (unspec:<VWIDE> [(match_operand:SVE_BHSI 1 "register_operand")] UNPACK)]
+ (unspec:<VWIDE>
+ [(match_operand:SVE_FULL_BHSI 1 "register_operand")] UNPACK)]
"TARGET_SVE"
{
emit_insn ((<hi_lanes_optab>
- ? gen_aarch64_sve_<su>unpkhi_<SVE_BHSI:mode>
- : gen_aarch64_sve_<su>unpklo_<SVE_BHSI:mode>)
+ ? gen_aarch64_sve_<su>unpkhi_<SVE_FULL_BHSI:mode>
+ : gen_aarch64_sve_<su>unpklo_<SVE_FULL_BHSI:mode>)
(operands[0], operands[1]));
DONE;
}
)
;; [SU]UNPKHI/[SU]UNPKLO: sign- or zero-extend (<su>) one half of the
;; narrow-element operand 1 into the wider-element vector operand 0
;; (<Vetype> -> <Vewtype>).
-(define_insn "@aarch64_sve_<su>unpk<perm_hilo>_<SVE_BHSI:mode>"
+(define_insn "@aarch64_sve_<su>unpk<perm_hilo>_<SVE_FULL_BHSI:mode>"
[(set (match_operand:<VWIDE> 0 "register_operand" "=w")
- (unspec:<VWIDE> [(match_operand:SVE_BHSI 1 "register_operand" "w")]
- UNPACK))]
+ (unspec:<VWIDE>
+ [(match_operand:SVE_FULL_BHSI 1 "register_operand" "w")]
+ UNPACK))]
"TARGET_SVE"
"<su>unpk<perm_hilo>\t%0.<Vewtype>, %1.<Vetype>"
)
(unspec:<V_INT_EQUIV>
[(match_dup 2)
(const_int SVE_RELAXED_GP)
- (match_operand:SVE_F 1 "register_operand")]
+ (match_operand:SVE_FULL_F 1 "register_operand")]
SVE_COND_FCVTI))]
"TARGET_SVE"
{
)
;; Predicated float-to-integer conversion, either to the same width or wider.
;; FCVTZ<su>, predicated by operand 1; operand 3 records how strict the
;; governing predicate is (aarch64_sve_gp_strictness).  The elem_bits
;; test limits this pattern to same-width-or-widening conversions, as
;; the "nontrunc" name and the comment above indicate.
-(define_insn "@aarch64_sve_<optab>_nontrunc<SVE_F:mode><SVE_HSDI:mode>"
- [(set (match_operand:SVE_HSDI 0 "register_operand" "=w")
- (unspec:SVE_HSDI
- [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand" "Upl")
+(define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>"
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w")
+ (unspec:SVE_FULL_HSDI
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")]
SVE_COND_FCVTI))]
- "TARGET_SVE && <SVE_HSDI:elem_bits> >= <SVE_F:elem_bits>"
- "fcvtz<su>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_F:Vetype>"
+ "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
+ "fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
)
;; Predicated narrowing float-to-integer conversion.
;; Predicated float-to-integer conversion with merging, either to the same
;; width or wider.
;; Merging form of the float-to-integer conversion: the inner unspec
;; performs the conversion under the duplicated predicate (match_dup 1,
;; with SVE_STRICT_GP), and the outer UNSPEC_SEL takes converted lanes
;; where the predicate is true and operand 3 (register or zero) elsewhere.
-(define_expand "@cond_<optab>_nontrunc<SVE_F:mode><SVE_HSDI:mode>"
- [(set (match_operand:SVE_HSDI 0 "register_operand")
- (unspec:SVE_HSDI
- [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand")
- (unspec:SVE_HSDI
+(define_expand "@cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>"
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand")
+ (unspec:SVE_FULL_HSDI
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
+ (unspec:SVE_FULL_HSDI
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_F 2 "register_operand")]
+ (match_operand:SVE_FULL_F 2 "register_operand")]
SVE_COND_FCVTI)
- (match_operand:SVE_HSDI 3 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
- "TARGET_SVE && <SVE_HSDI:elem_bits> >= <SVE_F:elem_bits>"
+ "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
)
;; The first alternative doesn't need the earlyclobber, but the only case
;; the same register (despite having different modes). Making all the
;; alternatives earlyclobber makes things more consistent for the
;; register allocator.
-(define_insn_and_rewrite "*cond_<optab>_nontrunc<SVE_F:mode><SVE_HSDI:mode>"
- [(set (match_operand:SVE_HSDI 0 "register_operand" "=&w, &w, ?&w")
- (unspec:SVE_HSDI
- [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (unspec:SVE_HSDI
+(define_insn_and_rewrite "*cond_<optab>_nontrunc<SVE_FULL_F:mode><SVE_FULL_HSDI:mode>"
+ [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=&w, &w, ?&w")
+ (unspec:SVE_FULL_HSDI
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ (unspec:SVE_FULL_HSDI
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_F 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_F 2 "register_operand" "w, w, w")]
SVE_COND_FCVTI)
- (match_operand:SVE_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
"TARGET_SVE
- && <SVE_HSDI:elem_bits> >= <SVE_F:elem_bits>
+ && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>
&& aarch64_sve_pred_dominates_p (&operands[4], operands[1])"
"@
- fcvtz<su>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_F:Vetype>
- movprfx\t%0.<SVE_HSDI:Vetype>, %1/z, %2.<SVE_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_F:Vetype>
- movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_HSDI:Vetype>, %1/m, %2.<SVE_F:Vetype>"
+ fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>
+ movprfx\t%0, %3\;fcvtz<su>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_F:Vetype>"
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
;; Unpredicated conversion of integers to floats of the same size
;; (HI to HF, SI to SF or DI to DF).
(define_expand "<optab><v_int_equiv><mode>2"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
[(match_dup 2)
(const_int SVE_RELAXED_GP)
(match_operand:<V_INT_EQUIV> 1 "register_operand")]
;; Predicated integer-to-float conversion, either to the same width or
;; narrower.
;; [SU]CVTF, predicated by operand 1 with a strictness flag in operand 3.
;; The elem_bits test requires the integer source to be at least as wide
;; as the float result, i.e. same-width or narrowing ("nonextend"), as
;; the comment above says.
-(define_insn "@aarch64_sve_<optab>_nonextend<SVE_HSDI:mode><SVE_F:mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=w")
- (unspec:SVE_F
- [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand" "Upl")
+(define_insn "@aarch64_sve_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>"
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=w")
+ (unspec:SVE_FULL_F
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_HSDI 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand" "w")]
SVE_COND_ICVTF))]
- "TARGET_SVE && <SVE_HSDI:elem_bits> >= <SVE_F:elem_bits>"
- "<su>cvtf\t%0.<SVE_F:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>"
+ "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
+ "<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
)
;; Predicated widening integer-to-float conversion.
;; Predicated integer-to-float conversion with merging, either to the same
;; width or narrower.
;; Merging form of the integer-to-float conversion: convert operand 2
;; under the duplicated strict predicate, then UNSPEC_SEL merges the
;; converted lanes with fallback operand 3 (register or zero) in the
;; inactive lanes.
-(define_expand "@cond_<optab>_nonextend<SVE_HSDI:mode><SVE_F:mode>"
- [(set (match_operand:SVE_F 0 "register_operand")
- (unspec:SVE_F
- [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand")
- (unspec:SVE_F
+(define_expand "@cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>"
+ [(set (match_operand:SVE_FULL_F 0 "register_operand")
+ (unspec:SVE_FULL_F
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand")
+ (unspec:SVE_FULL_F
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_HSDI 2 "register_operand")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand")]
SVE_COND_ICVTF)
- (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
- "TARGET_SVE && <SVE_HSDI:elem_bits> >= <SVE_F:elem_bits>"
+ "TARGET_SVE && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>"
)
;; The first alternative doesn't need the earlyclobber, but the only case
;; the same register (despite having different modes). Making all the
;; alternatives earlyclobber makes things more consistent for the
;; register allocator.
-(define_insn_and_rewrite "*cond_<optab>_nonextend<SVE_HSDI:mode><SVE_F:mode>"
- [(set (match_operand:SVE_F 0 "register_operand" "=&w, &w, ?&w")
- (unspec:SVE_F
- [(match_operand:<SVE_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (unspec:SVE_F
+(define_insn_and_rewrite "*cond_<optab>_nonextend<SVE_FULL_HSDI:mode><SVE_FULL_F:mode>"
+ [(set (match_operand:SVE_FULL_F 0 "register_operand" "=&w, &w, ?&w")
+ (unspec:SVE_FULL_F
+ [(match_operand:<SVE_FULL_HSDI:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ (unspec:SVE_FULL_F
[(match_operand 4)
(match_operand:SI 5 "aarch64_sve_gp_strictness")
- (match_operand:SVE_HSDI 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")]
SVE_COND_ICVTF)
- (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
"TARGET_SVE
- && <SVE_HSDI:elem_bits> >= <SVE_F:elem_bits>
+ && <SVE_FULL_HSDI:elem_bits> >= <SVE_FULL_F:elem_bits>
&& aarch64_sve_pred_dominates_p (&operands[4], operands[1])"
"@
- <su>cvtf\t%0.<SVE_F:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
- movprfx\t%0.<SVE_HSDI:Vetype>, %1/z, %2.<SVE_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_F:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>
- movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_F:Vetype>, %1/m, %2.<SVE_HSDI:Vetype>"
+ <su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+ movprfx\t%0, %3\;<su>cvtf\t%0.<SVE_FULL_F:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
"&& !rtx_equal_p (operands[1], operands[4])"
{
operands[4] = copy_rtx (operands[1]);
;; the results into a single vector.
(define_expand "vec_pack_trunc_<Vwide>"
[(set (match_dup 4)
- (unspec:SVE_HSF
+ (unspec:SVE_FULL_HSF
[(match_dup 3)
(const_int SVE_RELAXED_GP)
(match_operand:<VWIDE> 1 "register_operand")]
UNSPEC_COND_FCVT))
(set (match_dup 5)
- (unspec:SVE_HSF
+ (unspec:SVE_FULL_HSF
[(match_dup 3)
(const_int SVE_RELAXED_GP)
(match_operand:<VWIDE> 2 "register_operand")]
UNSPEC_COND_FCVT))
- (set (match_operand:SVE_HSF 0 "register_operand")
- (unspec:SVE_HSF [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))]
+ (set (match_operand:SVE_FULL_HSF 0 "register_operand")
+ (unspec:SVE_FULL_HSF [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))]
"TARGET_SVE"
{
operands[3] = aarch64_ptrue_reg (<VWIDE_PRED>mode);
)
;; Predicated float-to-float truncation.
;; FCVT float-to-float truncation, predicated by operand 1 with a
;; strictness flag in operand 3.  The strict ">" elem_bits test makes
;; this a genuine narrowing (S/D source to H/S result).
-(define_insn "@aarch64_sve_<optab>_trunc<SVE_SDF:mode><SVE_HSF:mode>"
- [(set (match_operand:SVE_HSF 0 "register_operand" "=w")
- (unspec:SVE_HSF
- [(match_operand:<SVE_SDF:VPRED> 1 "register_operand" "Upl")
+(define_insn "@aarch64_sve_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w")
+ (unspec:SVE_FULL_HSF
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_SDF 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_SDF 2 "register_operand" "w")]
SVE_COND_FCVT))]
- "TARGET_SVE && <SVE_SDF:elem_bits> > <SVE_HSF:elem_bits>"
- "fcvt\t%0.<SVE_HSF:Vetype>, %1/m, %2.<SVE_SDF:Vetype>"
+ "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
+ "fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
)
;; Predicated float-to-float truncation with merging.
;; Merging form of the float-to-float truncation: truncate operand 2
;; under the duplicated strict predicate, with UNSPEC_SEL supplying
;; fallback operand 3 (register or zero) for inactive lanes.
-(define_expand "@cond_<optab>_trunc<SVE_SDF:mode><SVE_HSF:mode>"
- [(set (match_operand:SVE_HSF 0 "register_operand")
- (unspec:SVE_HSF
- [(match_operand:<SVE_SDF:VPRED> 1 "register_operand")
- (unspec:SVE_HSF
+(define_expand "@cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand")
+ (unspec:SVE_FULL_HSF
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
+ (unspec:SVE_FULL_HSF
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_SDF 2 "register_operand")]
+ (match_operand:SVE_FULL_SDF 2 "register_operand")]
SVE_COND_FCVT)
- (match_operand:SVE_HSF 3 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_HSF 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
- "TARGET_SVE && <SVE_SDF:elem_bits> > <SVE_HSF:elem_bits>"
+ "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
)
;; Insn for the merging truncation.  Three alternatives, keyed on the
;; fallback operand 3's constraint ("0, Dz, w"):
;;   1. fallback tied to the destination -> plain predicated FCVT;
;;   2. zero fallback -> zeroing MOVPRFX then FCVT;
;;   3. general register fallback -> MOVPRFX from operand 3 then FCVT
;; (the movprfx attribute marks alternatives 2 and 3 accordingly).
-(define_insn "*cond_<optab>_trunc<SVE_SDF:mode><SVE_HSF:mode>"
- [(set (match_operand:SVE_HSF 0 "register_operand" "=w, ?&w, ?&w")
- (unspec:SVE_HSF
- [(match_operand:<SVE_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (unspec:SVE_HSF
+(define_insn "*cond_<optab>_trunc<SVE_FULL_SDF:mode><SVE_FULL_HSF:mode>"
+ [(set (match_operand:SVE_FULL_HSF 0 "register_operand" "=w, ?&w, ?&w")
+ (unspec:SVE_FULL_HSF
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ (unspec:SVE_FULL_HSF
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_SDF 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_SDF 2 "register_operand" "w, w, w")]
SVE_COND_FCVT)
- (match_operand:SVE_HSF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_HSF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
- "TARGET_SVE && <SVE_SDF:elem_bits> > <SVE_HSF:elem_bits>"
+ "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
"@
- fcvt\t%0.<SVE_HSF:Vetype>, %1/m, %2.<SVE_SDF:Vetype>
- movprfx\t%0.<SVE_SDF:Vetype>, %1/z, %2.<SVE_SDF:Vetype>\;fcvt\t%0.<SVE_HSF:Vetype>, %1/m, %2.<SVE_SDF:Vetype>
- movprfx\t%0, %3\;fcvt\t%0.<SVE_HSF:Vetype>, %1/m, %2.<SVE_SDF:Vetype>"
+ fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>
+ movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_HSF:Vetype>, %1/m, %2.<SVE_FULL_SDF:Vetype>"
[(set_attr "movprfx" "*,yes,yes")]
)
;; unpacked source.
;; NOTE(review): diff hunk — only the SVE_HSF -> SVE_FULL_HSF iterator
;; rename (the unspec is also re-wrapped onto two lines).  Expander for
;; the float vec_unpacks_hi/lo optabs: widens operand 1 into the
;; <VWIDE> destination, with the hi/lo half chosen by <perm_hilo>.
;; NOTE(review): the C preparation body appears truncated in this
;; excerpt — the ZIP comment is unterminated and no closing brace is
;; visible before the final ")"; verify against the full file.
(define_expand "vec_unpacks_<perm_hilo>_<mode>"
[(match_operand:<VWIDE> 0 "register_operand")
- (unspec:SVE_HSF [(match_operand:SVE_HSF 1 "register_operand")]
- UNPACK_UNSIGNED)]
+ (unspec:SVE_FULL_HSF
+ [(match_operand:SVE_FULL_HSF 1 "register_operand")]
+ UNPACK_UNSIGNED)]
"TARGET_SVE"
{
/* Use ZIP to do the unpack, since we don't care about the upper halves
)
;; Predicated float-to-float extension.
;; NOTE(review): diff hunk — pure iterator rename (SVE_HSF/SVE_SDF ->
;; SVE_FULL_HSF/SVE_FULL_SDF), including in the pattern name and the
;; FCVT output template; no semantic change.
;; Predicated FCVT widening: narrower HSF source (operand 2) to wider
;; SDF destination, governed by predicate operand 1; operand 3 carries
;; the FP-exception strictness (aarch64_sve_gp_strictness).
-(define_insn "@aarch64_sve_<optab>_nontrunc<SVE_HSF:mode><SVE_SDF:mode>"
- [(set (match_operand:SVE_SDF 0 "register_operand" "=w")
- (unspec:SVE_SDF
- [(match_operand:<SVE_SDF:VPRED> 1 "register_operand" "Upl")
+(define_insn "@aarch64_sve_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
+ [(set (match_operand:SVE_FULL_SDF 0 "register_operand" "=w")
+ (unspec:SVE_FULL_SDF
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl")
(match_operand:SI 3 "aarch64_sve_gp_strictness")
- (match_operand:SVE_HSF 2 "register_operand" "w")]
+ (match_operand:SVE_FULL_HSF 2 "register_operand" "w")]
SVE_COND_FCVT))]
- "TARGET_SVE && <SVE_SDF:elem_bits> > <SVE_HSF:elem_bits>"
- "fcvt\t%0.<SVE_SDF:Vetype>, %1/m, %2.<SVE_HSF:Vetype>"
+ "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
+ "fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
)
;; Predicated float-to-float extension with merging.
;; NOTE(review): diff hunk — pure iterator rename, no semantic change.
;; Expander for the merging form of the widening FCVT: the inner
;; SVE_COND_FCVT unspec (strict GP, per the SVE_STRICT_GP const_int)
;; is wrapped in UNSPEC_SEL so inactive lanes take operand 3
;; (register or zero).  Matched by *cond_<optab>_nontrunc below.
-(define_expand "@cond_<optab>_nontrunc<SVE_HSF:mode><SVE_SDF:mode>"
- [(set (match_operand:SVE_SDF 0 "register_operand")
- (unspec:SVE_SDF
- [(match_operand:<SVE_SDF:VPRED> 1 "register_operand")
- (unspec:SVE_SDF
+(define_expand "@cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
+ [(set (match_operand:SVE_FULL_SDF 0 "register_operand")
+ (unspec:SVE_FULL_SDF
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand")
+ (unspec:SVE_FULL_SDF
[(match_dup 1)
(const_int SVE_STRICT_GP)
- (match_operand:SVE_HSF 2 "register_operand")]
+ (match_operand:SVE_FULL_HSF 2 "register_operand")]
SVE_COND_FCVT)
- (match_operand:SVE_SDF 3 "aarch64_simd_reg_or_zero")]
+ (match_operand:SVE_FULL_SDF 3 "aarch64_simd_reg_or_zero")]
UNSPEC_SEL))]
- "TARGET_SVE && <SVE_SDF:elem_bits> > <SVE_HSF:elem_bits>"
+ "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
)
;; NOTE(review): diff hunk — pure iterator rename, mirroring the
;; *cond_<optab>_trunc hunk above; no semantic change.
;; Predicated widening FCVT with merging, insn form of the expander
;; above.  Three alternatives for operand 3 (inactive-lane fallback):
;; tied destination ("0"), zero ("Dz"), distinct register ("w"); the
;; last two prefix the FCVT with MOVPRFX, as recorded in the movprfx
;; attribute.  Here both MOVPRFX and FCVT write %0 with the wider
;; destination element size <SVE_FULL_SDF:Vetype>.
-(define_insn "*cond_<optab>_nontrunc<SVE_HSF:mode><SVE_SDF:mode>"
- [(set (match_operand:SVE_SDF 0 "register_operand" "=w, ?&w, ?&w")
- (unspec:SVE_SDF
- [(match_operand:<SVE_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
- (unspec:SVE_SDF
+(define_insn "*cond_<optab>_nontrunc<SVE_FULL_HSF:mode><SVE_FULL_SDF:mode>"
+ [(set (match_operand:SVE_FULL_SDF 0 "register_operand" "=w, ?&w, ?&w")
+ (unspec:SVE_FULL_SDF
+ [(match_operand:<SVE_FULL_SDF:VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ (unspec:SVE_FULL_SDF
[(match_dup 1)
(match_operand:SI 4 "aarch64_sve_gp_strictness")
- (match_operand:SVE_HSF 2 "register_operand" "w, w, w")]
+ (match_operand:SVE_FULL_HSF 2 "register_operand" "w, w, w")]
SVE_COND_FCVT)
- (match_operand:SVE_SDF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+ (match_operand:SVE_FULL_SDF 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
UNSPEC_SEL))]
- "TARGET_SVE && <SVE_SDF:elem_bits> > <SVE_HSF:elem_bits>"
+ "TARGET_SVE && <SVE_FULL_SDF:elem_bits> > <SVE_FULL_HSF:elem_bits>"
"@
- fcvt\t%0.<SVE_SDF:Vetype>, %1/m, %2.<SVE_HSF:Vetype>
- movprfx\t%0.<SVE_SDF:Vetype>, %1/z, %2.<SVE_SDF:Vetype>\;fcvt\t%0.<SVE_SDF:Vetype>, %1/m, %2.<SVE_HSF:Vetype>
- movprfx\t%0, %3\;fcvt\t%0.<SVE_SDF:Vetype>, %1/m, %2.<SVE_HSF:Vetype>"
+ fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ movprfx\t%0.<SVE_FULL_SDF:Vetype>, %1/z, %2.<SVE_FULL_SDF:Vetype>\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>
+ movprfx\t%0, %3\;fcvt\t%0.<SVE_FULL_SDF:Vetype>, %1/m, %2.<SVE_FULL_HSF:Vetype>"
[(set_attr "movprfx" "*,yes,yes")]
)