;; Machine description for AArch64 architecture.
-;; Copyright (C) 2009-2016 Free Software Foundation, Inc.
+;; Copyright (C) 2009-2018 Free Software Foundation, Inc.
;; Contributed by ARM Ltd.
;;
;; This file is part of GCC.
;; Iterator for General Purpose Integer registers (32- and 64-bit modes)
(define_mode_iterator GPI [SI DI])
+;; Iterator for HI, SI, DI, some instructions can only work on these modes.
+(define_mode_iterator GPI_I16 [(HI "AARCH64_ISA_F16") SI DI])
+
;; Iterator for QI and HI modes
(define_mode_iterator SHORT [QI HI])
;; Iterator for all integer modes (up to 64-bit)
(define_mode_iterator ALLI [QI HI SI DI])
+;; Iterator for all integer modes (up to 128-bit)
+(define_mode_iterator ALLI_TI [QI HI SI DI TI])
+
;; Iterator for all integer modes that can be extended (up to 64-bit)
(define_mode_iterator ALLX [QI HI SI])
;; Iterator for General Purpose Floating-point registers (32- and 64-bit modes)
(define_mode_iterator GPF [SF DF])
+;; Iterator for all scalar floating point modes (HF, SF, DF)
+(define_mode_iterator GPF_F16 [(HF "AARCH64_ISA_F16") SF DF])
+
+;; Iterator for all scalar floating point modes (HF, SF, DF)
+(define_mode_iterator GPF_HF [HF SF DF])
+
;; Iterator for all scalar floating point modes (HF, SF, DF and TF)
(define_mode_iterator GPF_TF_F16 [HF SF DF TF])
;; Iterator for all scalar floating point modes (SF, DF and TF)
(define_mode_iterator GPF_TF [SF DF TF])
-;; Integer vector modes.
+;; Integer Advanced SIMD modes.
(define_mode_iterator VDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
-;; vector and scalar, 64 & 128-bit container, all integer modes
+;; Advanced SIMD and scalar, 64 & 128-bit container, all integer modes.
(define_mode_iterator VSDQ_I [V8QI V16QI V4HI V8HI V2SI V4SI V2DI QI HI SI DI])
-;; vector and scalar, 64 & 128-bit container: all vector integer modes;
-;; 64-bit scalar integer mode
+;; Advanced SIMD and scalar, 64 & 128-bit container: all Advanced SIMD
+;; integer modes; 64-bit scalar integer mode.
(define_mode_iterator VSDQ_I_DI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI DI])
;; Double vector modes.
(define_mode_iterator VD [V8QI V4HI V4HF V2SI V2SF])
-;; vector, 64-bit container, all integer modes
+;; All modes stored in registers d0-d31.
+(define_mode_iterator DREG [V8QI V4HI V4HF V2SI V2SF DF])
+
+;; Copy of the above.
+(define_mode_iterator DREG2 [V8QI V4HI V4HF V2SI V2SF DF])
+
+;; Advanced SIMD, 64-bit container, all integer modes.
(define_mode_iterator VD_BHSI [V8QI V4HI V2SI])
;; 128 and 64-bit container; 8, 16, 32-bit vector integer modes
;; Quad vector modes.
(define_mode_iterator VQ [V16QI V8HI V4SI V2DI V8HF V4SF V2DF])
+;; Copy of the above.
+(define_mode_iterator VQ2 [V16QI V8HI V4SI V2DI V8HF V4SF V2DF])
+
+;; Quad integer vector modes.
+(define_mode_iterator VQ_I [V16QI V8HI V4SI V2DI])
+
;; VQ without 2 element modes.
(define_mode_iterator VQ_NO2E [V16QI V8HI V4SI V8HF V4SF])
;; pointer-sized quantities. Exactly one of the two alternatives will match.
(define_mode_iterator PTR [(SI "ptr_mode == SImode") (DI "ptr_mode == DImode")])
-;; Vector Float modes suitable for moving, loading and storing.
+;; Advanced SIMD Float modes suitable for moving, loading and storing.
(define_mode_iterator VDQF_F16 [V4HF V8HF V2SF V4SF V2DF])
-;; Vector Float modes.
+;; Advanced SIMD Float modes.
(define_mode_iterator VDQF [V2SF V4SF V2DF])
(define_mode_iterator VHSDF [(V4HF "TARGET_SIMD_F16INST")
(V8HF "TARGET_SIMD_F16INST")
V2SF V4SF V2DF])
-;; Vector Float modes, and DF.
-(define_mode_iterator VDQF_DF [V2SF V4SF V2DF DF])
+;; Advanced SIMD Float modes, and DF.
(define_mode_iterator VHSDF_DF [(V4HF "TARGET_SIMD_F16INST")
(V8HF "TARGET_SIMD_F16INST")
V2SF V4SF V2DF DF])
-(define_mode_iterator VHSDF_SDF [(V4HF "TARGET_SIMD_F16INST")
- (V8HF "TARGET_SIMD_F16INST")
- V2SF V4SF V2DF SF DF])
+(define_mode_iterator VHSDF_HSDF [(V4HF "TARGET_SIMD_F16INST")
+ (V8HF "TARGET_SIMD_F16INST")
+ V2SF V4SF V2DF
+ (HF "TARGET_SIMD_F16INST")
+ SF DF])
-;; Vector single Float modes.
+;; Advanced SIMD single Float modes.
(define_mode_iterator VDQSF [V2SF V4SF])
;; Quad vector Float modes with half/single elements.
;; Modes suitable to use as the return type of a vcond expression.
(define_mode_iterator VDQF_COND [V2SF V2SI V4SF V4SI V2DF V2DI])
-;; All Float modes.
+;; All scalar and Advanced SIMD Float modes.
(define_mode_iterator VALLF [V2SF V4SF V2DF SF DF])
-;; Vector Float modes with 2 elements.
+;; Advanced SIMD Float modes with 2 elements.
(define_mode_iterator V2F [V2SF V2DF])
-;; All vector modes on which we support any arithmetic operations.
+;; All Advanced SIMD modes on which we support any arithmetic operations.
(define_mode_iterator VALL [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF])
-;; All vector modes suitable for moving, loading, and storing.
+;; All Advanced SIMD modes suitable for moving, loading, and storing.
(define_mode_iterator VALL_F16 [V8QI V16QI V4HI V8HI V2SI V4SI V2DI
V4HF V8HF V2SF V4SF V2DF])
-;; All vector modes barring HF modes, plus DI.
+;; The VALL_F16 modes except the 128-bit 2-element ones.
+(define_mode_iterator VALL_F16_NO_V2Q [V8QI V16QI V4HI V8HI V2SI V4SI
+ V4HF V8HF V2SF V4SF])
+
+;; All Advanced SIMD modes barring HF modes, plus DI.
(define_mode_iterator VALLDI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V2SF V4SF V2DF DI])
-;; All vector modes and DI.
+;; All Advanced SIMD modes and DI.
(define_mode_iterator VALLDI_F16 [V8QI V16QI V4HI V8HI V2SI V4SI V2DI
V4HF V8HF V2SF V4SF V2DF DI])
-;; All vector modes, plus DI and DF.
+;; All Advanced SIMD modes, plus DI and DF.
(define_mode_iterator VALLDIF [V8QI V16QI V4HI V8HI V2SI V4SI
V2DI V4HF V8HF V2SF V4SF V2DF DI DF])
-;; Vector modes for Integer reduction across lanes.
+;; Advanced SIMD modes for Integer reduction across lanes.
(define_mode_iterator VDQV [V8QI V16QI V4HI V8HI V4SI V2DI])
-;; Vector modes(except V2DI) for Integer reduction across lanes.
+;; Advanced SIMD modes (except V2DI) for Integer reduction across lanes.
(define_mode_iterator VDQV_S [V8QI V16QI V4HI V8HI V4SI])
;; All double integer narrow-able modes.
;; All quad integer narrow-able modes.
(define_mode_iterator VQN [V8HI V4SI V2DI])
-;; Vector and scalar 128-bit container: narrowable 16, 32, 64-bit integer modes
+;; Advanced SIMD and scalar 128-bit container: narrowable 16, 32, 64-bit
+;; integer modes
(define_mode_iterator VSQN_HSDI [V8HI V4SI V2DI HI SI DI])
;; All quad integer widen-able modes.
;; Double vector modes for combines.
(define_mode_iterator VDC [V8QI V4HI V4HF V2SI V2SF DI DF])
-;; Vector modes except double int.
+;; Advanced SIMD modes except double int.
(define_mode_iterator VDQIF [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DF])
+(define_mode_iterator VDQIF_F16 [V8QI V16QI V4HI V8HI V2SI V4SI
+ V4HF V8HF V2SF V4SF V2DF])
-;; Vector modes for S type.
+;; Advanced SIMD modes for S type.
(define_mode_iterator VDQ_SI [V2SI V4SI])
-;; Vector modes for S and D
+;; Advanced SIMD modes for S and D.
(define_mode_iterator VDQ_SDI [V2SI V4SI V2DI])
-;; Scalar and Vector modes for S and D
+;; Advanced SIMD modes for H, S and D.
+(define_mode_iterator VDQ_HSDI [(V4HI "TARGET_SIMD_F16INST")
+ (V8HI "TARGET_SIMD_F16INST")
+ V2SI V4SI V2DI])
+
+;; Scalar and Advanced SIMD modes for S and D.
(define_mode_iterator VSDQ_SDI [V2SI V4SI V2DI SI DI])
-;; Vector modes for Q and H types.
+;; Scalar and Advanced SIMD modes for S and D, Advanced SIMD modes for H.
+(define_mode_iterator VSDQ_HSDI [(V4HI "TARGET_SIMD_F16INST")
+ (V8HI "TARGET_SIMD_F16INST")
+ V2SI V4SI V2DI
+ (HI "TARGET_SIMD_F16INST")
+ SI DI])
+
+;; Advanced SIMD modes for Q and H types.
(define_mode_iterator VDQQH [V8QI V16QI V4HI V8HI])
-;; Vector modes for H and S types.
+;; Advanced SIMD modes for H and S types.
(define_mode_iterator VDQHS [V4HI V8HI V2SI V4SI])
-;; Vector modes for H, S and D types.
+;; Advanced SIMD modes for H, S and D types.
(define_mode_iterator VDQHSD [V4HI V8HI V2SI V4SI V2DI])
-;; Vector and scalar integer modes for H and S
+;; Advanced SIMD and scalar integer modes for H and S.
(define_mode_iterator VSDQ_HSI [V4HI V8HI V2SI V4SI HI SI])
-;; Vector and scalar 64-bit container: 16, 32-bit integer modes
+;; Advanced SIMD and scalar 64-bit container: 16, 32-bit integer modes.
(define_mode_iterator VSD_HSI [V4HI V2SI HI SI])
-;; Vector 64-bit container: 16, 32-bit integer modes
+;; Advanced SIMD 64-bit container: 16, 32-bit integer modes.
(define_mode_iterator VD_HSI [V4HI V2SI])
;; Scalar 64-bit container: 16, 32-bit integer modes
(define_mode_iterator SD_HSI [HI SI])
-;; Vector 64-bit container: 16, 32-bit integer modes
+;; Advanced SIMD 64-bit container: 16, 32-bit integer modes.
(define_mode_iterator VQ_HSI [V8HI V4SI])
;; All byte modes.
(define_mode_iterator TX [TI TF])
-;; Opaque structure modes.
+;; Advanced SIMD opaque structure modes.
(define_mode_iterator VSTRUCT [OI CI XI])
;; Double scalar modes
(define_mode_iterator DX [DI DF])
-;; Modes available for <f>mul lane operations.
-(define_mode_iterator VMUL [V4HI V8HI V2SI V4SI V2SF V4SF V2DF])
+;; Duplicate of the above
+(define_mode_iterator DX2 [DI DF])
+
+;; Single scalar modes
+(define_mode_iterator SX [SI SF])
-;; Modes available for <f>mul lane operations changing lane count.
+;; Duplicate of the above
+(define_mode_iterator SX2 [SI SF])
+
+;; Single and double integer and float modes
+(define_mode_iterator DSX [DF DI SF SI])
+
+
+;; Modes available for Advanced SIMD <f>mul lane operations.
+(define_mode_iterator VMUL [V4HI V8HI V2SI V4SI
+ (V4HF "TARGET_SIMD_F16INST")
+ (V8HF "TARGET_SIMD_F16INST")
+ V2SF V4SF V2DF])
+
+;; Modes available for Advanced SIMD <f>mul lane operations changing lane
+;; count.
(define_mode_iterator VMUL_CHANGE_NLANES [V4HI V8HI V2SI V4SI V2SF V4SF])
+;; All SVE vector modes.
+(define_mode_iterator SVE_ALL [VNx16QI VNx8HI VNx4SI VNx2DI
+ VNx8HF VNx4SF VNx2DF])
+
+;; All SVE vector structure modes.
+(define_mode_iterator SVE_STRUCT [VNx32QI VNx16HI VNx8SI VNx4DI
+ VNx16HF VNx8SF VNx4DF
+ VNx48QI VNx24HI VNx12SI VNx6DI
+ VNx24HF VNx12SF VNx6DF
+ VNx64QI VNx32HI VNx16SI VNx8DI
+ VNx32HF VNx16SF VNx8DF])
+
+;; All SVE vector modes that have 8-bit or 16-bit elements.
+(define_mode_iterator SVE_BH [VNx16QI VNx8HI VNx8HF])
+
+;; All SVE vector modes that have 8-bit, 16-bit or 32-bit elements.
+(define_mode_iterator SVE_BHS [VNx16QI VNx8HI VNx4SI VNx8HF VNx4SF])
+
+;; All SVE integer vector modes that have 8-bit, 16-bit or 32-bit elements.
+(define_mode_iterator SVE_BHSI [VNx16QI VNx8HI VNx4SI])
+
+;; All SVE integer vector modes that have 16-bit, 32-bit or 64-bit elements.
+;; (Distinct from SVE_BHSI above, which covers the 8/16/32-bit elements.)
+(define_mode_iterator SVE_HSDI [VNx8HI VNx4SI VNx2DI])
+
+;; All SVE floating-point vector modes that have 16-bit or 32-bit elements.
+(define_mode_iterator SVE_HSF [VNx8HF VNx4SF])
+
+;; All SVE vector modes that have 32-bit or 64-bit elements.
+(define_mode_iterator SVE_SD [VNx4SI VNx2DI VNx4SF VNx2DF])
+
+;; All SVE vector modes that have 32-bit elements.
+(define_mode_iterator SVE_S [VNx4SI VNx4SF])
+
+;; All SVE vector modes that have 64-bit elements.
+(define_mode_iterator SVE_D [VNx2DI VNx2DF])
+
+;; All SVE integer vector modes that have 32-bit or 64-bit elements.
+(define_mode_iterator SVE_SDI [VNx4SI VNx2DI])
+
+;; All SVE integer vector modes.
+(define_mode_iterator SVE_I [VNx16QI VNx8HI VNx4SI VNx2DI])
+
+;; All SVE floating-point vector modes.
+(define_mode_iterator SVE_F [VNx8HF VNx4SF VNx2DF])
+
+;; All SVE predicate modes.
+(define_mode_iterator PRED_ALL [VNx16BI VNx8BI VNx4BI VNx2BI])
+
+;; SVE predicate modes that control 8-bit, 16-bit or 32-bit elements.
+(define_mode_iterator PRED_BHS [VNx16BI VNx8BI VNx4BI])
+
;; ------------------------------------------------------------------
;; Unspec enumerations for Advance SIMD. These could well go into
;; aarch64.md but for their use in int_iterators here.
UNSPEC_TBL ; Used in vector permute patterns.
UNSPEC_TBX ; Used in vector permute patterns.
UNSPEC_CONCAT ; Used in vector permute patterns.
+
+ ;; The following permute unspecs are generated directly by
+ ;; aarch64_expand_vec_perm_const, so any changes to the underlying
+ ;; instructions would need a corresponding change there.
UNSPEC_ZIP1 ; Used in vector permute patterns.
UNSPEC_ZIP2 ; Used in vector permute patterns.
UNSPEC_UZP1 ; Used in vector permute patterns.
UNSPEC_UZP2 ; Used in vector permute patterns.
UNSPEC_TRN1 ; Used in vector permute patterns.
UNSPEC_TRN2 ; Used in vector permute patterns.
- UNSPEC_EXT ; Used in aarch64-simd.md.
+ UNSPEC_EXT ; Used in vector permute patterns.
UNSPEC_REV64 ; Used in vector reverse patterns (permute).
UNSPEC_REV32 ; Used in vector reverse patterns (permute).
UNSPEC_REV16 ; Used in vector reverse patterns (permute).
+
UNSPEC_AESE ; Used in aarch64-simd.md.
UNSPEC_AESD ; Used in aarch64-simd.md.
UNSPEC_AESMC ; Used in aarch64-simd.md.
UNSPEC_SQRDMLSH ; Used in aarch64-simd.md.
UNSPEC_FMAXNM ; Used in aarch64-simd.md.
UNSPEC_FMINNM ; Used in aarch64-simd.md.
+ UNSPEC_SDOT ; Used in aarch64-simd.md.
+ UNSPEC_UDOT ; Used in aarch64-simd.md.
+ UNSPEC_SM3SS1 ; Used in aarch64-simd.md.
+ UNSPEC_SM3TT1A ; Used in aarch64-simd.md.
+ UNSPEC_SM3TT1B ; Used in aarch64-simd.md.
+ UNSPEC_SM3TT2A ; Used in aarch64-simd.md.
+ UNSPEC_SM3TT2B ; Used in aarch64-simd.md.
+ UNSPEC_SM3PARTW1 ; Used in aarch64-simd.md.
+ UNSPEC_SM3PARTW2 ; Used in aarch64-simd.md.
+ UNSPEC_SM4E ; Used in aarch64-simd.md.
+ UNSPEC_SM4EKEY ; Used in aarch64-simd.md.
+ UNSPEC_SHA512H ; Used in aarch64-simd.md.
+ UNSPEC_SHA512H2 ; Used in aarch64-simd.md.
+ UNSPEC_SHA512SU0 ; Used in aarch64-simd.md.
+ UNSPEC_SHA512SU1 ; Used in aarch64-simd.md.
+ UNSPEC_FMLAL ; Used in aarch64-simd.md.
+ UNSPEC_FMLSL ; Used in aarch64-simd.md.
+ UNSPEC_FMLAL2 ; Used in aarch64-simd.md.
+ UNSPEC_FMLSL2 ; Used in aarch64-simd.md.
+ UNSPEC_SEL ; Used in aarch64-sve.md.
+ UNSPEC_ANDV ; Used in aarch64-sve.md.
+ UNSPEC_IORV ; Used in aarch64-sve.md.
+ UNSPEC_XORV ; Used in aarch64-sve.md.
+ UNSPEC_ANDF ; Used in aarch64-sve.md.
+ UNSPEC_IORF ; Used in aarch64-sve.md.
+ UNSPEC_XORF ; Used in aarch64-sve.md.
+ UNSPEC_SMUL_HIGHPART ; Used in aarch64-sve.md.
+ UNSPEC_UMUL_HIGHPART ; Used in aarch64-sve.md.
+ UNSPEC_COND_ADD ; Used in aarch64-sve.md.
+ UNSPEC_COND_SUB ; Used in aarch64-sve.md.
+ UNSPEC_COND_MUL ; Used in aarch64-sve.md.
+ UNSPEC_COND_DIV ; Used in aarch64-sve.md.
+ UNSPEC_COND_MAX ; Used in aarch64-sve.md.
+ UNSPEC_COND_MIN ; Used in aarch64-sve.md.
+ UNSPEC_COND_FMLA ; Used in aarch64-sve.md.
+ UNSPEC_COND_FMLS ; Used in aarch64-sve.md.
+ UNSPEC_COND_FNMLA ; Used in aarch64-sve.md.
+ UNSPEC_COND_FNMLS ; Used in aarch64-sve.md.
+ UNSPEC_COND_LT ; Used in aarch64-sve.md.
+ UNSPEC_COND_LE ; Used in aarch64-sve.md.
+ UNSPEC_COND_EQ ; Used in aarch64-sve.md.
+ UNSPEC_COND_NE ; Used in aarch64-sve.md.
+ UNSPEC_COND_GE ; Used in aarch64-sve.md.
+ UNSPEC_COND_GT ; Used in aarch64-sve.md.
+ UNSPEC_LASTB ; Used in aarch64-sve.md.
])
;; ------------------------------------------------------------------
;; 32-bit version and "%x0" in the 64-bit version.
(define_mode_attr w [(QI "w") (HI "w") (SI "w") (DI "x") (SF "s") (DF "d")])
+;; The size of access, in bytes.
+(define_mode_attr ldst_sz [(SI "4") (DI "8")])
+;; Likewise for load/store pair.
+(define_mode_attr ldpstp_sz [(SI "8") (DI "16")])
+
;; For inequal width int to float conversion
-(define_mode_attr w1 [(SF "w") (DF "x")])
-(define_mode_attr w2 [(SF "x") (DF "w")])
+(define_mode_attr w1 [(HF "w") (SF "w") (DF "x")])
+(define_mode_attr w2 [(HF "x") (SF "x") (DF "w")])
+
+;; For width of fp registers in fcvt instruction
+(define_mode_attr fpw [(DI "s") (SI "d")])
(define_mode_attr short_mask [(HI "65535") (QI "255")])
;; For doubling width of an integer mode
(define_mode_attr DWI [(QI "HI") (HI "SI") (SI "DI") (DI "TI")])
+(define_mode_attr fcvt_change_mode [(SI "df") (DI "sf")])
+
+(define_mode_attr FCVT_CHANGE_MODE [(SI "DF") (DI "SF")])
+
;; For scalar usage of vector/FP registers
(define_mode_attr v [(QI "b") (HI "h") (SI "s") (DI "d")
- (SF "s") (DF "d")
+ (HF "h") (SF "s") (DF "d")
(V8QI "") (V16QI "")
(V4HI "") (V8HI "")
(V2SI "") (V4SI "")
(define_mode_attr rtn [(DI "d") (SI "")])
(define_mode_attr vas [(DI "") (SI ".2s")])
-;; Map a floating point mode to the appropriate register name prefix
-(define_mode_attr s [(SF "s") (DF "d")])
+;; Map a vector to the number of units in it, if the size of the mode
+;; is constant.
+(define_mode_attr nunits [(V8QI "8") (V16QI "16")
+ (V4HI "4") (V8HI "8")
+ (V2SI "2") (V4SI "4")
+ (V2DI "2")
+ (V4HF "4") (V8HF "8")
+ (V2SF "2") (V4SF "4")
+ (V1DF "1") (V2DF "2")
+ (DI "1") (DF "1")])
+
+;; Map a mode to the number of bits in it, if the size of the mode
+;; is constant.
+(define_mode_attr bitsize [(V8QI "64") (V16QI "128")
+ (V4HI "64") (V8HI "128")
+ (V2SI "64") (V4SI "128")
+ (V2DI "128")])
+
+;; Map a floating point or integer mode to the appropriate register name prefix
+(define_mode_attr s [(HF "h") (SF "s") (DF "d") (SI "s") (DI "d")])
;; Give the length suffix letter for a sign- or zero-extension.
(define_mode_attr size [(QI "b") (HI "h") (SI "w")])
;; Attribute to describe constants acceptable in logical operations
(define_mode_attr lconst [(SI "K") (DI "L")])
+;; Attribute to describe constants acceptable in logical and operations
+(define_mode_attr lconst2 [(SI "UsO") (DI "UsP")])
+
;; Map a mode to a specific constraint character.
(define_mode_attr cmode [(QI "q") (HI "h") (SI "s") (DI "d")])
+;; Map modes to Usg and Usj constraints for SISD right shifts
+(define_mode_attr cmode_simd [(SI "g") (DI "j")])
+
(define_mode_attr Vtype [(V8QI "8b") (V16QI "16b")
(V4HI "4h") (V8HI "8h")
(V2SI "2s") (V4SI "4s")
(V4SF ".4s") (V2DF ".2d")
(DI "") (SI "")
(HI "") (QI "")
- (TI "") (SF "")
- (DF "")])
+ (TI "") (HF "")
+ (SF "") (DF "")])
;; Register suffix narrowed modes for VQN.
(define_mode_attr Vmntype [(V8HI ".8b") (V4SI ".4h")
(HI "")])
;; Mode-to-individual element type mapping.
-(define_mode_attr Vetype [(V8QI "b") (V16QI "b")
- (V4HI "h") (V8HI "h")
- (V2SI "s") (V4SI "s")
- (V2DI "d") (V4HF "h")
- (V8HF "h") (V2SF "s")
- (V4SF "s") (V2DF "d")
+(define_mode_attr Vetype [(V8QI "b") (V16QI "b") (VNx16QI "b") (VNx16BI "b")
+ (V4HI "h") (V8HI "h") (VNx8HI "h") (VNx8BI "h")
+ (V2SI "s") (V4SI "s") (VNx4SI "s") (VNx4BI "s")
+ (V2DI "d") (VNx2DI "d") (VNx2BI "d")
+ (V4HF "h") (V8HF "h") (VNx8HF "h")
+ (V2SF "s") (V4SF "s") (VNx4SF "s")
+ (V2DF "d") (VNx2DF "d")
+ (HF "h")
(SF "s") (DF "d")
(QI "b") (HI "h")
(SI "s") (DI "d")])
+;; Equivalent of "size" for a vector element.
+(define_mode_attr Vesize [(VNx16QI "b")
+ (VNx8HI "h") (VNx8HF "h")
+ (VNx4SI "w") (VNx4SF "w")
+ (VNx2DI "d") (VNx2DF "d")
+ (VNx32QI "b") (VNx48QI "b") (VNx64QI "b")
+ (VNx16HI "h") (VNx24HI "h") (VNx32HI "h")
+ (VNx16HF "h") (VNx24HF "h") (VNx32HF "h")
+ (VNx8SI "w") (VNx12SI "w") (VNx16SI "w")
+ (VNx8SF "w") (VNx12SF "w") (VNx16SF "w")
+ (VNx4DI "d") (VNx6DI "d") (VNx8DI "d")
+ (VNx4DF "d") (VNx6DF "d") (VNx8DF "d")])
+
;; Vetype is used everywhere in scheduling type and assembly output,
;; sometimes they are not the same, for example HF modes on some
;; instructions. stype is defined to represent scheduling type
(SI "8b")])
;; Define element mode for each vector mode.
-(define_mode_attr VEL [(V8QI "QI") (V16QI "QI")
- (V4HI "HI") (V8HI "HI")
- (V2SI "SI") (V4SI "SI")
- (DI "DI") (V2DI "DI")
- (V4HF "HF") (V8HF "HF")
- (V2SF "SF") (V4SF "SF")
- (V2DF "DF") (DF "DF")
- (SI "SI") (HI "HI")
+(define_mode_attr VEL [(V8QI "QI") (V16QI "QI") (VNx16QI "QI")
+ (V4HI "HI") (V8HI "HI") (VNx8HI "HI")
+ (V2SI "SI") (V4SI "SI") (VNx4SI "SI")
+ (DI "DI") (V2DI "DI") (VNx2DI "DI")
+ (V4HF "HF") (V8HF "HF") (VNx8HF "HF")
+ (V2SF "SF") (V4SF "SF") (VNx4SF "SF")
+ (DF "DF") (V2DF "DF") (VNx2DF "DF")
+ (SI "SI") (HI "HI")
(QI "QI")])
+;; Define element mode for each vector mode (lower case).
+(define_mode_attr Vel [(V8QI "qi") (V16QI "qi") (VNx16QI "qi")
+ (V4HI "hi") (V8HI "hi") (VNx8HI "hi")
+ (V2SI "si") (V4SI "si") (VNx4SI "si")
+ (DI "di") (V2DI "di") (VNx2DI "di")
+ (V4HF "hf") (V8HF "hf") (VNx8HF "hf")
+ (V2SF "sf") (V4SF "sf") (VNx4SF "sf")
+ (V2DF "df") (DF "df") (VNx2DF "df")
+ (SI "si") (HI "hi")
+ (QI "qi")])
+
+;; Element mode with floating-point values replaced by like-sized integers.
+(define_mode_attr VEL_INT [(VNx16QI "QI")
+ (VNx8HI "HI") (VNx8HF "HI")
+ (VNx4SI "SI") (VNx4SF "SI")
+ (VNx2DI "DI") (VNx2DF "DI")])
+
+;; Gives the mode of the 128-bit lowpart of an SVE vector.
+(define_mode_attr V128 [(VNx16QI "V16QI")
+ (VNx8HI "V8HI") (VNx8HF "V8HF")
+ (VNx4SI "V4SI") (VNx4SF "V4SF")
+ (VNx2DI "V2DI") (VNx2DF "V2DF")])
+
+;; ...and again in lower case.
+(define_mode_attr v128 [(VNx16QI "v16qi")
+ (VNx8HI "v8hi") (VNx8HF "v8hf")
+ (VNx4SI "v4si") (VNx4SF "v4sf")
+ (VNx2DI "v2di") (VNx2DF "v2df")])
+
;; 64-bit container modes the inner or scalar source mode.
(define_mode_attr VCOND [(HI "V4HI") (SI "V2SI")
(V4HI "V4HI") (V8HI "V4HI")
(V2DI "4s")])
;; Widened modes of vector modes.
-(define_mode_attr VWIDE [(V8QI "V8HI") (V4HI "V4SI")
- (V2SI "V2DI") (V16QI "V8HI")
- (V8HI "V4SI") (V4SI "V2DI")
- (HI "SI") (SI "DI")
- (V8HF "V4SF") (V4SF "V2DF")
- (V4HF "V4SF") (V2SF "V2DF")]
-)
+(define_mode_attr VWIDE [(V8QI "V8HI") (V4HI "V4SI")
+ (V2SI "V2DI") (V16QI "V8HI")
+ (V8HI "V4SI") (V4SI "V2DI")
+ (HI "SI") (SI "DI")
+ (V8HF "V4SF") (V4SF "V2DF")
+ (V4HF "V4SF") (V2SF "V2DF")
+ (VNx8HF "VNx4SF") (VNx4SF "VNx2DF")
+ (VNx16QI "VNx8HI") (VNx8HI "VNx4SI")
+ (VNx4SI "VNx2DI")
+ (VNx16BI "VNx8BI") (VNx8BI "VNx4BI")
+ (VNx4BI "VNx2BI")])
+
+;; Predicate mode associated with VWIDE.
+(define_mode_attr VWIDE_PRED [(VNx8HF "VNx4BI") (VNx4SF "VNx2BI")])
;; Widened modes of vector modes, lowercase
-(define_mode_attr Vwide [(V2SF "v2df") (V4HF "v4sf")])
+(define_mode_attr Vwide [(V2SF "v2df") (V4HF "v4sf")
+ (VNx16QI "vnx8hi") (VNx8HI "vnx4si")
+ (VNx4SI "vnx2di")
+ (VNx8HF "vnx4sf") (VNx4SF "vnx2df")
+ (VNx16BI "vnx8bi") (VNx8BI "vnx4bi")
+ (VNx4BI "vnx2bi")])
;; Widened mode register suffixes for VD_BHSI/VQW/VQ_HSF.
(define_mode_attr Vwtype [(V8QI "8h") (V4HI "4s")
(V8HI "4s") (V4SI "2d")
(V8HF "4s") (V4SF "2d")])
+;; SVE vector after widening
+(define_mode_attr Vewtype [(VNx16QI "h")
+ (VNx8HI "s") (VNx8HF "s")
+ (VNx4SI "d") (VNx4SF "d")])
+
;; Widened mode register suffixes for VDW/VQW.
(define_mode_attr Vmwtype [(V8QI ".8h") (V4HI ".4s")
(V2SI ".2d") (V16QI ".8h")
(V4SF "2s")])
;; Define corresponding core/FP element mode for each vector mode.
-(define_mode_attr vw [(V8QI "w") (V16QI "w")
- (V4HI "w") (V8HI "w")
- (V2SI "w") (V4SI "w")
- (DI "x") (V2DI "x")
- (V2SF "s") (V4SF "s")
- (V2DF "d")])
+(define_mode_attr vw [(V8QI "w") (V16QI "w") (VNx16QI "w")
+ (V4HI "w") (V8HI "w") (VNx8HI "w")
+ (V2SI "w") (V4SI "w") (VNx4SI "w")
+ (DI "x") (V2DI "x") (VNx2DI "x")
+ (VNx8HF "h")
+ (V2SF "s") (V4SF "s") (VNx4SF "s")
+ (V2DF "d") (VNx2DF "d")])
;; Corresponding core element mode for each vector mode. This is a
;; variation on <vw> mapping FP modes to GP regs.
-(define_mode_attr vwcore [(V8QI "w") (V16QI "w")
- (V4HI "w") (V8HI "w")
- (V2SI "w") (V4SI "w")
- (DI "x") (V2DI "x")
- (V4HF "w") (V8HF "w")
- (V2SF "w") (V4SF "w")
- (V2DF "x")])
+(define_mode_attr vwcore [(V8QI "w") (V16QI "w") (VNx16QI "w")
+ (V4HI "w") (V8HI "w") (VNx8HI "w")
+ (V2SI "w") (V4SI "w") (VNx4SI "w")
+ (DI "x") (V2DI "x") (VNx2DI "x")
+ (V4HF "w") (V8HF "w") (VNx8HF "w")
+ (V2SF "w") (V4SF "w") (VNx4SF "w")
+ (V2DF "x") (VNx2DF "x")])
;; Double vector types for ALLX.
(define_mode_attr Vallxd [(QI "8b") (HI "4h") (SI "2s")])
-;; Mode of result of comparison operations.
-(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
- (V4HI "V4HI") (V8HI "V8HI")
- (V2SI "V2SI") (V4SI "V4SI")
- (DI "DI") (V2DI "V2DI")
- (V4HF "V4HI") (V8HF "V8HI")
- (V2SF "V2SI") (V4SF "V4SI")
- (V2DF "V2DI") (DF "DI")
- (SF "SI")])
-
-;; Lower case mode of results of comparison operations.
-(define_mode_attr v_cmp_result [(V8QI "v8qi") (V16QI "v16qi")
- (V4HI "v4hi") (V8HI "v8hi")
- (V2SI "v2si") (V4SI "v4si")
- (DI "di") (V2DI "v2di")
- (V4HF "v4hi") (V8HF "v8hi")
- (V2SF "v2si") (V4SF "v4si")
- (V2DF "v2di") (DF "di")
- (SF "si")])
+;; Mode with floating-point values replaced by like-sized integers.
+(define_mode_attr V_INT_EQUIV [(V8QI "V8QI") (V16QI "V16QI")
+ (V4HI "V4HI") (V8HI "V8HI")
+ (V2SI "V2SI") (V4SI "V4SI")
+ (DI "DI") (V2DI "V2DI")
+ (V4HF "V4HI") (V8HF "V8HI")
+ (V2SF "V2SI") (V4SF "V4SI")
+ (DF "DI") (V2DF "V2DI")
+ (SF "SI") (SI "SI")
+ (HF "HI")
+ (VNx16QI "VNx16QI")
+ (VNx8HI "VNx8HI") (VNx8HF "VNx8HI")
+ (VNx4SI "VNx4SI") (VNx4SF "VNx4SI")
+ (VNx2DI "VNx2DI") (VNx2DF "VNx2DI")
+])
+
+;; Lower case mode with floating-point values replaced by like-sized integers.
+(define_mode_attr v_int_equiv [(V8QI "v8qi") (V16QI "v16qi")
+ (V4HI "v4hi") (V8HI "v8hi")
+ (V2SI "v2si") (V4SI "v4si")
+ (DI "di") (V2DI "v2di")
+ (V4HF "v4hi") (V8HF "v8hi")
+ (V2SF "v2si") (V4SF "v4si")
+ (DF "di") (V2DF "v2di")
+ (SF "si")
+ (VNx16QI "vnx16qi")
+ (VNx8HI "vnx8hi") (VNx8HF "vnx8hi")
+ (VNx4SI "vnx4si") (VNx4SF "vnx4si")
+ (VNx2DI "vnx2di") (VNx2DF "vnx2di")
+])
+
+;; Floating-point equivalent of selected modes.
+(define_mode_attr V_FP_EQUIV [(VNx4SI "VNx4SF") (VNx4SF "VNx4SF")
+ (VNx2DI "VNx2DF") (VNx2DF "VNx2DF")])
+(define_mode_attr v_fp_equiv [(VNx4SI "vnx4sf") (VNx4SF "vnx4sf")
+ (VNx2DI "vnx2df") (VNx2DF "vnx2df")])
+
+;; Mode for vector conditional operations where the comparison has
+;; different type from the lhs.
+(define_mode_attr V_cmp_mixed [(V2SI "V2SF") (V4SI "V4SF")
+ (V2DI "V2DF") (V2SF "V2SI")
+ (V4SF "V4SI") (V2DF "V2DI")])
+
+(define_mode_attr v_cmp_mixed [(V2SI "v2sf") (V4SI "v4sf")
+ (V2DI "v2df") (V2SF "v2si")
+ (V4SF "v4si") (V2DF "v2di")])
;; Lower case element modes (as used in shift immediate patterns).
(define_mode_attr ve_mode [(V8QI "qi") (V16QI "qi")
;; ld..._lane and st..._lane operations.
(define_mode_attr nregs [(OI "2") (CI "3") (XI "4")])
-(define_mode_attr VRL2 [(V8QI "V32QI") (V4HI "V16HI")
- (V4HF "V16HF")
- (V2SI "V8SI") (V2SF "V8SF")
- (DI "V4DI") (DF "V4DF")])
-
-(define_mode_attr VRL3 [(V8QI "V48QI") (V4HI "V24HI")
- (V4HF "V24HF")
- (V2SI "V12SI") (V2SF "V12SF")
- (DI "V6DI") (DF "V6DF")])
-
-(define_mode_attr VRL4 [(V8QI "V64QI") (V4HI "V32HI")
- (V4HF "V32HF")
- (V2SI "V16SI") (V2SF "V16SF")
- (DI "V8DI") (DF "V8DF")])
-
;; Mode for atomic operation suffixes
(define_mode_attr atomic_sfx
[(QI "b") (HI "h") (SI "") (DI "")])
(V2DI "v2df") (V4SI "v4sf") (V2SI "v2sf")
(SF "si") (DF "di") (SI "sf") (DI "df")
(V4HF "v4hi") (V8HF "v8hi") (V4HI "v4hf")
- (V8HI "v8hf")])
+ (V8HI "v8hf") (HF "hi") (HI "hf")])
(define_mode_attr FCVT_TARGET [(V2DF "V2DI") (V4SF "V4SI") (V2SF "V2SI")
(V2DI "V2DF") (V4SI "V4SF") (V2SI "V2SF")
(SF "SI") (DF "DI") (SI "SF") (DI "DF")
(V4HF "V4HI") (V8HF "V8HI") (V4HI "V4HF")
- (V8HI "V8HF")])
+ (V8HI "V8HF") (HF "HI") (HI "HF")])
;; for the inequal width integer to fp conversions
-(define_mode_attr fcvt_iesize [(SF "di") (DF "si")])
-(define_mode_attr FCVT_IESIZE [(SF "DI") (DF "SI")])
+(define_mode_attr fcvt_iesize [(HF "di") (SF "di") (DF "si")])
+(define_mode_attr FCVT_IESIZE [(HF "DI") (SF "DI") (DF "SI")])
(define_mode_attr VSWAP_WIDTH [(V8QI "V16QI") (V16QI "V8QI")
(V4HI "V8HI") (V8HI "V4HI")
(DF "to_128") (V2DF "to_64")])
;; For certain vector-by-element multiplication instructions we must
-;; constrain the HI cases to use only V0-V15. This is covered by
+;; constrain the 16-bit cases to use only V0-V15. This is covered by
;; the 'x' constraint. All other modes may use the 'w' constraint.
(define_mode_attr h_con [(V2SI "w") (V4SI "w")
(V4HI "x") (V8HI "x")
- (V4HF "w") (V8HF "w")
+ (V4HF "x") (V8HF "x")
(V2SF "w") (V4SF "w")
(V2DF "w") (DF "w")])
(V4HI "") (V8HI "")
(V2SI "") (V4SI "")
(DI "") (V2DI "")
+ (V4HF "f") (V8HF "f")
(V2SF "f") (V4SF "f")
(V2DF "f") (DF "f")])
(V4HI "") (V8HI "")
(V2SI "") (V4SI "")
(DI "") (V2DI "")
+ (V4HF "_fp") (V8HF "_fp")
(V2SF "_fp") (V4SF "_fp")
(V2DF "_fp") (DF "_fp")
(SF "_fp")])
(V4HF "") (V8HF "_q")
(V2SF "") (V4SF "_q")
(V2DF "_q")
- (QI "") (HI "") (SI "") (DI "") (SF "") (DF "")])
+ (QI "") (HI "") (SI "") (DI "") (HF "") (SF "") (DF "")])
(define_mode_attr vp [(V8QI "v") (V16QI "v")
(V4HI "v") (V8HI "v")
(V2SI "p") (V4SI "v")
- (V2DI "p") (V2DF "p")
- (V2SF "p") (V4SF "v")])
+ (V2DI "p") (V2DF "p")
+ (V2SF "p") (V4SF "v")
+ (V4HF "v") (V8HF "v")])
(define_mode_attr vsi2qi [(V2SI "v8qi") (V4SI "v16qi")])
(define_mode_attr VSI2QI [(V2SI "V8QI") (V4SI "V16QI")])
+
+;; Register suffix for DOTPROD input types from the return type.
+(define_mode_attr Vdottype [(V2SI "8b") (V4SI "16b")])
+
;; Sum of lengths of instructions needed to move vector registers of a mode.
(define_mode_attr insn_count [(OI "8") (CI "12") (XI "16")])
;; No need of iterator for -fPIC as it use got_lo12 for both modes.
(define_mode_attr got_modifier [(SI "gotpage_lo14") (DI "gotpage_lo15")])
+;; Width of 2nd and 3rd arguments to fp16 vector multiply add/sub
+(define_mode_attr VFMLA_W [(V2SF "V4HF") (V4SF "V8HF")])
+
+(define_mode_attr VFMLA_SEL_W [(V2SF "V2HF") (V4SF "V4HF")])
+
+(define_mode_attr f16quad [(V2SF "") (V4SF "q")])
+
+(define_code_attr f16mac [(plus "a") (minus "s")])
+
+;; The number of subvectors in an SVE_STRUCT.
+(define_mode_attr vector_count [(VNx32QI "2") (VNx16HI "2")
+ (VNx8SI "2") (VNx4DI "2")
+ (VNx16HF "2") (VNx8SF "2") (VNx4DF "2")
+ (VNx48QI "3") (VNx24HI "3")
+ (VNx12SI "3") (VNx6DI "3")
+ (VNx24HF "3") (VNx12SF "3") (VNx6DF "3")
+ (VNx64QI "4") (VNx32HI "4")
+ (VNx16SI "4") (VNx8DI "4")
+ (VNx32HF "4") (VNx16SF "4") (VNx8DF "4")])
+
+;; The number of instruction bytes needed for an SVE_STRUCT move. This is
+;; equal to vector_count * 4.
+(define_mode_attr insn_length [(VNx32QI "8") (VNx16HI "8")
+ (VNx8SI "8") (VNx4DI "8")
+ (VNx16HF "8") (VNx8SF "8") (VNx4DF "8")
+ (VNx48QI "12") (VNx24HI "12")
+ (VNx12SI "12") (VNx6DI "12")
+ (VNx24HF "12") (VNx12SF "12") (VNx6DF "12")
+ (VNx64QI "16") (VNx32HI "16")
+ (VNx16SI "16") (VNx8DI "16")
+ (VNx32HF "16") (VNx16SF "16") (VNx8DF "16")])
+
+;; The type of a subvector in an SVE_STRUCT.
+(define_mode_attr VSINGLE [(VNx32QI "VNx16QI")
+ (VNx16HI "VNx8HI") (VNx16HF "VNx8HF")
+ (VNx8SI "VNx4SI") (VNx8SF "VNx4SF")
+ (VNx4DI "VNx2DI") (VNx4DF "VNx2DF")
+ (VNx48QI "VNx16QI")
+ (VNx24HI "VNx8HI") (VNx24HF "VNx8HF")
+ (VNx12SI "VNx4SI") (VNx12SF "VNx4SF")
+ (VNx6DI "VNx2DI") (VNx6DF "VNx2DF")
+ (VNx64QI "VNx16QI")
+ (VNx32HI "VNx8HI") (VNx32HF "VNx8HF")
+ (VNx16SI "VNx4SI") (VNx16SF "VNx4SF")
+ (VNx8DI "VNx2DI") (VNx8DF "VNx2DF")])
+
+;; ...and again in lower case.
+(define_mode_attr vsingle [(VNx32QI "vnx16qi")
+ (VNx16HI "vnx8hi") (VNx16HF "vnx8hf")
+ (VNx8SI "vnx4si") (VNx8SF "vnx4sf")
+ (VNx4DI "vnx2di") (VNx4DF "vnx2df")
+ (VNx48QI "vnx16qi")
+ (VNx24HI "vnx8hi") (VNx24HF "vnx8hf")
+ (VNx12SI "vnx4si") (VNx12SF "vnx4sf")
+ (VNx6DI "vnx2di") (VNx6DF "vnx2df")
+ (VNx64QI "vnx16qi")
+ (VNx32HI "vnx8hi") (VNx32HF "vnx8hf")
+ (VNx16SI "vnx4si") (VNx16SF "vnx4sf")
+ (VNx8DI "vnx2di") (VNx8DF "vnx2df")])
+
+;; The predicate mode associated with an SVE data mode. For structure modes
+;; this is equivalent to the <VPRED> of the subvector mode.
+(define_mode_attr VPRED [(VNx16QI "VNx16BI")
+ (VNx8HI "VNx8BI") (VNx8HF "VNx8BI")
+ (VNx4SI "VNx4BI") (VNx4SF "VNx4BI")
+ (VNx2DI "VNx2BI") (VNx2DF "VNx2BI")
+ (VNx32QI "VNx16BI")
+ (VNx16HI "VNx8BI") (VNx16HF "VNx8BI")
+ (VNx8SI "VNx4BI") (VNx8SF "VNx4BI")
+ (VNx4DI "VNx2BI") (VNx4DF "VNx2BI")
+ (VNx48QI "VNx16BI")
+ (VNx24HI "VNx8BI") (VNx24HF "VNx8BI")
+ (VNx12SI "VNx4BI") (VNx12SF "VNx4BI")
+ (VNx6DI "VNx2BI") (VNx6DF "VNx2BI")
+ (VNx64QI "VNx16BI")
+ (VNx32HI "VNx8BI") (VNx32HF "VNx8BI")
+ (VNx16SI "VNx4BI") (VNx16SF "VNx4BI")
+ (VNx8DI "VNx2BI") (VNx8DF "VNx2BI")])
+
+;; ...and again in lower case.  Must stay the element-for-element lowercase
+;; mirror of VPRED above.
+(define_mode_attr vpred [(VNx16QI "vnx16bi")
+			 (VNx8HI "vnx8bi") (VNx8HF "vnx8bi")
+			 (VNx4SI "vnx4bi") (VNx4SF "vnx4bi")
+			 (VNx2DI "vnx2bi") (VNx2DF "vnx2bi")
+			 (VNx32QI "vnx16bi")
+			 (VNx16HI "vnx8bi") (VNx16HF "vnx8bi")
+			 (VNx8SI "vnx4bi") (VNx8SF "vnx4bi")
+			 (VNx4DI "vnx2bi") (VNx4DF "vnx2bi")
+			 (VNx48QI "vnx16bi")
+			 (VNx24HI "vnx8bi") (VNx24HF "vnx8bi")
+			 (VNx12SI "vnx4bi") (VNx12SF "vnx4bi")
+			 (VNx6DI "vnx2bi") (VNx6DF "vnx2bi")
+			 (VNx64QI "vnx16bi")
+			 (VNx32HI "vnx8bi") (VNx32HF "vnx8bi")
+			 (VNx16SI "vnx4bi") (VNx16SF "vnx4bi")
+			 (VNx8DI "vnx2bi") (VNx8DF "vnx2bi")])
+
;; -------------------------------------------------------------------
;; Code Iterators
;; -------------------------------------------------------------------
;; Code iterator for logical operations
(define_code_iterator LOGICAL [and ior xor])
+;; LOGICAL without AND.
+(define_code_iterator LOGICAL_OR [ior xor])
+
;; Code iterator for logical operations whose :nlogical works on SIMD registers.
(define_code_iterator NLOGICAL [and ior])
;; Unsigned comparison operators.
(define_code_iterator FAC_COMPARISONS [lt le ge gt])
+;; SVE integer unary operations.
+(define_code_iterator SVE_INT_UNARY [neg not popcount])
+
+;; SVE floating-point unary operations.
+(define_code_iterator SVE_FP_UNARY [neg abs sqrt])
+
+;; SVE integer binary operations.
+(define_code_iterator SVE_INT_BINARY [plus minus mult smax umax smin umin
+ and ior xor])
+
+;; SVE integer binary division operations.
+(define_code_iterator SVE_INT_BINARY_SD [div udiv])
+
+;; SVE integer comparisons.
+(define_code_iterator SVE_INT_CMP [lt le eq ne ge gt ltu leu geu gtu])
+
+;; SVE floating-point comparisons.
+(define_code_iterator SVE_FP_CMP [lt le eq ne ge gt])
+
;; -------------------------------------------------------------------
;; Code Attributes
;; -------------------------------------------------------------------
(unsigned_fix "fixuns")
(float "float")
(unsigned_float "floatuns")
+ (popcount "popcount")
(and "and")
(ior "ior")
(xor "xor")
(neg "neg")
(plus "add")
(minus "sub")
+ (mult "mul")
+ (div "div")
+ (udiv "udiv")
(ss_plus "qadd")
(us_plus "qadd")
(ss_minus "qsub")
(us_minus "qsub")
(ss_neg "qneg")
(ss_abs "qabs")
+ (smin "smin")
+ (smax "smax")
+ (umin "umin")
+ (umax "umax")
(eq "eq")
(ne "ne")
(lt "lt")
(ltu "ltu")
(leu "leu")
(geu "geu")
- (gtu "gtu")])
+ (gtu "gtu")
+ (abs "abs")
+ (sqrt "sqrt")])
;; For comparison operators we use the FCM* and CM* instructions.
;; As there are no CMLE or CMLT instructions which act on 3 vector
(ltu "LTU") (leu "LEU") (ne "NE") (geu "GEU")
(gtu "GTU")])
+;; The AArch64 condition associated with an rtl comparison code.
+(define_code_attr cmp_op [(lt "lt")
+ (le "le")
+ (eq "eq")
+ (ne "ne")
+ (ge "ge")
+ (gt "gt")
+ (ltu "lo")
+ (leu "ls")
+ (geu "hs")
+ (gtu "hi")])
+
(define_code_attr fix_trunc_optab [(fix "fix_trunc")
(unsigned_fix "fixuns_trunc")])
;; Operation names for negate and bitwise complement.
(define_code_attr neg_not_op [(neg "neg") (not "not")])
-;; Similar, but when not(op)
+;; Similar, but when the second operand is inverted.
(define_code_attr nlogical [(and "bic") (ior "orn") (xor "eon")])
-;; Sign- or zero-extending load
-(define_code_attr ldrxt [(sign_extend "ldrs") (zero_extend "ldr")])
+;; Similar, but when both operands are inverted.
+(define_code_attr logical_nn [(and "nor") (ior "nand")])
;; Sign- or zero-extending data-op
(define_code_attr su [(sign_extend "s") (zero_extend "u")
(smax "s") (umax "u")
(smin "s") (umin "u")])
+;; Whether a shift is left or right.
+(define_code_attr lr [(ashift "l") (ashiftrt "r") (lshiftrt "r")])
+
;; Emit conditional branch instructions.
(define_code_attr bcond [(eq "beq") (ne "bne") (lt "bne") (ge "beq")])
;; Attribute to describe constants acceptable in atomic logical operations
(define_mode_attr lconst_atomic [(QI "K") (HI "K") (SI "K") (DI "L")])
+;; The integer SVE instruction that implements an rtx code.
+(define_code_attr sve_int_op [(plus "add")
+ (minus "sub")
+ (mult "mul")
+ (div "sdiv")
+ (udiv "udiv")
+ (neg "neg")
+ (smin "smin")
+ (smax "smax")
+ (umin "umin")
+ (umax "umax")
+ (and "and")
+ (ior "orr")
+ (xor "eor")
+ (not "not")
+ (popcount "cnt")])
+
+;; The integer SVE instruction that implements an rtx code when the
+;; source operands are given in reversed order (the "r"-suffixed
+;; mnemonics: SUBR, SDIVR, UDIVR); commutative codes keep the same
+;; mnemonic as in <sve_int_op>.
+(define_code_attr sve_int_op_rev [(plus "add")
+ (minus "subr")
+ (mult "mul")
+ (div "sdivr")
+ (udiv "udivr")
+ (smin "smin")
+ (smax "smax")
+ (umin "umin")
+ (umax "umax")
+ (and "and")
+ (ior "orr")
+ (xor "eor")])
+
+;; The floating-point SVE instruction that implements an rtx code.
+(define_code_attr sve_fp_op [(plus "fadd")
+ (neg "fneg")
+ (abs "fabs")
+ (sqrt "fsqrt")])
+
+;; The SVE immediate constraint to use for an rtl code.
+(define_code_attr sve_imm_con [(eq "vsc")
+ (ne "vsc")
+ (lt "vsc")
+ (ge "vsc")
+ (le "vsc")
+ (gt "vsc")
+ (ltu "vsd")
+ (leu "vsd")
+ (geu "vsd")
+ (gtu "vsd")])
+
;; -------------------------------------------------------------------
;; Int Iterators.
;; -------------------------------------------------------------------
+
+;; The unspec codes for the SABAL, UABAL Advanced SIMD instructions.
+(define_int_iterator ABAL [UNSPEC_SABAL UNSPEC_UABAL])
+
+;; The unspec codes for the SABDL2, UABDL2 Advanced SIMD instructions.
+(define_int_iterator ABDL2 [UNSPEC_SABDL2 UNSPEC_UABDL2])
+
+;; The unspec codes for the SADALP, UADALP Advanced SIMD instructions.
+(define_int_iterator ADALP [UNSPEC_SADALP UNSPEC_UADALP])
+
(define_int_iterator MAXMINV [UNSPEC_UMAXV UNSPEC_UMINV
UNSPEC_SMAXV UNSPEC_SMINV])
(define_int_iterator FMAXMINV [UNSPEC_FMAXV UNSPEC_FMINV
UNSPEC_FMAXNMV UNSPEC_FMINNMV])
+;; Bitwise reductions across a vector (AND, inclusive OR, exclusive OR);
+;; see the bit_reduc_op mapping for the instruction mnemonics.
+(define_int_iterator BITWISEV [UNSPEC_ANDV UNSPEC_IORV UNSPEC_XORV])
+
+;; Bitwise logical operations on floating-point modes, represented as
+;; unspecs; see logicalf_op for the instruction mnemonics.
+(define_int_iterator LOGICALF [UNSPEC_ANDF UNSPEC_IORF UNSPEC_XORF])
+
(define_int_iterator HADDSUB [UNSPEC_SHADD UNSPEC_UHADD
UNSPEC_SRHADD UNSPEC_URHADD
UNSPEC_SHSUB UNSPEC_UHSUB
UNSPEC_SRHSUB UNSPEC_URHSUB])
+;; The unspec codes for the SHADD, UHADD Advanced SIMD instructions.
+(define_int_iterator HADD [UNSPEC_SHADD UNSPEC_UHADD])
+
+;; The unspec codes for the SRHADD, URHADD Advanced SIMD instructions.
+(define_int_iterator RHADD [UNSPEC_SRHADD UNSPEC_URHADD])
+
+;; The unspec codes for the SDOT, UDOT Advanced SIMD instructions.
+(define_int_iterator DOTPROD [UNSPEC_SDOT UNSPEC_UDOT])
(define_int_iterator ADDSUBHN [UNSPEC_ADDHN UNSPEC_RADDHN
UNSPEC_SUBHN UNSPEC_RSUBHN])
(define_int_iterator ADDSUBHN2 [UNSPEC_ADDHN2 UNSPEC_RADDHN2
UNSPEC_SUBHN2 UNSPEC_RSUBHN2])
-(define_int_iterator FMAXMIN_UNS [UNSPEC_FMAX UNSPEC_FMIN])
+(define_int_iterator FMAXMIN_UNS [UNSPEC_FMAX UNSPEC_FMIN
+ UNSPEC_FMAXNM UNSPEC_FMINNM])
+
+;; The unspec codes for the PACISP/AUTISP pointer-authentication pair
+;; (see pauth_mnem_prefix and pauth_hint_num_a below).
+(define_int_iterator PAUTH_LR_SP [UNSPEC_PACISP UNSPEC_AUTISP])
-(define_int_iterator FMAXMIN [UNSPEC_FMAXNM UNSPEC_FMINNM])
+;; The unspec codes for the PACI1716/AUTI1716 pointer-authentication pair.
+(define_int_iterator PAUTH_17_16 [UNSPEC_PACI1716 UNSPEC_AUTI1716])
(define_int_iterator VQDMULH [UNSPEC_SQDMULH UNSPEC_SQRDMULH])
UNSPEC_TRN1 UNSPEC_TRN2
UNSPEC_UZP1 UNSPEC_UZP2])
+;; The ZIP1/ZIP2 and UZP1/UZP2 permute unspecs only (a subset of the
+;; full permute iterator above).
+(define_int_iterator OPTAB_PERMUTE [UNSPEC_ZIP1 UNSPEC_ZIP2
+ UNSPEC_UZP1 UNSPEC_UZP2])
+
(define_int_iterator REVERSE [UNSPEC_REV64 UNSPEC_REV32 UNSPEC_REV16])
(define_int_iterator FRINT [UNSPEC_FRINTZ UNSPEC_FRINTP UNSPEC_FRINTM
(define_int_iterator CRYPTO_SHA256 [UNSPEC_SHA256H UNSPEC_SHA256H2])
+(define_int_iterator CRYPTO_SHA512 [UNSPEC_SHA512H UNSPEC_SHA512H2])
+
+(define_int_iterator CRYPTO_SM3TT [UNSPEC_SM3TT1A UNSPEC_SM3TT1B
+ UNSPEC_SM3TT2A UNSPEC_SM3TT2B])
+
+(define_int_iterator CRYPTO_SM3PART [UNSPEC_SM3PARTW1 UNSPEC_SM3PARTW2])
+
+;; Iterators for fp16 operations
+
+(define_int_iterator VFMLA16_LOW [UNSPEC_FMLAL UNSPEC_FMLSL])
+
+(define_int_iterator VFMLA16_HIGH [UNSPEC_FMLAL2 UNSPEC_FMLSL2])
+
+;; Unpack operations: signed ("s") or unsigned ("u") extension of the
+;; high ("hi") or low ("lo") half of a vector; see the su and perm_hilo
+;; attribute mappings below.
+(define_int_iterator UNPACK [UNSPEC_UNPACKSHI UNSPEC_UNPACKUHI
+ UNSPEC_UNPACKSLO UNSPEC_UNPACKULO])
+
+;; The zero-extending (unsigned) unpack operations only.
+(define_int_iterator UNPACK_UNSIGNED [UNSPEC_UNPACKULO UNSPEC_UNPACKUHI])
+
+;; Signed and unsigned multiplications returning the high part.
+(define_int_iterator MUL_HIGHPART [UNSPEC_SMUL_HIGHPART UNSPEC_UMUL_HIGHPART])
+
+;; The UNSPEC_COND_* codes for predicated SVE floating-point binary
+;; operations; see sve_fp_op/sve_fp_op_rev for the mnemonics.
+(define_int_iterator SVE_COND_FP_BINARY [UNSPEC_COND_ADD UNSPEC_COND_SUB
+ UNSPEC_COND_MUL UNSPEC_COND_DIV
+ UNSPEC_COND_MAX UNSPEC_COND_MIN])
+
+;; The UNSPEC_COND_* codes for predicated SVE floating-point ternary
+;; (multiply-accumulate) operations; see sve_fmla_op/sve_fmad_op.
+(define_int_iterator SVE_COND_FP_TERNARY [UNSPEC_COND_FMLA
+ UNSPEC_COND_FMLS
+ UNSPEC_COND_FNMLA
+ UNSPEC_COND_FNMLS])
+
+;; The UNSPEC_COND_* codes for predicated SVE floating-point comparisons;
+;; see the cmp_op mapping for the condition names.
+(define_int_iterator SVE_COND_FP_CMP [UNSPEC_COND_LT UNSPEC_COND_LE
+ UNSPEC_COND_EQ UNSPEC_COND_NE
+ UNSPEC_COND_GE UNSPEC_COND_GT])
+
;; Iterators for atomic operations.
(define_int_iterator ATOMIC_LDOP
;; -------------------------------------------------------------------
;; Int Iterators Attributes.
;; -------------------------------------------------------------------
+
+;; The optab associated with an operation. Note that for ANDF, IORF
+;; and XORF, the optab pattern is not actually defined; we just use this
+;; name for consistency with the integer patterns.
+(define_int_attr optab [(UNSPEC_ANDF "and")
+ (UNSPEC_IORF "ior")
+ (UNSPEC_XORF "xor")
+ (UNSPEC_ANDV "and")
+ (UNSPEC_IORV "ior")
+ (UNSPEC_XORV "xor")
+ (UNSPEC_COND_ADD "add")
+ (UNSPEC_COND_SUB "sub")
+ (UNSPEC_COND_MUL "mul")
+ (UNSPEC_COND_DIV "div")
+ (UNSPEC_COND_MAX "smax")
+ (UNSPEC_COND_MIN "smin")
+ (UNSPEC_COND_FMLA "fma")
+ (UNSPEC_COND_FMLS "fnma")
+ (UNSPEC_COND_FNMLA "fnms")
+ (UNSPEC_COND_FNMLS "fms")])
+
(define_int_attr maxmin_uns [(UNSPEC_UMAXV "umax")
(UNSPEC_UMINV "umin")
(UNSPEC_SMAXV "smax")
(UNSPEC_FMAXV "smax_nan")
(UNSPEC_FMIN "smin_nan")
(UNSPEC_FMINNMV "smin")
- (UNSPEC_FMINV "smin_nan")])
+ (UNSPEC_FMINV "smin_nan")
+ (UNSPEC_FMAXNM "fmax")
+ (UNSPEC_FMINNM "fmin")])
(define_int_attr maxmin_uns_op [(UNSPEC_UMAXV "umax")
(UNSPEC_UMINV "umin")
(UNSPEC_FMAXV "fmax")
(UNSPEC_FMIN "fmin")
(UNSPEC_FMINNMV "fminnm")
- (UNSPEC_FMINV "fmin")])
-
-(define_int_attr fmaxmin [(UNSPEC_FMAXNM "fmax")
- (UNSPEC_FMINNM "fmin")])
-
-(define_int_attr fmaxmin_op [(UNSPEC_FMAXNM "fmaxnm")
- (UNSPEC_FMINNM "fminnm")])
+ (UNSPEC_FMINV "fmin")
+ (UNSPEC_FMAXNM "fmaxnm")
+ (UNSPEC_FMINNM "fminnm")])
+
+;; The SVE instruction that implements a bitwise reduction unspec.
+(define_int_attr bit_reduc_op [(UNSPEC_ANDV "andv")
+ (UNSPEC_IORV "orv")
+ (UNSPEC_XORV "eorv")])
+
+;; The SVE logical instruction that implements an unspec.
+(define_int_attr logicalf_op [(UNSPEC_ANDF "and")
+ (UNSPEC_IORF "orr")
+ (UNSPEC_XORF "eor")])
+
+;; "s" for signed operations and "u" for unsigned ones.
+(define_int_attr su [(UNSPEC_UNPACKSHI "s")
+ (UNSPEC_UNPACKUHI "u")
+ (UNSPEC_UNPACKSLO "s")
+ (UNSPEC_UNPACKULO "u")
+ (UNSPEC_SMUL_HIGHPART "s")
+ (UNSPEC_UMUL_HIGHPART "u")])
(define_int_attr sur [(UNSPEC_SHADD "s") (UNSPEC_UHADD "u")
(UNSPEC_SRHADD "sr") (UNSPEC_URHADD "ur")
(UNSPEC_SHSUB "s") (UNSPEC_UHSUB "u")
(UNSPEC_SRHSUB "sr") (UNSPEC_URHSUB "ur")
(UNSPEC_ADDHN "") (UNSPEC_RADDHN "r")
+ (UNSPEC_SABAL "s") (UNSPEC_UABAL "u")
+ (UNSPEC_SABDL2 "s") (UNSPEC_UABDL2 "u")
+ (UNSPEC_SADALP "s") (UNSPEC_UADALP "u")
(UNSPEC_SUBHN "") (UNSPEC_RSUBHN "r")
(UNSPEC_ADDHN2 "") (UNSPEC_RADDHN2 "r")
(UNSPEC_SUBHN2 "") (UNSPEC_RSUBHN2 "r")
(UNSPEC_USHLL "u") (UNSPEC_SSHLL "s")
(UNSPEC_URSHL "ur") (UNSPEC_SRSHL "sr")
(UNSPEC_UQRSHL "u") (UNSPEC_SQRSHL "s")
+ (UNSPEC_SDOT "s") (UNSPEC_UDOT "u")
])
(define_int_attr r [(UNSPEC_SQDMULH "") (UNSPEC_SQRDMULH "r")
(define_int_attr u [(UNSPEC_SQSHLU "u") (UNSPEC_SQSHL "") (UNSPEC_UQSHL "")
(UNSPEC_SQSHRUN "u") (UNSPEC_SQRSHRUN "u")
- (UNSPEC_SQSHRN "") (UNSPEC_UQSHRN "")
- (UNSPEC_SQRSHRN "") (UNSPEC_UQRSHRN "")])
+ (UNSPEC_SQSHRN "") (UNSPEC_UQSHRN "")
+ (UNSPEC_SQRSHRN "") (UNSPEC_UQRSHRN "")
+ (UNSPEC_SHADD "") (UNSPEC_UHADD "u")
+ (UNSPEC_SRHADD "") (UNSPEC_URHADD "u")])
(define_int_attr addsub [(UNSPEC_SHADD "add")
(UNSPEC_UHADD "add")
(UNSPEC_FCVTZS "fcvtzs")
(UNSPEC_FCVTZU "fcvtzu")])
+;; Pointer authentication mnemonic prefix.
+(define_int_attr pauth_mnem_prefix [(UNSPEC_PACISP "paci")
+ (UNSPEC_AUTISP "auti")
+ (UNSPEC_PACI1716 "paci")
+ (UNSPEC_AUTI1716 "auti")])
+
+;; Pointer authentication HINT number for NOP space instructions using A Key.
+(define_int_attr pauth_hint_num_a [(UNSPEC_PACISP "25")
+ (UNSPEC_AUTISP "29")
+ (UNSPEC_PACI1716 "8")
+ (UNSPEC_AUTI1716 "12")])
+
(define_int_attr perm_insn [(UNSPEC_ZIP1 "zip") (UNSPEC_ZIP2 "zip")
(UNSPEC_TRN1 "trn") (UNSPEC_TRN2 "trn")
(UNSPEC_UZP1 "uzp") (UNSPEC_UZP2 "uzp")])
(define_int_attr perm_hilo [(UNSPEC_ZIP1 "1") (UNSPEC_ZIP2 "2")
(UNSPEC_TRN1 "1") (UNSPEC_TRN2 "2")
- (UNSPEC_UZP1 "1") (UNSPEC_UZP2 "2")])
+ (UNSPEC_UZP1 "1") (UNSPEC_UZP2 "2")
+ (UNSPEC_UNPACKSHI "hi") (UNSPEC_UNPACKUHI "hi")
+ (UNSPEC_UNPACKSLO "lo") (UNSPEC_UNPACKULO "lo")])
+
+;; Return true if the associated optab refers to the high-numbered lanes,
+;; false if it refers to the low-numbered lanes. The convention is for
+;; "hi" to refer to the low-numbered lanes (the first ones in memory)
+;; for big-endian.
+(define_int_attr hi_lanes_optab [(UNSPEC_UNPACKSHI "!BYTES_BIG_ENDIAN")
+ (UNSPEC_UNPACKUHI "!BYTES_BIG_ENDIAN")
+ (UNSPEC_UNPACKSLO "BYTES_BIG_ENDIAN")
+ (UNSPEC_UNPACKULO "BYTES_BIG_ENDIAN")])
(define_int_attr frecp_suffix [(UNSPEC_FRECPE "e") (UNSPEC_FRECPX "x")])
(define_int_attr sha256_op [(UNSPEC_SHA256H "") (UNSPEC_SHA256H2 "2")])
(define_int_attr rdma_as [(UNSPEC_SQRDMLAH "a") (UNSPEC_SQRDMLSH "s")])
+
+(define_int_attr sha512_op [(UNSPEC_SHA512H "") (UNSPEC_SHA512H2 "2")])
+
+(define_int_attr sm3tt_op [(UNSPEC_SM3TT1A "1a") (UNSPEC_SM3TT1B "1b")
+ (UNSPEC_SM3TT2A "2a") (UNSPEC_SM3TT2B "2b")])
+
+(define_int_attr sm3part_op [(UNSPEC_SM3PARTW1 "1") (UNSPEC_SM3PARTW2 "2")])
+
+(define_int_attr f16mac1 [(UNSPEC_FMLAL "a") (UNSPEC_FMLSL "s")
+ (UNSPEC_FMLAL2 "a") (UNSPEC_FMLSL2 "s")])
+
+;; The condition associated with an UNSPEC_COND_<xx>.
+(define_int_attr cmp_op [(UNSPEC_COND_LT "lt")
+ (UNSPEC_COND_LE "le")
+ (UNSPEC_COND_EQ "eq")
+ (UNSPEC_COND_NE "ne")
+ (UNSPEC_COND_GE "ge")
+ (UNSPEC_COND_GT "gt")])
+
+;; The SVE floating-point instruction that implements an UNSPEC_COND_*
+;; binary operation.
+(define_int_attr sve_fp_op [(UNSPEC_COND_ADD "fadd")
+ (UNSPEC_COND_SUB "fsub")
+ (UNSPEC_COND_MUL "fmul")
+ (UNSPEC_COND_DIV "fdiv")
+ (UNSPEC_COND_MAX "fmaxnm")
+ (UNSPEC_COND_MIN "fminnm")])
+
+;; Likewise, with the source operands in reversed order (the "r"-suffixed
+;; mnemonics FSUBR and FDIVR); commutative operations are unchanged.
+(define_int_attr sve_fp_op_rev [(UNSPEC_COND_ADD "fadd")
+ (UNSPEC_COND_SUB "fsubr")
+ (UNSPEC_COND_MUL "fmul")
+ (UNSPEC_COND_DIV "fdivr")
+ (UNSPEC_COND_MAX "fmaxnm")
+ (UNSPEC_COND_MIN "fminnm")])
+
+;; The FMLA-form mnemonic for an UNSPEC_COND_* ternary operation.
+(define_int_attr sve_fmla_op [(UNSPEC_COND_FMLA "fmla")
+ (UNSPEC_COND_FMLS "fmls")
+ (UNSPEC_COND_FNMLA "fnmla")
+ (UNSPEC_COND_FNMLS "fnmls")])
+
+;; The FMAD-form (different operand arrangement) counterpart of the above.
+(define_int_attr sve_fmad_op [(UNSPEC_COND_FMLA "fmad")
+ (UNSPEC_COND_FMLS "fmsb")
+ (UNSPEC_COND_FNMLA "fnmad")
+ (UNSPEC_COND_FNMLS "fnmsb")])
+
+;; Whether an UNSPEC_COND_* binary operation is commutative.
+(define_int_attr commutative [(UNSPEC_COND_ADD "true")
+ (UNSPEC_COND_SUB "false")
+ (UNSPEC_COND_MUL "true")
+ (UNSPEC_COND_DIV "false")
+ (UNSPEC_COND_MIN "true")
+ (UNSPEC_COND_MAX "true")])