[AArch64] Replace SVE_PARTIAL with SVE_PARTIAL_I
author    Richard Sandiford <richard.sandiford@arm.com>
Sat, 16 Nov 2019 10:55:40 +0000 (10:55 +0000)
committer Richard Sandiford <rsandifo@gcc.gnu.org>
Sat, 16 Nov 2019 10:55:40 +0000 (10:55 +0000)
Another renaming, this time to make way for partial/unpacked
float modes.
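
As a rough illustration of where this is heading (not part of this patch, and
the exact mode list is left to the follow-up work): freeing up the plain
SVE_PARTIAL name leaves room for a companion iterator over the partial
floating-point modes to sit next to SVE_PARTIAL_I in iterators.md, along
these lines:

    ;; All partial SVE floating-point modes (illustrative sketch only).
    (define_mode_iterator SVE_PARTIAL_F [VNx4HF VNx2HF
                                         VNx2SF])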

2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
* config/aarch64/iterators.md (SVE_PARTIAL): Rename to...
(SVE_PARTIAL_I): ...this.
* config/aarch64/aarch64-sve.md: Apply the above renaming throughout.

From-SVN: r278339

gcc/ChangeLog
gcc/config/aarch64/aarch64-sve.md
gcc/config/aarch64/iterators.md

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index fbf188089cf0df049691cfb08ea7529f8cae03a6..b7e46cf9b783c2e812d03b84bac41b58d6a877d5 100644
@@ -1,3 +1,9 @@
+2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>
+
+       * config/aarch64/iterators.md (SVE_PARTIAL): Rename to...
+       (SVE_PARTIAL_I): ...this.
+       * config/aarch64/aarch64-sve.md: Apply the above renaming throughout.
+
 2019-11-16  Richard Sandiford  <richard.sandiford@arm.com>
 
        * config/aarch64/iterators.md (SVE_ALL): Rename to...
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 88eaaa37c03b506c54ae16c0b62541af9be73977..5b71ab029b33714101db192955cc7f7b43e69263 100644
 ;; -------------------------------------------------------------------------
 
 ;; Predicated SXT[BHW].
-(define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL:mode>"
+(define_insn "@aarch64_pred_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
   [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w")
        (unspec:SVE_FULL_HSDI
          [(match_operand:<VPRED> 1 "register_operand" "Upl")
           (sign_extend:SVE_FULL_HSDI
-            (truncate:SVE_PARTIAL
+            (truncate:SVE_PARTIAL_I
               (match_operand:SVE_FULL_HSDI 2 "register_operand" "w")))]
          UNSPEC_PRED_X))]
   "TARGET_SVE && (~<narrower_mask> & <self_mask>) == 0"
-  "sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
+  "sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
 )
 
 ;; Predicated SXT[BHW] with merging.
-(define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL:mode>"
+(define_insn "@aarch64_cond_sxt<SVE_FULL_HSDI:mode><SVE_PARTIAL_I:mode>"
   [(set (match_operand:SVE_FULL_HSDI 0 "register_operand" "=w, ?&w, ?&w")
        (unspec:SVE_FULL_HSDI
          [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
           (sign_extend:SVE_FULL_HSDI
-            (truncate:SVE_PARTIAL
+            (truncate:SVE_PARTIAL_I
               (match_operand:SVE_FULL_HSDI 2 "register_operand" "w, w, w")))
           (match_operand:SVE_FULL_HSDI 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
          UNSPEC_SEL))]
   "TARGET_SVE && (~<narrower_mask> & <self_mask>) == 0"
   "@
-   sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
-   movprfx\t%0, %3\;sxt<SVE_PARTIAL:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
+   sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+   movprfx\t%0.<SVE_FULL_HSDI:Vetype>, %1/z, %2.<SVE_FULL_HSDI:Vetype>\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>
+   movprfx\t%0, %3\;sxt<SVE_PARTIAL_I:Vesize>\t%0.<SVE_FULL_HSDI:Vetype>, %1/m, %2.<SVE_FULL_HSDI:Vetype>"
   [(set_attr "movprfx" "*,yes,yes")]
 )
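
As a worked example of one instantiation of the renamed patterns
(illustrative only; the register choices z0, p0 and z1 are arbitrary): with
SVE_FULL_HSDI = VNx4SI and SVE_PARTIAL_I = VNx4QI, the first define_insn
above is instantiated as aarch64_pred_sxtvnx4sivnx4qi,
<SVE_PARTIAL_I:Vesize> resolves to "b" and <SVE_FULL_HSDI:Vetype> to "s", so
the output template becomes:

    sxtb    z0.s, p0/m, z1.s

i.e. a predicated sign extension of the low byte of each active 32-bit
element.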
 
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 890b3a8d721e105bd9e87900d5157b8ddb76ab8e..fc27179d8501749ce99a6d2bc1b6fbfa41361f50 100644
 ;; Fully-packed SVE vector modes that have 64-bit elements.
 (define_mode_iterator SVE_FULL_D [VNx2DI VNx2DF])
 
-;; All partial SVE modes.
-(define_mode_iterator SVE_PARTIAL [VNx2QI
-                                  VNx4QI VNx2HI
-                                  VNx8QI VNx4HI VNx2SI])
+;; All partial SVE integer modes.
+(define_mode_iterator SVE_PARTIAL_I [VNx8QI VNx4QI VNx2QI
+                                    VNx4HI VNx2HI
+                                    VNx2SI])
 
 ;; Modes involved in extending or truncating SVE data, for 8 elements per
 ;; 128-bit block.