[AArch64] Add special case when expanding vcond with arms {-1, -1}, {0, 0}.
author     James Greenhalgh <james.greenhalgh@arm.com>
Wed, 1 May 2013 10:40:23 +0000 (10:40 +0000)
committer  James Greenhalgh <jgreenhalgh@gcc.gnu.org>
Wed, 1 May 2013 10:40:23 +0000 (10:40 +0000)
gcc/
* config/aarch64/aarch64-simd.md
(vcond<mode>_internal): Handle special cases for constant masks.
(vcond<mode><mode>): Allow nonmemory_operands for outcome vectors.
(vcondu<mode><mode>): Likewise.
(vcond<v_cmp_result><mode>): New.

From-SVN: r198492
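
As a rough illustration (not part of the original commit message), source like
the following, once auto-vectorized, yields a vcond whose arms are the constant
vectors {-1, -1, ...} and {0, 0, ...}; the vector compare already produces
exactly that all-ones/all-zeros mask, so the expander can now move the mask
directly instead of forcing both constant vectors into registers and selecting
between them with a bsl:

    /* Hypothetical C example (identifiers are illustrative only).  Built
       with -O3 for AArch64, the loop body becomes a vcond whose arms are
       {-1, -1, ...} and {0, 0, ...}.  */
    void
    set_mask (int *r, const int *a, const int *b, int n)
    {
      for (int i = 0; i < n; i++)
        r[i] = (a[i] > b[i]) ? -1 : 0;
    }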

gcc/ChangeLog
gcc/config/aarch64/aarch64-simd.md

gcc/ChangeLog
index 94959e0a0cab5a3d718d1d7c71335e66d15f2e65..d0392c80076093f40c8793f70df86b219ec92968 100644
@@ -1,3 +1,11 @@
+2013-05-01  James Greenhalgh  <james.greenhalgh@arm.com>
+
+       * config/aarch64/aarch64-simd.md
+       (vcond<mode>_internal): Handle special cases for constant masks.
+       (vcond<mode><mode>): Allow nonmemory_operands for outcome vectors.
+       (vcondu<mode><mode>): Likewise.
+       (vcond<v_cmp_result><mode>): New.
+
 2013-05-01  James Greenhalgh  <james.greenhalgh@arm.com>
 
        * config/aarch64/aarch64-builtins.c (BUILTIN_VALLDI): Define.
gcc/config/aarch64/aarch64-simd.md
index 389344474ac939b21a6ec3c8d34c3f5f03860c6b..dfe4acb51a8b35d5e005176e5263141b9c971d2e 100644
          (match_operator 3 "comparison_operator"
            [(match_operand:VDQ 4 "register_operand")
             (match_operand:VDQ 5 "nonmemory_operand")])
-         (match_operand:VDQ 1 "register_operand")
-         (match_operand:VDQ 2 "register_operand")))]
+         (match_operand:VDQ 1 "nonmemory_operand")
+         (match_operand:VDQ 2 "nonmemory_operand")))]
   "TARGET_SIMD"
 {
   int inverse = 0, has_zero_imm_form = 0;
+  rtx op1 = operands[1];
+  rtx op2 = operands[2];
   rtx mask = gen_reg_rtx (<MODE>mode);
 
   switch (GET_CODE (operands[3]))
     }
 
   if (inverse)
-    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
-                                   operands[1]));
-  else
-    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
-                                   operands[2]));
+    {
+      op1 = operands[2];
+      op2 = operands[1];
+    }
+
+    /* If we have (a = (b CMP c) ? -1 : 0), then we can simply move
+       the generated mask.  */
+
+    if (op1 == CONSTM1_RTX (<V_cmp_result>mode)
+       && op2 == CONST0_RTX (<V_cmp_result>mode))
+      emit_move_insn (operands[0], mask);
+    else
+      {
+       if (!REG_P (op1))
+         op1 = force_reg (<MODE>mode, op1);
+       if (!REG_P (op2))
+         op2 = force_reg (<MODE>mode, op2);
+       emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask,
+                                              op1, op2));
+      }
 
   DONE;
 })
          (match_operator 3 "comparison_operator"
            [(match_operand:VDQF 4 "register_operand")
             (match_operand:VDQF 5 "nonmemory_operand")])
-         (match_operand:VDQF 1 "register_operand")
-         (match_operand:VDQF 2 "register_operand")))]
+         (match_operand:VDQF 1 "nonmemory_operand")
+         (match_operand:VDQF 2 "nonmemory_operand")))]
   "TARGET_SIMD"
 {
   int inverse = 0;
   int use_zero_form = 0;
   int swap_bsl_operands = 0;
+  rtx op1 = operands[1];
+  rtx op2 = operands[2];
   rtx mask = gen_reg_rtx (<V_cmp_result>mode);
   rtx tmp = gen_reg_rtx (<V_cmp_result>mode);
 
     }
 
   if (swap_bsl_operands)
-    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
-                                   operands[1]));
-  else
-    emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
-                                   operands[2]));
+    {
+      op1 = operands[2];
+      op2 = operands[1];
+    }
+
+    /* If we have (a = (b CMP c) ? -1 : 0), then we can simply move
+       the generated mask.  */
+
+    if (op1 == CONSTM1_RTX (<V_cmp_result>mode)
+       && op2 == CONST0_RTX (<V_cmp_result>mode))
+      emit_move_insn (operands[0], mask);
+    else
+      {
+       if (!REG_P (op1))
+         op1 = force_reg (<MODE>mode, op1);
+       if (!REG_P (op2))
+         op2 = force_reg (<MODE>mode, op2);
+       emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask,
+                                              op1, op2));
+      }
+
   DONE;
 })
 
          (match_operator 3 "comparison_operator"
            [(match_operand:VALL 4 "register_operand")
             (match_operand:VALL 5 "nonmemory_operand")])
-         (match_operand:VALL 1 "register_operand")
-         (match_operand:VALL 2 "register_operand")))]
+         (match_operand:VALL 1 "nonmemory_operand")
+         (match_operand:VALL 2 "nonmemory_operand")))]
   "TARGET_SIMD"
 {
   emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
   DONE;
 })
 
+(define_expand "vcond<v_cmp_result><mode>"
+  [(set (match_operand:<V_cmp_result> 0 "register_operand")
+       (if_then_else:<V_cmp_result>
+         (match_operator 3 "comparison_operator"
+           [(match_operand:VDQF 4 "register_operand")
+            (match_operand:VDQF 5 "nonmemory_operand")])
+         (match_operand:<V_cmp_result> 1 "nonmemory_operand")
+         (match_operand:<V_cmp_result> 2 "nonmemory_operand")))]
+  "TARGET_SIMD"
+{
+  emit_insn (gen_aarch64_vcond_internal<v_cmp_result> (
+                                               operands[0], operands[1],
+                                               operands[2], operands[3],
+                                               operands[4], operands[5]));
+  DONE;
+})
 
 (define_expand "vcondu<mode><mode>"
   [(set (match_operand:VDQ 0 "register_operand")
          (match_operator 3 "comparison_operator"
            [(match_operand:VDQ 4 "register_operand")
             (match_operand:VDQ 5 "nonmemory_operand")])
-         (match_operand:VDQ 1 "register_operand")
-         (match_operand:VDQ 2 "register_operand")))]
+         (match_operand:VDQ 1 "nonmemory_operand")
+         (match_operand:VDQ 2 "nonmemory_operand")))]
   "TARGET_SIMD"
 {
   emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
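
As a further hedged sketch (again not taken from the commit), the new
vcond<v_cmp_result><mode> expander above covers the mixed-mode case where a
floating-point comparison selects between integer vectors of the corresponding
comparison-result mode, for example:

    /* Hypothetical C example: a float comparison producing an integer
       -1/0 result vector, e.g. a V4SF compare feeding a V4SI result.  */
    void
    float_mask (int *r, const float *x, const float *y, int n)
    {
      for (int i = 0; i < n; i++)
        r[i] = (x[i] > y[i]) ? -1 : 0;
    }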