return !cfun || (cfun->curr_properties & PROP_gimple_lvec) != 0;
}
+/* Return true if we can still perform transformations that may introduce
+ vector operations that are not supported by the target. Vector lowering
+ normally handles those, but after that pass it is unsafe to introduce them. */
+
+static inline bool
+optimize_vectors_before_lowering_p ()
+{
+ return !cfun || (cfun->curr_properties & PROP_gimple_lvec) == 0;
+}
+
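Illustration only, not part of the patch: the kind of operation the new guard protects is a plain vector bit_and/bit_ior applied directly to comparison masks, which some targets only support when wrapped in a VEC_COND_EXPR. The GNU C sketch below (the typedef and the function name combine_masks are mine, chosen for illustration) shows such an operation; tree-vect-generic, the pass that sets PROP_gimple_lvec, is what would normally lower it for such targets, so introducing it after that pass has run is unsafe.

typedef int v4si __attribute__ ((vector_size (16)));

/* Hypothetical example: vector comparisons yield -1/0 masks, and the folds
   below may combine two such masks with a plain bit_and.  If the target
   only handles the masks inside a VEC_COND_EXPR, vector lowering must
   still run afterwards to break up the combined mask operation.  */
v4si
combine_masks (v4si a, v4si b, v4si c, v4si d, v4si x, v4si y)
{
  v4si m1 = a == b;            /* -1 where equal, 0 elsewhere.  */
  v4si m2 = c == d;
  v4si m = m1 & m2;            /* The op the folds may introduce.  */
  return (x & m) | (y & ~m);   /* Select x where m is set, else y.  */
}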
/* Return true if pow(cst, x) should be optimized into exp(log(cst) * x).
As a workaround for SPEC CPU2017 628.pop2_s, don't do it if arg0
is an exact integer, arg1 = phi_res +/- cst1 and phi_res = PHI <cst2, ...>
(vec_cond @0 (op! @3 @1) (op! @3 @2))))
#endif
-/* (v ? w : 0) ? a : b is just (v & w) ? a : b */
+/* (v ? w : 0) ? a : b is just (v & w) ? a : b
+ Currently disabled after pass lvec because ARM understands
+ VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to a
+ BIT_AND_EXPR or BIT_IOR_EXPR. */
(simplify
(vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_and @0 @3) @1 @2)))
(simplify
(vec_cond (vec_cond:s @0 integer_all_onesp @3) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_ior @0 @3) @1 @2)))
(simplify
(vec_cond (vec_cond:s @0 integer_zerop @3) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_ior @0 (bit_not @3)) @2 @1)))
(simplify
(vec_cond (vec_cond:s @0 @3 integer_all_onesp) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
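Illustration only, not part of the patch: the four folds above can be checked per lane by modelling VEC_COND_EXPR on -1/0 masks as bitwise selection. A minimal sketch in plain C (the sel helper and the constants are mine, for illustration):

#include <assert.h>
#include <stdint.h>

/* Per-lane model of VEC_COND_EXPR on -1/0 masks: pick A where M is
   all-ones, B where it is all-zeros.  */
static int32_t
sel (int32_t m, int32_t a, int32_t b)
{
  return (a & m) | (b & ~m);
}

int
main (void)
{
  const int32_t masks[2] = { 0, -1 };
  const int32_t a = 11, b = 22;
  for (int i = 0; i < 2; i++)
    for (int j = 0; j < 2; j++)
      {
        int32_t v = masks[i], w = masks[j];
        /* (v ? w : 0) ? a : b   -->  (v & w) ? a : b  */
        assert (sel (sel (v, w, 0), a, b) == sel (v & w, a, b));
        /* (v ? -1 : w) ? a : b  -->  (v | w) ? a : b  */
        assert (sel (sel (v, -1, w), a, b) == sel (v | w, a, b));
        /* (v ? 0 : w) ? a : b   -->  (v | ~w) ? b : a  */
        assert (sel (sel (v, 0, w), a, b) == sel (v | ~w, b, a));
        /* (v ? w : -1) ? a : b  -->  (v & ~w) ? b : a  */
        assert (sel (sel (v, w, -1), a, b) == sel (v & ~w, b, a));
      }
  return 0;
}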
/* c1 ? c2 ? a : b : b --> (c1 & c2) ? a : b */
(simplify
(vec_cond @0 (vec_cond:s @1 @2 @3) @3)
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_and @0 @1) @2 @3)))
(simplify
(vec_cond @0 @2 (vec_cond:s @1 @2 @3))
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_ior @0 @1) @2 @3)))
(simplify
(vec_cond @0 (vec_cond:s @1 @2 @3) @2)
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_ior (bit_not @0) @1) @2 @3)))
(simplify
(vec_cond @0 @3 (vec_cond:s @1 @2 @3))
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_and (bit_not @0) @1) @2 @3)))
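Likewise for illustration only, not part of the patch: the second group of folds above checks out under the same per-lane mask model; a short self-contained sketch in plain C, reusing the sel helper from the previous sketch:

#include <assert.h>
#include <stdint.h>

/* sel models VEC_COND_EXPR on a -1/0 mask, as before.  */
static int32_t
sel (int32_t m, int32_t a, int32_t b)
{
  return (a & m) | (b & ~m);
}

int
main (void)
{
  const int32_t masks[2] = { 0, -1 };
  const int32_t a = 11, b = 22;
  for (int i = 0; i < 2; i++)
    for (int j = 0; j < 2; j++)
      {
        int32_t c1 = masks[i], c2 = masks[j];
        /* c1 ? (c2 ? a : b) : b  -->  (c1 & c2) ? a : b  */
        assert (sel (c1, sel (c2, a, b), b) == sel (c1 & c2, a, b));
        /* c1 ? a : (c2 ? a : b)  -->  (c1 | c2) ? a : b  */
        assert (sel (c1, a, sel (c2, a, b)) == sel (c1 | c2, a, b));
        /* c1 ? (c2 ? a : b) : a  -->  (~c1 | c2) ? a : b  */
        assert (sel (c1, sel (c2, a, b), a) == sel (~c1 | c2, a, b));
        /* c1 ? b : (c2 ? a : b)  -->  (~c1 & c2) ? a : b  */
        assert (sel (c1, b, sel (c2, a, b)) == sel (~c1 & c2, a, b));
      }
  return 0;
}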
/* Simplification moved from fold_cond_expr_with_comparison. It may also