+2018-10-22  Yury Gribov  <tetra2005@gmail.com>
+
+	PR tree-optimization/87633
+	* match.pd: Do not generate unordered integer comparisons.
+
2018-10-22  Segher Boessenkool  <segher@kernel.crashing.org>

	PR rtl-optimization/87600
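Unordered comparison codes (UNLT_EXPR, UNORDERED_EXPR and friends) are only
meaningful on floating-point operands, so carrying them over unchanged from
the generic tcc_comparison iterator produced invalid integer comparisons when
both operands were integer-to-float conversions. Because an integer converted
to a floating type can never be NaN, each unordered code is equivalent to its
ordered counterpart in that situation, and ORDERED/UNORDERED fold to
constants. A minimal standalone sketch (illustration only, not part of the
patch) of the equivalences the cmp -> icmp mapping below relies on:

  #include <cassert>

  int main() {
    int n = -3, m = 7;
    double dn = n, dm = m;  // int-to-double conversion is exact, never NaN

    // ORDERED folds to true and UNORDERED to false for NaN-free operands.
    assert(!__builtin_isunordered(dn, dm));

    // With NaN impossible, UNLT (spelled here as !(dn >= dm)) agrees with
    // the plain integer comparison, matching the unlt -> lt entry below.
    assert(!(dn >= dm) == (n < m));
    return 0;
  }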
--- a/gcc/match.pd
+++ b/gcc/match.pd
(cmp @0 @1))))))
/* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
-(for cmp (tcc_comparison)
+(for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
+ icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
(simplify
(cmp (float@0 @1) (float @2))
(if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
}
(if (fmt.can_represent_integral_type_p (type1)
&& fmt.can_represent_integral_type_p (type2))
- (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
- && type1_signed_p >= type2_signed_p)
- (cmp @1 (convert @2))
- (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
- && type1_signed_p <= type2_signed_p)
- (cmp (convert:type2 @1) @2)
- (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
- && type1_signed_p == type2_signed_p)
- (cmp @1 @2)))))))))
+ (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
+ { constant_boolean_node (cmp == ORDERED_EXPR, type); }
+ (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
+ && type1_signed_p >= type2_signed_p)
+ (icmp @1 (convert @2))
+ (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
+ && type1_signed_p <= type2_signed_p)
+ (icmp (convert:type2 @1) @2)
+ (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
+ && type1_signed_p == type2_signed_p)
+ (icmp @1 @2))))))))))
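The guards around these folds are what make the integer comparison safe:
fmt.can_represent_integral_type_p requires the floating format to hold every
value of both integer types exactly, and the precision/signedness checks
decide which side to convert. A hedged standalone sketch (not from the patch)
of the failure mode those checks rule out:

  #include <cassert>

  int main() {
    // float's 24-bit significand cannot hold every 32-bit int:
    // 2^24 and 2^24 + 1 round to the same float.
    int a = 16777216, b = 16777217;
    assert((float)a == (float)b);  // the two collapse after rounding
    assert(a != b);                // so rewriting (float)a == (float)b
                                   // as a == b would change the result

    // double represents every 32-bit int exactly, so there the integer
    // comparison is equivalent and the fold may fire.
    assert(((double)a == (double)b) == (a == b));
    return 0;
  }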
/* Optimize various special cases of (FTYPE) N CMP CST. */
(for cmp (lt le eq ne ge gt)
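The pattern above is truncated here; it handles the companion case of a
converted integer compared against a floating constant, which can likewise
move into the integer domain once the constant is reduced to an integer
bound. A small hedged illustration (not from the patch) of the intended
equivalence:

  #include <cassert>

  int main() {
    // (double)i < 2.5 holds exactly when i <= 2, so the comparison can
    // be carried out entirely on the integer side.
    for (int i = -5; i <= 5; ++i)
      assert(((double)i < 2.5) == (i <= 2));
    return 0;
  }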
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+class a {
+public:
+ double b() const;
+};
+class c {
+public:
+ int m_fn2() const;
+};
+double a::b() const {
+ return 0 == 0 ? reinterpret_cast<const c *>(this)->m_fn2() : 0;
+}
+bool d;
+void e() {
+ a f;
+ double g = f.b();
+ /* { dg-final { scan-tree-dump-not "unord" "optimized" } } */
+ d = __builtin_isnan(g);
+}
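The test pins down exactly this scenario: g is computed from an int return
value and so can never be NaN, meaning __builtin_isnan (g) must fold to a
constant; the scan-tree-dump-not directive verifies that no unordered
comparison ("unord") survives in the optimized dump.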