nvptx.opt (moptimize): New flag.
[gcc.git] / gcc / tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-ssa-loop.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "intl.h"
51 #include "cfgloop.h"
52 #include "tree-scalar-evolution.h"
53 #include "tree-ssa-propagate.h"
54 #include "tree-chrec.h"
55 #include "tree-ssa-threadupdate.h"
56 #include "tree-ssa-scopedtables.h"
57 #include "tree-ssa-threadedge.h"
58 #include "omp-low.h"
59 #include "target.h"
60
/* Range of values that can be associated with an SSA_NAME after VRP
   has executed.  */
struct value_range
{
  /* Lattice value represented by this range.  */
  enum value_range_type type;

  /* Minimum and maximum values represented by this range.  These
     values should be interpreted as follows:

	- If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
	  be NULL.

	- If TYPE == VR_RANGE then MIN holds the minimum value and
	  MAX holds the maximum value of the range [MIN, MAX].

	- If TYPE == VR_ANTI_RANGE the variable is known to NOT
	  take any values in the range [MIN, MAX].  */
  tree min;
  tree max;

  /* Set of SSA names whose value ranges are equivalent to this one.
     This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE.  */
  bitmap equiv;
};

/* Initializer for an automatic value_range: the VR_UNDEFINED lattice
   bottom, with no bounds and no equivalence set.  */
#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
88
/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  Indexed by basic block index; an
   entry may be NULL for blocks that are no longer active.  */
static sbitmap *live;
92
93 /* Return true if the SSA name NAME is live on the edge E. */
94
95 static bool
96 live_on_edge (edge e, tree name)
97 {
98 return (live[e->dest->index]
99 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
100 }
101
/* Local functions.  Forward declarations for routines defined later in
   this file that are needed before their definitions.  */
static int compare_values (tree val1, tree val2);
static int compare_values_warnv (tree val1, tree val2, bool *);
static void vrp_meet (value_range *, value_range *);
static void vrp_intersect_ranges (value_range *, value_range *);
static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
						     tree, tree, bool, bool *,
						     bool *);
110
/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};
140
/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Value range array.  After propagation, VR_VALUE[I] holds the range
   of values that SSA name N_I may take.  NUM_VR_VALUES is the number
   of allocated slots.  */
static unsigned num_vr_values;
static value_range **vr_value;
/* Set once propagation has finished; after that, get_value_range stops
   allocating new value_range objects.  */
static bool values_propagated;

/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
   number of executable edges we saw the last time we visited the
   node.  */
static int *vr_phi_edge_counts;

/* A switch statement together with the replacement vector of case
   labels that should be installed in it.  */
struct switch_update {
  gswitch *stmt;
  tree vec;
};

/* Edges scheduled for removal and switch statements scheduled for a
   case-label update, both processed after propagation finishes.  */
static vec<edge> to_remove_edges;
static vec<switch_update> to_update_switch_stmts;
168
169
170 /* Return the maximum value for TYPE. */
171
172 static inline tree
173 vrp_val_max (const_tree type)
174 {
175 if (!INTEGRAL_TYPE_P (type))
176 return NULL_TREE;
177
178 return TYPE_MAX_VALUE (type);
179 }
180
181 /* Return the minimum value for TYPE. */
182
183 static inline tree
184 vrp_val_min (const_tree type)
185 {
186 if (!INTEGRAL_TYPE_P (type))
187 return NULL_TREE;
188
189 return TYPE_MIN_VALUE (type);
190 }
191
192 /* Return whether VAL is equal to the maximum value of its type. This
193 will be true for a positive overflow infinity. We can't do a
194 simple equality comparison with TYPE_MAX_VALUE because C typedefs
195 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
196 to the integer constant with the same value in the type. */
197
198 static inline bool
199 vrp_val_is_max (const_tree val)
200 {
201 tree type_max = vrp_val_max (TREE_TYPE (val));
202 return (val == type_max
203 || (type_max != NULL_TREE
204 && operand_equal_p (val, type_max, 0)));
205 }
206
207 /* Return whether VAL is equal to the minimum value of its type. This
208 will be true for a negative overflow infinity. */
209
210 static inline bool
211 vrp_val_is_min (const_tree val)
212 {
213 tree type_min = vrp_val_min (TREE_TYPE (val));
214 return (val == type_min
215 || (type_min != NULL_TREE
216 && operand_equal_p (val, type_min, 0)));
217 }
218
219
220 /* Return whether TYPE should use an overflow infinity distinct from
221 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
222 represent a signed overflow during VRP computations. An infinity
223 is distinct from a half-range, which will go from some number to
224 TYPE_{MIN,MAX}_VALUE. */
225
226 static inline bool
227 needs_overflow_infinity (const_tree type)
228 {
229 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
230 }
231
232 /* Return whether TYPE can support our overflow infinity
233 representation: we use the TREE_OVERFLOW flag, which only exists
234 for constants. If TYPE doesn't support this, we don't optimize
235 cases which would require signed overflow--we drop them to
236 VARYING. */
237
238 static inline bool
239 supports_overflow_infinity (const_tree type)
240 {
241 tree min = vrp_val_min (type), max = vrp_val_max (type);
242 gcc_checking_assert (needs_overflow_infinity (type));
243 return (min != NULL_TREE
244 && CONSTANT_CLASS_P (min)
245 && max != NULL_TREE
246 && CONSTANT_CLASS_P (max));
247 }
248
249 /* VAL is the maximum or minimum value of a type. Return a
250 corresponding overflow infinity. */
251
252 static inline tree
253 make_overflow_infinity (tree val)
254 {
255 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
256 val = copy_node (val);
257 TREE_OVERFLOW (val) = 1;
258 return val;
259 }
260
261 /* Return a negative overflow infinity for TYPE. */
262
263 static inline tree
264 negative_overflow_infinity (tree type)
265 {
266 gcc_checking_assert (supports_overflow_infinity (type));
267 return make_overflow_infinity (vrp_val_min (type));
268 }
269
270 /* Return a positive overflow infinity for TYPE. */
271
272 static inline tree
273 positive_overflow_infinity (tree type)
274 {
275 gcc_checking_assert (supports_overflow_infinity (type));
276 return make_overflow_infinity (vrp_val_max (type));
277 }
278
279 /* Return whether VAL is a negative overflow infinity. */
280
281 static inline bool
282 is_negative_overflow_infinity (const_tree val)
283 {
284 return (TREE_OVERFLOW_P (val)
285 && needs_overflow_infinity (TREE_TYPE (val))
286 && vrp_val_is_min (val));
287 }
288
289 /* Return whether VAL is a positive overflow infinity. */
290
291 static inline bool
292 is_positive_overflow_infinity (const_tree val)
293 {
294 return (TREE_OVERFLOW_P (val)
295 && needs_overflow_infinity (TREE_TYPE (val))
296 && vrp_val_is_max (val));
297 }
298
299 /* Return whether VAL is a positive or negative overflow infinity. */
300
301 static inline bool
302 is_overflow_infinity (const_tree val)
303 {
304 return (TREE_OVERFLOW_P (val)
305 && needs_overflow_infinity (TREE_TYPE (val))
306 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
307 }
308
309 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
310
311 static inline bool
312 stmt_overflow_infinity (gimple *stmt)
313 {
314 if (is_gimple_assign (stmt)
315 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
316 GIMPLE_SINGLE_RHS)
317 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
318 return false;
319 }
320
321 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
322 the same value with TREE_OVERFLOW clear. This can be used to avoid
323 confusing a regular value with an overflow value. */
324
325 static inline tree
326 avoid_overflow_infinity (tree val)
327 {
328 if (!is_overflow_infinity (val))
329 return val;
330
331 if (vrp_val_is_max (val))
332 return vrp_val_max (TREE_TYPE (val));
333 else
334 {
335 gcc_checking_assert (vrp_val_is_min (val));
336 return vrp_val_min (TREE_TYPE (val));
337 }
338 }
339
340
341 /* Set value range VR to VR_UNDEFINED. */
342
343 static inline void
344 set_value_range_to_undefined (value_range *vr)
345 {
346 vr->type = VR_UNDEFINED;
347 vr->min = vr->max = NULL_TREE;
348 if (vr->equiv)
349 bitmap_clear (vr->equiv);
350 }
351
352
353 /* Set value range VR to VR_VARYING. */
354
355 static inline void
356 set_value_range_to_varying (value_range *vr)
357 {
358 vr->type = VR_VARYING;
359 vr->min = vr->max = NULL_TREE;
360 if (vr->equiv)
361 bitmap_clear (vr->equiv);
362 }
363
364
/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

static void
set_value_range (value_range *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
  /* Check the validity of the range.  */
  if (flag_checking
      && (t == VR_RANGE || t == VR_ANTI_RANGE))
    {
      int cmp;

      gcc_assert (min && max);

      /* TREE_OVERFLOW is only legitimate here as our overflow-infinity
	 marker, never as a leftover from constant folding.  */
      gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min))
		  && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max)));

      /* An integral anti-range must not cover the whole type; that
	 would be the empty range.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      /* MIN must be <= MAX; -2 means the bounds are incomparable
	 (e.g. symbolic), which is also acceptable.  */
      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      /* At most one of the two bounds may be an overflow infinity.  */
      if (needs_overflow_infinity (TREE_TYPE (min)))
	gcc_assert (!is_overflow_infinity (min)
		    || !is_overflow_infinity (max));
    }

  if (flag_checking
      && (t == VR_UNDEFINED || t == VR_VARYING))
    {
      /* UNDEFINED and VARYING carry no bounds and no equivalences.  */
      gcc_assert (min == NULL_TREE && max == NULL_TREE);
      gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
    }

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}
418
419
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

static void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* Swap [MIN, MAX] to [MAX + 1, MIN - 1] and invert the range
	 kind below.  */
      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  /* ~[TYPE_MIN, MAX] becomes [MAX + 1, TYPE_MAX].  */
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  /* ~[MIN, TYPE_MAX] becomes [TYPE_MIN, MIN - 1].  */
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Drop [-INF(OVF), +INF(OVF)] to varying.  */
  if (needs_overflow_infinity (TREE_TYPE (min))
      && is_overflow_infinity (min)
      && is_overflow_infinity (max))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, t, min, max, equiv);
}
539
/* Copy value range FROM into value range TO.  Note that the
   equivalence bitmap is deep-copied by set_value_range, not
   shared.  */

static inline void
copy_value_range (value_range *to, value_range *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}
547
548 /* Set value range VR to a single value. This function is only called
549 with values we get from statements, and exists to clear the
550 TREE_OVERFLOW flag so that we don't think we have an overflow
551 infinity when we shouldn't. */
552
553 static inline void
554 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
555 {
556 gcc_assert (is_gimple_min_invariant (val));
557 if (TREE_OVERFLOW_P (val))
558 val = drop_tree_overflow (val);
559 set_value_range (vr, VR_RANGE, val, val, equiv);
560 }
561
562 /* Set value range VR to a non-negative range of type TYPE.
563 OVERFLOW_INFINITY indicates whether to use an overflow infinity
564 rather than TYPE_MAX_VALUE; this should be true if we determine
565 that the range is nonnegative based on the assumption that signed
566 overflow does not occur. */
567
568 static inline void
569 set_value_range_to_nonnegative (value_range *vr, tree type,
570 bool overflow_infinity)
571 {
572 tree zero;
573
574 if (overflow_infinity && !supports_overflow_infinity (type))
575 {
576 set_value_range_to_varying (vr);
577 return;
578 }
579
580 zero = build_int_cst (type, 0);
581 set_value_range (vr, VR_RANGE, zero,
582 (overflow_infinity
583 ? positive_overflow_infinity (type)
584 : TYPE_MAX_VALUE (type)),
585 vr->equiv);
586 }
587
/* Set value range VR to a non-NULL range of type TYPE, i.e. the
   anti-range ~[0, 0].  */

static inline void
set_value_range_to_nonnull (value_range *vr, tree type)
{
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
}
596
597
/* Set value range VR to a NULL range of type TYPE, i.e. the singleton
   range [0, 0].  */

static inline void
set_value_range_to_null (value_range *vr, tree type)
{
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}
605
606
607 /* Set value range VR to a range of a truthvalue of type TYPE. */
608
609 static inline void
610 set_value_range_to_truthvalue (value_range *vr, tree type)
611 {
612 if (TYPE_PRECISION (type) == 1)
613 set_value_range_to_varying (vr);
614 else
615 set_value_range (vr, VR_RANGE,
616 build_int_cst (type, 0), build_int_cst (type, 1),
617 vr->equiv);
618 }
619
620
/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  */

static void
abs_extent_range (value_range *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  /* ABS_EXPR of the type's minimum overflows (sets TREE_OVERFLOW);
     such an extent cannot be represented, so drop to varying.  */
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    /* abs (min) < abs (max): extent is [-max, max].  */
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      /* abs (min) >= abs (max): extent is [-min, min].  */
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      /* Bounds incomparable (cmp == -2); be conservative.  */
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
655
656
/* Return value range information for VAR.

   If we have no values ranges recorded (ie, VRP is not running), then
   return NULL.  Otherwise create an empty range if none existed for VAR.  */

static value_range *
get_value_range (const_tree var)
{
  /* Shared read-only VARYING range returned when we must not (or
     cannot) allocate a fresh one.  */
  static const value_range vr_const_varying
    = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
  value_range *vr;
  tree sym;
  unsigned ver = SSA_NAME_VERSION (var);

  /* If we have no recorded ranges, then return NULL.  */
  if (! vr_value)
    return NULL;

  /* If we query the range for a new SSA name return an unmodifiable VARYING.
     We should get here at most from the substitute-and-fold stage which
     will never try to change values.  */
  if (ver >= num_vr_values)
    return CONST_CAST (value_range *, &vr_const_varying);

  vr = vr_value[ver];
  if (vr)
    return vr;

  /* After propagation finished do not allocate new value-ranges.  */
  if (values_propagated)
    return CONST_CAST (value_range *, &vr_const_varying);

  /* Create a default value range.  XCNEW zero-fills, so the new range
     starts out as VR_UNDEFINED with NULL bounds.  */
  vr_value[ver] = vr = XCNEW (value_range);

  /* Defer allocating the equivalence set.  */
  vr->equiv = NULL;

  /* If VAR is a default definition of a parameter, the variable can
     take any value in VAR's type.  */
  if (SSA_NAME_IS_DEFAULT_DEF (var))
    {
      sym = SSA_NAME_VAR (var);
      if (TREE_CODE (sym) == PARM_DECL)
	{
	  /* Try to use the "nonnull" attribute to create ~[0, 0]
	     anti-ranges for pointers.  Note that this is only valid with
	     default definitions of PARM_DECLs.  */
	  if (POINTER_TYPE_P (TREE_TYPE (sym))
	      && nonnull_arg_p (sym))
	    set_value_range_to_nonnull (vr, TREE_TYPE (sym));
	  else
	    set_value_range_to_varying (vr);
	}
      else if (TREE_CODE (sym) == RESULT_DECL
	       && DECL_BY_REFERENCE (sym))
	/* The pointer backing a by-reference result is never NULL.  */
	set_value_range_to_nonnull (vr, TREE_TYPE (sym));
    }

  return vr;
}
718
719 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
720
721 static inline bool
722 vrp_operand_equal_p (const_tree val1, const_tree val2)
723 {
724 if (val1 == val2)
725 return true;
726 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
727 return false;
728 return is_overflow_infinity (val1) == is_overflow_infinity (val2);
729 }
730
731 /* Return true, if the bitmaps B1 and B2 are equal. */
732
733 static inline bool
734 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
735 {
736 return (b1 == b2
737 || ((!b1 || bitmap_empty_p (b1))
738 && (!b2 || bitmap_empty_p (b2)))
739 || (b1 && b2
740 && bitmap_equal_p (b1, b2)));
741 }
742
/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   value.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range *new_vr)
{
  value_range *old_vr;
  bool is_new;

  /* If there is a value-range on the SSA name from earlier analysis
     factor that in.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
    {
      wide_int min, max;
      value_range_type rtype = get_range_info (var, &min, &max);
      if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
	{
	  /* The recorded range info is stored as wide_ints; convert
	     back to trees and intersect it into NEW_VR.  */
	  value_range nr;
	  nr.type = rtype;
	  nr.min = wide_int_to_tree (TREE_TYPE (var), min);
	  nr.max = wide_int_to_tree (TREE_TYPE (var), max);
	  nr.equiv = NULL;
	  vrp_intersect_ranges (new_vr, &nr);
	}
    }

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    {
      /* Do not allow transitions up the lattice.  The following
	 is slightly more awkward than just new_vr->type < old_vr->type
	 because VR_RANGE and VR_ANTI_RANGE need to be considered
	 the same.  We may not have is_new when transitioning to
	 UNDEFINED.  If old_vr->type is VARYING, we shouldn't be
	 called.  */
      if (new_vr->type == VR_UNDEFINED)
	{
	  BITMAP_FREE (new_vr->equiv);
	  set_value_range_to_varying (old_vr);
	  set_value_range_to_varying (new_vr);
	  return true;
	}
      else
	set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
			 new_vr->equiv);
    }

  /* NEW_VR is a throwaway; release its equivalence set (see the NOTE
     above).  */
  BITMAP_FREE (new_vr->equiv);

  return is_new;
}
807
808
809 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
810 point where equivalence processing can be turned on/off. */
811
812 static void
813 add_equivalence (bitmap *equiv, const_tree var)
814 {
815 unsigned ver = SSA_NAME_VERSION (var);
816 value_range *vr = vr_value[ver];
817
818 if (*equiv == NULL)
819 *equiv = BITMAP_ALLOC (NULL);
820 bitmap_set_bit (*equiv, ver);
821 if (vr && vr->equiv)
822 bitmap_ior_into (*equiv, vr->equiv);
823 }
824
825
826 /* Return true if VR is ~[0, 0]. */
827
828 static inline bool
829 range_is_nonnull (value_range *vr)
830 {
831 return vr->type == VR_ANTI_RANGE
832 && integer_zerop (vr->min)
833 && integer_zerop (vr->max);
834 }
835
836
837 /* Return true if VR is [0, 0]. */
838
839 static inline bool
840 range_is_null (value_range *vr)
841 {
842 return vr->type == VR_RANGE
843 && integer_zerop (vr->min)
844 && integer_zerop (vr->max);
845 }
846
847 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
848 a singleton. */
849
850 static inline bool
851 range_int_cst_p (value_range *vr)
852 {
853 return (vr->type == VR_RANGE
854 && TREE_CODE (vr->max) == INTEGER_CST
855 && TREE_CODE (vr->min) == INTEGER_CST);
856 }
857
858 /* Return true if VR is a INTEGER_CST singleton. */
859
860 static inline bool
861 range_int_cst_singleton_p (value_range *vr)
862 {
863 return (range_int_cst_p (vr)
864 && !is_overflow_infinity (vr->min)
865 && !is_overflow_infinity (vr->max)
866 && tree_int_cst_equal (vr->min, vr->max));
867 }
868
869 /* Return true if value range VR involves at least one symbol. */
870
871 static inline bool
872 symbolic_range_p (value_range *vr)
873 {
874 return (!is_gimple_min_invariant (vr->min)
875 || !is_gimple_min_invariant (vr->max));
876 }
877
/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

static tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  /* Peel off an invariant operand of a top-level additive operation,
     leaving T as the (possibly negated) symbol part.  */
  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  /* INV + SYM or INV - SYM; the symbol is negated in the
	     latter case.  */
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  /* SYM op INV.  NOTE(review): for MINUS_EXPR the invariant is
	     returned un-negated here — confirm callers account for the
	     sign of INV.  */
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
	return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  /* An explicit negation flips the sign of the symbol.  */
  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  /* Only write the output parameters on success.  */
  *neg = neg_;
  *inv = inv_;
  return t;
}
926
927 /* The reverse operation: build a symbolic expression with TYPE
928 from symbol SYM, negated according to NEG, and invariant INV. */
929
930 static tree
931 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
932 {
933 const bool pointer_p = POINTER_TYPE_P (type);
934 tree t = sym;
935
936 if (neg)
937 t = build1 (NEGATE_EXPR, type, t);
938
939 if (integer_zerop (inv))
940 return t;
941
942 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
943 }
944
945 /* Return true if value range VR involves exactly one symbol SYM. */
946
947 static bool
948 symbolic_range_based_on_p (value_range *vr, const_tree sym)
949 {
950 bool neg, min_has_symbol, max_has_symbol;
951 tree inv;
952
953 if (is_gimple_min_invariant (vr->min))
954 min_has_symbol = false;
955 else if (get_single_symbol (vr->min, &neg, &inv) == sym)
956 min_has_symbol = true;
957 else
958 return false;
959
960 if (is_gimple_min_invariant (vr->max))
961 max_has_symbol = false;
962 else if (get_single_symbol (vr->max, &neg, &inv) == sym)
963 max_has_symbol = true;
964 else
965 return false;
966
967 return (min_has_symbol || max_has_symbol);
968 }
969
970 /* Return true if value range VR uses an overflow infinity. */
971
972 static inline bool
973 overflow_infinity_range_p (value_range *vr)
974 {
975 return (vr->type == VR_RANGE
976 && (is_overflow_infinity (vr->min)
977 || is_overflow_infinity (vr->max)));
978 }
979
980 /* Return false if we can not make a valid comparison based on VR;
981 this will be the case if it uses an overflow infinity and overflow
982 is not undefined (i.e., -fno-strict-overflow is in effect).
983 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
984 uses an overflow infinity. */
985
986 static bool
987 usable_range_p (value_range *vr, bool *strict_overflow_p)
988 {
989 gcc_assert (vr->type == VR_RANGE);
990 if (is_overflow_infinity (vr->min))
991 {
992 *strict_overflow_p = true;
993 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
994 return false;
995 }
996 if (is_overflow_infinity (vr->max))
997 {
998 *strict_overflow_p = true;
999 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
1000 return false;
1001 }
1002 return true;
1003 }
1004
1005 /* Return true if the result of assignment STMT is know to be non-zero.
1006 If the return value is based on the assumption that signed overflow is
1007 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1008 *STRICT_OVERFLOW_P.*/
1009
1010 static bool
1011 gimple_assign_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1012 {
1013 enum tree_code code = gimple_assign_rhs_code (stmt);
1014 switch (get_gimple_rhs_class (code))
1015 {
1016 case GIMPLE_UNARY_RHS:
1017 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1018 gimple_expr_type (stmt),
1019 gimple_assign_rhs1 (stmt),
1020 strict_overflow_p);
1021 case GIMPLE_BINARY_RHS:
1022 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1023 gimple_expr_type (stmt),
1024 gimple_assign_rhs1 (stmt),
1025 gimple_assign_rhs2 (stmt),
1026 strict_overflow_p);
1027 case GIMPLE_TERNARY_RHS:
1028 return false;
1029 case GIMPLE_SINGLE_RHS:
1030 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1031 strict_overflow_p);
1032 case GIMPLE_INVALID_RHS:
1033 gcc_unreachable ();
1034 default:
1035 gcc_unreachable ();
1036 }
1037 }
1038
1039 /* Return true if STMT is known to compute a non-zero value.
1040 If the return value is based on the assumption that signed overflow is
1041 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1042 *STRICT_OVERFLOW_P.*/
1043
1044 static bool
1045 gimple_stmt_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1046 {
1047 switch (gimple_code (stmt))
1048 {
1049 case GIMPLE_ASSIGN:
1050 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1051 case GIMPLE_CALL:
1052 {
1053 tree fndecl = gimple_call_fndecl (stmt);
1054 if (!fndecl) return false;
1055 if (flag_delete_null_pointer_checks && !flag_check_new
1056 && DECL_IS_OPERATOR_NEW (fndecl)
1057 && !TREE_NOTHROW (fndecl))
1058 return true;
1059 /* References are always non-NULL. */
1060 if (flag_delete_null_pointer_checks
1061 && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
1062 return true;
1063 if (flag_delete_null_pointer_checks &&
1064 lookup_attribute ("returns_nonnull",
1065 TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
1066 return true;
1067 return gimple_alloca_call_p (stmt);
1068 }
1069 default:
1070 gcc_unreachable ();
1071 }
1072 }
1073
1074 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1075 obtained so far. */
1076
1077 static bool
1078 vrp_stmt_computes_nonzero (gimple *stmt, bool *strict_overflow_p)
1079 {
1080 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1081 return true;
1082
1083 /* If we have an expression of the form &X->a, then the expression
1084 is nonnull if X is nonnull. */
1085 if (is_gimple_assign (stmt)
1086 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1087 {
1088 tree expr = gimple_assign_rhs1 (stmt);
1089 tree base = get_base_address (TREE_OPERAND (expr, 0));
1090
1091 if (base != NULL_TREE
1092 && TREE_CODE (base) == MEM_REF
1093 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1094 {
1095 value_range *vr = get_value_range (TREE_OPERAND (base, 0));
1096 if (range_is_nonnull (vr))
1097 return true;
1098 }
1099 }
1100
1101 return false;
1102 }
1103
1104 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1105 a gimple invariant, or SSA_NAME +- CST. */
1106
1107 static bool
1108 valid_value_p (tree expr)
1109 {
1110 if (TREE_CODE (expr) == SSA_NAME)
1111 return true;
1112
1113 if (TREE_CODE (expr) == PLUS_EXPR
1114 || TREE_CODE (expr) == MINUS_EXPR)
1115 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1116 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1117
1118 return is_gimple_min_invariant (expr);
1119 }
1120
/* Return
    1 if VAL < VAL2
    0 if !(VAL < VAL2)
    -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      /* Suppress overflow diagnostics while folding -- this is a pure
	 query, not user-visible arithmetic.  */
      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      /* Folding did not produce a constant answer: incomparable.  */
      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  Note this point is
     reached only via the folded path above; the INTEGER_CST fast path
     returns directly.  Correct the answer for overflow infinities, which
     compare beyond any ordinary value of the type.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
1157
/* Compare two values VAL1 and VAL2.  Return

   	-2 if VAL1 and VAL2 cannot be compared at compile-time,
   	-1 if VAL1 < VAL2,
   	 0 if VAL1 == VAL2,
   	+1 if VAL1 > VAL2, and
   	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  /* Identical trees are trivially equal.  */
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  /* Symbolic comparison: both operands are of the form NAME, -NAME,
     NAME + CST or NAME - CST.  */
  if ((TREE_CODE (val1) == SSA_NAME
       || (TREE_CODE (val1) == NEGATE_EXPR
	   && TREE_CODE (TREE_OPERAND (val1, 0)) == SSA_NAME)
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
	  || (TREE_CODE (val2) == NEGATE_EXPR
	      && TREE_CODE (TREE_OPERAND (val2, 0)) == SSA_NAME)
	  || TREE_CODE (val2) == PLUS_EXPR
	  || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form '[-]NAME [+-] CST' or 'NAME',
	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
	 same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME || TREE_CODE (val1) == NEGATE_EXPR)
	{
	  code1 = SSA_NAME;
	  n1 = val1;
	  c1 = NULL_TREE;
	}
      else
	{
	  code1 = TREE_CODE (val1);
	  n1 = TREE_OPERAND (val1, 0);
	  c1 = TREE_OPERAND (val1, 1);
	  /* Normalize a negative constant: NAME + (-C) becomes
	     NAME - C and vice versa, so below only nonnegative
	     constants need to be compared.  */
	  if (tree_int_cst_sgn (c1) == -1)
	    {
	      if (is_negative_overflow_infinity (c1))
		return -2;
	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
	      if (!c1)
		return -2;
	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      if (TREE_CODE (val2) == SSA_NAME || TREE_CODE (val2) == NEGATE_EXPR)
	{
	  code2 = SSA_NAME;
	  n2 = val2;
	  c2 = NULL_TREE;
	}
      else
	{
	  code2 = TREE_CODE (val2);
	  n2 = TREE_OPERAND (val2, 0);
	  c2 = TREE_OPERAND (val2, 1);
	  /* Normalize a negative constant, as for C1 above.  */
	  if (tree_int_cst_sgn (c2) == -1)
	    {
	      if (is_negative_overflow_infinity (c2))
		return -2;
	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
	      if (!c2)
		return -2;
	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      /* Both values must use the same name.  */
      if (TREE_CODE (n1) == NEGATE_EXPR && TREE_CODE (n2) == NEGATE_EXPR)
	{
	  n1 = TREE_OPERAND (n1, 0);
	  n2 = TREE_OPERAND (n2, 0);
	}
      if (n1 != n2)
	return -2;

      if (code1 == SSA_NAME && code2 == SSA_NAME)
	/* NAME == NAME  */
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
	return -2;

      /* Everything past here assumes NAME +- CST does not wrap; record
	 that the answer depends on undefined signed overflow, unless a
	 no-warning flag suppresses it.  */
      if (strict_overflow_p != NULL
	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (code1 == SSA_NAME)
	{
	  if (code2 == PLUS_EXPR)
	    /* NAME < NAME + CST  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME > NAME - CST  */
	    return 1;
	}
      else if (code1 == PLUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME + CST > NAME  */
	    return 1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
	    return compare_values_warnv (c1, c2, strict_overflow_p);
	  else if (code2 == MINUS_EXPR)
	    /* NAME + CST1 > NAME - CST2  */
	    return 1;
	}
      else if (code1 == MINUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME - CST < NAME  */
	    return -1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME - CST1 < NAME + CST2  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
	       C1 and C2 are swapped in the call to compare_values.  */
	    return compare_values_warnv (c2, c1, strict_overflow_p);
	}

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  /* -INF sorts before everything but -INF; +INF sorts after
	     everything but +INF.  */
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
          t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
1366
1367 /* Compare values like compare_values_warnv, but treat comparisons of
1368 nonconstants which rely on undefined overflow as incomparable. */
1369
1370 static int
1371 compare_values (tree val1, tree val2)
1372 {
1373 bool sop;
1374 int ret;
1375
1376 sop = false;
1377 ret = compare_values_warnv (val1, val2, &sop);
1378 if (sop
1379 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1380 ret = -2;
1381 return ret;
1382 }
1383
1384
1385 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1386 0 if VAL is not inside [MIN, MAX],
1387 -2 if we cannot tell either way.
1388
1389 Benchmark compile/20001226-1.c compilation time after changing this
1390 function. */
1391
1392 static inline int
1393 value_inside_range (tree val, tree min, tree max)
1394 {
1395 int cmp1, cmp2;
1396
1397 cmp1 = operand_less_p (val, min);
1398 if (cmp1 == -2)
1399 return -2;
1400 if (cmp1 == 1)
1401 return 0;
1402
1403 cmp2 = operand_less_p (max, val);
1404 if (cmp2 == -2)
1405 return -2;
1406
1407 return !cmp2;
1408 }
1409
1410
1411 /* Return true if value ranges VR0 and VR1 have a non-empty
1412 intersection.
1413
1414 Benchmark compile/20001226-1.c compilation time after changing this
1415 function.
1416 */
1417
1418 static inline bool
1419 value_ranges_intersect_p (value_range *vr0, value_range *vr1)
1420 {
1421 /* The value ranges do not intersect if the maximum of the first range is
1422 less than the minimum of the second range or vice versa.
1423 When those relations are unknown, we can't do any better. */
1424 if (operand_less_p (vr0->max, vr1->min) != 0)
1425 return false;
1426 if (operand_less_p (vr1->max, vr0->min) != 0)
1427 return false;
1428 return true;
1429 }
1430
1431
1432 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1433 include the value zero, -2 if we cannot tell. */
1434
1435 static inline int
1436 range_includes_zero_p (tree min, tree max)
1437 {
1438 tree zero = build_int_cst (TREE_TYPE (min), 0);
1439 return value_inside_range (zero, min, max);
1440 }
1441
1442 /* Return true if *VR is know to only contain nonnegative values. */
1443
1444 static inline bool
1445 value_range_nonnegative_p (value_range *vr)
1446 {
1447 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1448 which would return a useful value should be encoded as a
1449 VR_RANGE. */
1450 if (vr->type == VR_RANGE)
1451 {
1452 int result = compare_values (vr->min, integer_zero_node);
1453 return (result == 0 || result == 1);
1454 }
1455
1456 return false;
1457 }
1458
1459 /* If *VR has a value rante that is a single constant value return that,
1460 otherwise return NULL_TREE. */
1461
1462 static tree
1463 value_range_constant_singleton (value_range *vr)
1464 {
1465 if (vr->type == VR_RANGE
1466 && operand_equal_p (vr->min, vr->max, 0)
1467 && is_gimple_min_invariant (vr->min))
1468 return vr->min;
1469
1470 return NULL_TREE;
1471 }
1472
1473 /* If OP has a value range with a single constant value return that,
1474 otherwise return NULL_TREE. This returns OP itself if OP is a
1475 constant. */
1476
1477 static tree
1478 op_with_constant_singleton_value_range (tree op)
1479 {
1480 if (is_gimple_min_invariant (op))
1481 return op;
1482
1483 if (TREE_CODE (op) != SSA_NAME)
1484 return NULL_TREE;
1485
1486 return value_range_constant_singleton (get_value_range (op));
1487 }
1488
1489 /* Return true if op is in a boolean [0, 1] value-range. */
1490
1491 static bool
1492 op_with_boolean_value_range_p (tree op)
1493 {
1494 value_range *vr;
1495
1496 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1497 return true;
1498
1499 if (integer_zerop (op)
1500 || integer_onep (op))
1501 return true;
1502
1503 if (TREE_CODE (op) != SSA_NAME)
1504 return false;
1505
1506 vr = get_value_range (op);
1507 return (vr->type == VR_RANGE
1508 && integer_zerop (vr->min)
1509 && integer_onep (vr->max));
1510 }
1511
/* Extract value range information from an ASSERT_EXPR EXPR and store
   it in *VR_P.  The ASSERT_EXPR carries a comparison COND involving
   VAR; the resulting range for VAR is the asserted range intersected
   with whatever was already known about VAR.  */

static void
extract_range_from_assert (value_range *vr_p, tree expr)
{
  tree var, cond, limit, min, max, type;
  value_range *limit_vr;
  enum tree_code cond_code;

  var = ASSERT_EXPR_VAR (expr);
  cond = ASSERT_EXPR_COND (expr);

  gcc_assert (COMPARISON_CLASS_P (cond));

  /* Find VAR in the ASSERT_EXPR conditional.  */
  if (var == TREE_OPERAND (cond, 0)
      || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
      || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
    {
      /* If the predicate is of the form VAR COMP LIMIT, then we just
	 take LIMIT from the RHS and use the same comparison code.  */
      cond_code = TREE_CODE (cond);
      limit = TREE_OPERAND (cond, 1);
      cond = TREE_OPERAND (cond, 0);
    }
  else
    {
      /* If the predicate is of the form LIMIT COMP VAR, then we need
	 to flip around the comparison code to create the proper range
	 for VAR.  */
      cond_code = swap_tree_comparison (TREE_CODE (cond));
      limit = TREE_OPERAND (cond, 0);
      cond = TREE_OPERAND (cond, 1);
    }

  /* Replace an overflow-infinity LIMIT by an ordinary value.  */
  limit = avoid_overflow_infinity (limit);

  type = TREE_TYPE (var);
  gcc_assert (limit != var);

  /* For pointer arithmetic, we only keep track of pointer equality
     and inequality.  */
  if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
    {
      set_value_range_to_varying (vr_p);
      return;
    }

  /* If LIMIT is another SSA name and LIMIT has a range of its own,
     try to use LIMIT's range to avoid creating symbolic ranges
     unnecessarily. */
  limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;

  /* LIMIT's range is only interesting if it has any useful information.  */
  if (limit_vr
      && (limit_vr->type == VR_UNDEFINED
	  || limit_vr->type == VR_VARYING
	  || symbolic_range_p (limit_vr)))
    limit_vr = NULL;

  /* Initially, the new range has the same set of equivalences of
     VAR's range.  This will be revised before returning the final
     value.  Since assertions may be chained via mutually exclusive
     predicates, we will need to trim the set of equivalences before
     we are done.  */
  gcc_assert (vr_p->equiv == NULL);
  add_equivalence (&vr_p->equiv, var);

  /* Extract a new range based on the asserted comparison for VAR and
     LIMIT's value range.  Notice that if LIMIT has an anti-range, we
     will only use it for equality comparisons (EQ_EXPR).  For any
     other kind of assertion, we cannot derive a range from LIMIT's
     anti-range that can be used to describe the new range.  For
     instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
     then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
     no single range for x_2 that could describe LE_EXPR, so we might
     as well build the range [b_4, +INF] for it.
     One special case we handle is extracting a range from a
     range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (cond) == NOP_EXPR
      || TREE_CODE (cond) == PLUS_EXPR)
    {
      if (TREE_CODE (cond) == PLUS_EXPR)
	{
	  /* (VAR + CST) <= LIMIT tests VAR against the shifted window
	     [-CST, LIMIT - CST].  */
	  min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
			     TREE_OPERAND (cond, 1));
	  max = int_const_binop (PLUS_EXPR, limit, min);
	  cond = TREE_OPERAND (cond, 0);
	}
      else
	{
	  /* Plain (unsigned)VAR <= LIMIT tests [0, LIMIT].  */
	  min = build_int_cst (TREE_TYPE (var), 0);
	  max = limit;
	}

      /* Make sure to not set TREE_OVERFLOW on the final type
	 conversion.  We are willingly interpreting large positive
	 unsigned values as negative signed values here.  */
      min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
      max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);

      /* We can transform a max, min range to an anti-range or
         vice-versa.  Use set_and_canonicalize_value_range which does
	 this for us.  */
      if (cond_code == LE_EXPR)
	set_and_canonicalize_value_range (vr_p, VR_RANGE,
					  min, max, vr_p->equiv);
      else if (cond_code == GT_EXPR)
	set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
					  min, max, vr_p->equiv);
      else
	gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_type range_type;

      /* VAR == LIMIT: adopt LIMIT's known range, or the singleton
	 [LIMIT, LIMIT] when no useful range is recorded.  */
      if (limit_vr)
	{
	  range_type = limit_vr->type;
	  min = limit_vr->min;
	  max = limit_vr->max;
	}
      else
	{
	  range_type = VR_RANGE;
	  min = limit;
	  max = limit;
	}

      set_value_range (vr_p, range_type, min, max, vr_p->equiv);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
	 SSA name, the new range will also inherit the equivalence set
	 from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
	add_equivalence (&vr_p->equiv, limit);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
	 this assertion is an inequality (NE_EXPR), then we cannot
	 derive anything from the anti-range.  For instance, if
	 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
	 not imply that VAR's range is [0, 0].  So, in the case of
	 anti-ranges, we just assert the inequality using LIMIT and
	 not its anti-range.

	 If LIMIT_VR is a range, we can only use it to build a new
	 anti-range if LIMIT_VR is a single-valued range.  For
	 instance, if LIMIT_VR is [0, 1], the predicate
	 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
	 Rather, it means that for value 0 VAR should be ~[0, 0]
	 and for value 1, VAR should be ~[1, 1].  We cannot
	 represent these ranges.

	 The only situation in which we can build a valid
	 anti-range is when LIMIT_VR is a single-valued range
	 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
	 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
      if (limit_vr
	  && limit_vr->type == VR_RANGE
	  && compare_values (limit_vr->min, limit_vr->max) == 0)
	{
	  min = limit_vr->min;
	  max = limit_vr->max;
	}
      else
	{
	  /* In any other case, we cannot use LIMIT's range to build a
	     valid anti-range.  */
	  min = max = limit;
	}

      /* If MIN and MAX cover the whole range for their type, then
	 just use the original LIMIT.  */
      if (INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (min)
	  && vrp_val_is_max (max))
	min = max = limit;

      set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
					min, max, vr_p->equiv);
    }
  else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
	max = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
	     LT_EXPR.  */
	  max = limit_vr->max;
	}

      /* If the maximum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if ((cond_code == LT_EXPR
	   && compare_values (max, min) == 0)
	  || is_overflow_infinity (max))
	set_value_range_to_varying (vr_p);
      else
	{
	  /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
	  if (cond_code == LT_EXPR)
	    {
	      /* A 1-bit signed type cannot represent 1, so subtract
		 by adding -1 instead.  */
	      if (TYPE_PRECISION (TREE_TYPE (max)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (max)))
		max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), -1));
	      else
		max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), 1));
	      /* Silence overflow warnings on the freshly built bound.  */
	      if (EXPR_P (max))
		TREE_NO_WARNING (max) = 1;
	    }

	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
	min = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
	     GT_EXPR.  */
	  min = limit_vr->min;
	}

      /* If the minimum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if ((cond_code == GT_EXPR
	   && compare_values (min, max) == 0)
	  || is_overflow_infinity (min))
	set_value_range_to_varying (vr_p);
      else
	{
	  /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
	  if (cond_code == GT_EXPR)
	    {
	      /* As above, mirror the 1-bit signed special case.  */
	      if (TYPE_PRECISION (TREE_TYPE (min)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (min)))
		min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), -1));
	      else
		min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), 1));
	      /* Silence overflow warnings on the freshly built bound.  */
	      if (EXPR_P (min))
		TREE_NO_WARNING (min) = 1;
	    }

	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
	}
    }
  else
    gcc_unreachable ();

  /* Finally intersect the new range with what we already know about var.  */
  vrp_intersect_ranges (vr_p, get_value_range (var));
}
1783
1784
1785 /* Extract range information from SSA name VAR and store it in VR. If
1786 VAR has an interesting range, use it. Otherwise, create the
1787 range [VAR, VAR] and return it. This is useful in situations where
1788 we may have conditionals testing values of VARYING names. For
1789 instance,
1790
1791 x_3 = y_5;
1792 if (x_3 > y_5)
1793 ...
1794
1795 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1796 always false. */
1797
1798 static void
1799 extract_range_from_ssa_name (value_range *vr, tree var)
1800 {
1801 value_range *var_vr = get_value_range (var);
1802
1803 if (var_vr->type != VR_VARYING)
1804 copy_value_range (vr, var_vr);
1805 else
1806 set_value_range (vr, VR_RANGE, var, var, NULL);
1807
1808 add_equivalence (&vr->equiv, var);
1809 }
1810
1811
/* Wrapper around int_const_binop.  If the operation overflows and we
   are not using wrapping arithmetic, then adjust the result to be
   -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
   NULL_TREE if we need to use an overflow infinity representation but
   the type does not support it.  */

static tree
vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
{
  tree res;

  res = int_const_binop (code, val1, val2);

  /* If we are using unsigned arithmetic, operate symbolically
     on -INF and +INF as int_const_binop only handles signed overflow.  */
  if (TYPE_UNSIGNED (TREE_TYPE (val1)))
    {
      int checkz = compare_values (res, val1);
      bool overflow = false;

      /* Ensure that res = val1 [+*] val2 >= val1
         or that res = val1 - val2 <= val1.  */
      if ((code == PLUS_EXPR
	   && !(checkz == 1 || checkz == 0))
          || (code == MINUS_EXPR
	      && !(checkz == 0 || checkz == -1)))
	{
	  overflow = true;
	}
      /* Checking for multiplication overflow is done by dividing the
	 output of the multiplication by the first input of the
	 multiplication.  If the result of that division operation is
	 not equal to the second input of the multiplication, then the
	 multiplication overflowed.  */
      else if (code == MULT_EXPR && !integer_zerop (val1))
	{
	  tree tmp = int_const_binop (TRUNC_DIV_EXPR,
				      res,
				      val1);
	  int check = compare_values (tmp, val2);

	  if (check != 0)
	    overflow = true;
	}

      /* Mark the overflowed result; callers treat TREE_OVERFLOW
	 constants as overflow infinities.  */
      if (overflow)
	{
	  res = copy_node (res);
	  TREE_OVERFLOW (res) = 1;
	}

    }
  else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
    /* If the signed operation wraps then int_const_binop has done
       everything we want.  */
    ;
  /* Signed division of -1/0 overflows and by the time it gets here
     returns NULL_TREE.  */
  else if (!res)
    return NULL_TREE;
  else if ((TREE_OVERFLOW (res)
	    && !TREE_OVERFLOW (val1)
	    && !TREE_OVERFLOW (val2))
	   || is_overflow_infinity (val1)
	   || is_overflow_infinity (val2))
    {
      /* If the operation overflowed but neither VAL1 nor VAL2 are
	 overflown, return -INF or +INF depending on the operation
	 and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      if (needs_overflow_infinity (TREE_TYPE (res))
	  && !supports_overflow_infinity (TREE_TYPE (res)))
	return NULL_TREE;

      /* We have to punt on adding infinities of different signs,
	 since we can't tell what the sign of the result should be.
	 Likewise for subtracting infinities of the same sign.  */
      if (((code == PLUS_EXPR && sgn1 != sgn2)
	   || (code == MINUS_EXPR && sgn1 == sgn2))
	  && is_overflow_infinity (val1)
	  && is_overflow_infinity (val2))
	return NULL_TREE;

      /* Don't try to handle division or shifting of infinities.  */
      if ((code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR
	   || code == RSHIFT_EXPR)
	  && (is_overflow_infinity (val1)
	      || is_overflow_infinity (val2)))
	return NULL_TREE;

      /* Notice that we only need to handle the restricted set of
	 operations handled by extract_range_from_binary_expr.
	 Among them, only multiplication, addition and subtraction
	 can yield overflow without overflown operands because we
	 are working with integral types only... except in the
	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
	 for division too.  */

      /* For multiplication, the sign of the overflow is given
	 by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
	     to yield an overflow.  Its sign is therefore that
	     of one of the operands, for example the first.  For
	     infinite operands X + -INF is negative, not positive.  */
	  || (code == PLUS_EXPR
	      && (sgn1 >= 0
		  ? !is_negative_overflow_infinity (val2)
		  : is_positive_overflow_infinity (val2)))
	  /* For subtraction, non-infinite operands must be of
	     different signs to yield an overflow.  Its sign is
	     therefore that of the first operand or the opposite of
	     that of the second operand.  A first operand of 0 counts
	     as positive here, for the corner case 0 - (-INF), which
	     overflows, but must yield +INF.  For infinite operands 0
	     - INF is negative, not positive.  */
	  || (code == MINUS_EXPR
	      && (sgn1 >= 0
		  ? !is_positive_overflow_infinity (val2)
		  : is_negative_overflow_infinity (val2)))
	  /* We only get in here with positive shift count, so the
	     overflow direction is the same as the sign of val1.
	     Actually rshift does not overflow at all, but we only
	     handle the case of shifting overflowed -INF and +INF.  */
	  || (code == RSHIFT_EXPR
	      && sgn1 >= 0)
	  /* For division, the only case is -INF / -1 = +INF.  */
	  || code == TRUNC_DIV_EXPR
	  || code == FLOOR_DIV_EXPR
	  || code == CEIL_DIV_EXPR
	  || code == EXACT_DIV_EXPR
	  || code == ROUND_DIV_EXPR)
	return (needs_overflow_infinity (TREE_TYPE (res))
		? positive_overflow_infinity (TREE_TYPE (res))
		: TYPE_MAX_VALUE (TREE_TYPE (res)));
      else
	return (needs_overflow_infinity (TREE_TYPE (res))
		? negative_overflow_infinity (TREE_TYPE (res))
		: TYPE_MIN_VALUE (TREE_TYPE (res)));
    }

  return res;
}
1961
1962
/* For range VR compute two wide_int bitmasks.  In *MAY_BE_NONZERO
   bitmask if some bit is unset, it means for all numbers in the range
   the bit is 0, otherwise it might be 0 or 1.  In *MUST_BE_NONZERO
   bitmask if some bit is set, it means for all numbers in the range
   the bit is 1, otherwise it might be 0 or 1.  Return true if the
   masks carry information beyond the trivial defaults.  */

static bool
zero_nonzero_bits_from_vr (const tree expr_type,
			   value_range *vr,
			   wide_int *may_be_nonzero,
			   wide_int *must_be_nonzero)
{
  /* Start from the conservative answer: every bit may be nonzero,
     no bit must be.  */
  *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
  *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
  /* Only constant, non-overflowed [min, max] ranges are usable.  */
  if (!range_int_cst_p (vr)
      || is_overflow_infinity (vr->min)
      || is_overflow_infinity (vr->max))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      /* A single value: its bits are known exactly.  */
      *may_be_nonzero = vr->min;
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
	   || tree_int_cst_sgn (vr->max) < 0)
    {
      /* The bounds share a sign, so the high bits above the first
	 differing bit are common to every value in the range; all
	 bits below that point are unknown.  */
      wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
      *may_be_nonzero = wi::bit_or (vr->min, vr->max);
      *must_be_nonzero = wi::bit_and (vr->min, vr->max);
      if (xor_mask != 0)
	{
	  wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
				    may_be_nonzero->get_precision ());
	  *may_be_nonzero = *may_be_nonzero | mask;
	  *must_be_nonzero = must_be_nonzero->and_not (mask);
	}
    }

  return true;
}
2004
/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (value_range *ar,
			value_range *vr0, value_range *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  /* Only constant anti-ranges over types with known extreme values
     can be split.  */
  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  /* ~[min, max] splits into [TYPE_MIN, min - 1] ...  */
  if (!vrp_val_is_min (ar->min))
    {
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
    }
  /* ... and [max + 1, TYPE_MAX]; either piece may be empty when the
     anti-range touches a type bound.  */
  if (!vrp_val_is_max (ar->max))
    {
      vr1->type = VR_RANGE;
      vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
      vr1->max = vrp_val_max (type);
    }
  /* Canonicalize: if only the upper piece exists, move it to *VR0.  */
  if (vr0->type == VR_UNDEFINED)
    {
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}
2046
/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  CODE must be one of the multiplication, division
   or shift codes asserted below; both input ranges must be VR_RANGE
   (or VR_ANTI_RANGE for MULT_EXPR) and of the same kind.  Whenever a
   precise result cannot be computed, *VR is set to VR_VARYING.  */

static void
extract_range_from_multiplicative_op_1 (value_range *vr,
					enum tree_code code,
					value_range *vr0, value_range *vr1)
{
  enum value_range_type type;
  tree val[4];		/* The four cross products of the range endpoints.  */
  size_t i;
  tree min, max;	/* Smallest/largest of the VAL[i].  */
  bool sop;		/* Set when a cross product could not be folded.  */
  int cmp;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
     and then figure the smallest and largest values to form
     the new range.  */
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert ((vr0->type == VR_RANGE
	       || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
	      && vr0->type == vr1->type);

  /* The result range kind defaults to the (common) kind of the inputs.  */
  type = vr0->type;

  /* Compute the 4 cross operations.  When a range endpoint pair is
     identical (a singleton range on that side), the corresponding
     cross product duplicates one already computed, so leave that
     VAL[i] as NULL_TREE; the scan loop below skips NULL entries.
     If vrp_int_const_binop fails to fold any needed product to a
     constant, record that in SOP and give up below.  */
  sop = false;
  val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
  if (val[0] == NULL_TREE)
    sop = true;

  if (vr1->max == vr1->min)
    val[1] = NULL_TREE;
  else
    {
      val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
      if (val[1] == NULL_TREE)
	sop = true;
    }

  if (vr0->max == vr0->min)
    val[2] = NULL_TREE;
  else
    {
      val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
      if (val[2] == NULL_TREE)
	sop = true;
    }

  if (vr0->min == vr0->max || vr1->min == vr1->max)
    val[3] = NULL_TREE;
  else
    {
      val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
      if (val[3] == NULL_TREE)
	sop = true;
    }

  /* A cross product we needed could not be computed; no usable range.  */
  if (sop)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Set MIN to the minimum of VAL[i] and MAX to the maximum
     of VAL[i].  */
  min = val[0];
  max = val[0];
  for (i = 1; i < 4; i++)
    {
      /* Once MIN or MAX is non-invariant or has overflowed (other than
	 to an overflow-infinity), scanning further cannot help; the
	 check after the loop will turn the result into VARYING.  */
      if (!is_gimple_min_invariant (min)
	  || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
	  || !is_gimple_min_invariant (max)
	  || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
	break;

      if (val[i])
	{
	  if (!is_gimple_min_invariant (val[i])
	      || (TREE_OVERFLOW (val[i])
		  && !is_overflow_infinity (val[i])))
	    {
	      /* If we found an overflowed value, set MIN and MAX
		 to it so that we set the resulting range to
		 VARYING.  */
	      min = max = val[i];
	      break;
	    }

	  if (compare_values (val[i], min) == -1)
	    min = val[i];

	  if (compare_values (val[i], max) == 1)
	    max = val[i];
	}
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
2200
2201 /* Extract range information from a binary operation CODE based on
2202 the ranges of each of its operands *VR0 and *VR1 with resulting
2203 type EXPR_TYPE. The resulting range is stored in *VR. */
2204
2205 static void
2206 extract_range_from_binary_expr_1 (value_range *vr,
2207 enum tree_code code, tree expr_type,
2208 value_range *vr0_, value_range *vr1_)
2209 {
2210 value_range vr0 = *vr0_, vr1 = *vr1_;
2211 value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2212 enum value_range_type type;
2213 tree min = NULL_TREE, max = NULL_TREE;
2214 int cmp;
2215
2216 if (!INTEGRAL_TYPE_P (expr_type)
2217 && !POINTER_TYPE_P (expr_type))
2218 {
2219 set_value_range_to_varying (vr);
2220 return;
2221 }
2222
2223 /* Not all binary expressions can be applied to ranges in a
2224 meaningful way. Handle only arithmetic operations. */
2225 if (code != PLUS_EXPR
2226 && code != MINUS_EXPR
2227 && code != POINTER_PLUS_EXPR
2228 && code != MULT_EXPR
2229 && code != TRUNC_DIV_EXPR
2230 && code != FLOOR_DIV_EXPR
2231 && code != CEIL_DIV_EXPR
2232 && code != EXACT_DIV_EXPR
2233 && code != ROUND_DIV_EXPR
2234 && code != TRUNC_MOD_EXPR
2235 && code != RSHIFT_EXPR
2236 && code != LSHIFT_EXPR
2237 && code != MIN_EXPR
2238 && code != MAX_EXPR
2239 && code != BIT_AND_EXPR
2240 && code != BIT_IOR_EXPR
2241 && code != BIT_XOR_EXPR)
2242 {
2243 set_value_range_to_varying (vr);
2244 return;
2245 }
2246
2247 /* If both ranges are UNDEFINED, so is the result. */
2248 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2249 {
2250 set_value_range_to_undefined (vr);
2251 return;
2252 }
2253 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2254 code. At some point we may want to special-case operations that
2255 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2256 operand. */
2257 else if (vr0.type == VR_UNDEFINED)
2258 set_value_range_to_varying (&vr0);
2259 else if (vr1.type == VR_UNDEFINED)
2260 set_value_range_to_varying (&vr1);
2261
2262 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2263 and express ~[] op X as ([]' op X) U ([]'' op X). */
2264 if (vr0.type == VR_ANTI_RANGE
2265 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2266 {
2267 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2268 if (vrtem1.type != VR_UNDEFINED)
2269 {
2270 value_range vrres = VR_INITIALIZER;
2271 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2272 &vrtem1, vr1_);
2273 vrp_meet (vr, &vrres);
2274 }
2275 return;
2276 }
2277 /* Likewise for X op ~[]. */
2278 if (vr1.type == VR_ANTI_RANGE
2279 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2280 {
2281 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2282 if (vrtem1.type != VR_UNDEFINED)
2283 {
2284 value_range vrres = VR_INITIALIZER;
2285 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2286 vr0_, &vrtem1);
2287 vrp_meet (vr, &vrres);
2288 }
2289 return;
2290 }
2291
2292 /* The type of the resulting value range defaults to VR0.TYPE. */
2293 type = vr0.type;
2294
2295 /* Refuse to operate on VARYING ranges, ranges of different kinds
2296 and symbolic ranges. As an exception, we allow BIT_{AND,IOR}
2297 because we may be able to derive a useful range even if one of
2298 the operands is VR_VARYING or symbolic range. Similarly for
2299 divisions, MIN/MAX and PLUS/MINUS.
2300
2301 TODO, we may be able to derive anti-ranges in some cases. */
2302 if (code != BIT_AND_EXPR
2303 && code != BIT_IOR_EXPR
2304 && code != TRUNC_DIV_EXPR
2305 && code != FLOOR_DIV_EXPR
2306 && code != CEIL_DIV_EXPR
2307 && code != EXACT_DIV_EXPR
2308 && code != ROUND_DIV_EXPR
2309 && code != TRUNC_MOD_EXPR
2310 && code != MIN_EXPR
2311 && code != MAX_EXPR
2312 && code != PLUS_EXPR
2313 && code != MINUS_EXPR
2314 && code != RSHIFT_EXPR
2315 && (vr0.type == VR_VARYING
2316 || vr1.type == VR_VARYING
2317 || vr0.type != vr1.type
2318 || symbolic_range_p (&vr0)
2319 || symbolic_range_p (&vr1)))
2320 {
2321 set_value_range_to_varying (vr);
2322 return;
2323 }
2324
2325 /* Now evaluate the expression to determine the new range. */
2326 if (POINTER_TYPE_P (expr_type))
2327 {
2328 if (code == MIN_EXPR || code == MAX_EXPR)
2329 {
2330 /* For MIN/MAX expressions with pointers, we only care about
2331 nullness, if both are non null, then the result is nonnull.
2332 If both are null, then the result is null. Otherwise they
2333 are varying. */
2334 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2335 set_value_range_to_nonnull (vr, expr_type);
2336 else if (range_is_null (&vr0) && range_is_null (&vr1))
2337 set_value_range_to_null (vr, expr_type);
2338 else
2339 set_value_range_to_varying (vr);
2340 }
2341 else if (code == POINTER_PLUS_EXPR)
2342 {
2343 /* For pointer types, we are really only interested in asserting
2344 whether the expression evaluates to non-NULL. */
2345 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2346 set_value_range_to_nonnull (vr, expr_type);
2347 else if (range_is_null (&vr0) && range_is_null (&vr1))
2348 set_value_range_to_null (vr, expr_type);
2349 else
2350 set_value_range_to_varying (vr);
2351 }
2352 else if (code == BIT_AND_EXPR)
2353 {
2354 /* For pointer types, we are really only interested in asserting
2355 whether the expression evaluates to non-NULL. */
2356 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2357 set_value_range_to_nonnull (vr, expr_type);
2358 else if (range_is_null (&vr0) || range_is_null (&vr1))
2359 set_value_range_to_null (vr, expr_type);
2360 else
2361 set_value_range_to_varying (vr);
2362 }
2363 else
2364 set_value_range_to_varying (vr);
2365
2366 return;
2367 }
2368
2369 /* For integer ranges, apply the operation to each end of the
2370 range and see what we end up with. */
2371 if (code == PLUS_EXPR || code == MINUS_EXPR)
2372 {
2373 const bool minus_p = (code == MINUS_EXPR);
2374 tree min_op0 = vr0.min;
2375 tree min_op1 = minus_p ? vr1.max : vr1.min;
2376 tree max_op0 = vr0.max;
2377 tree max_op1 = minus_p ? vr1.min : vr1.max;
2378 tree sym_min_op0 = NULL_TREE;
2379 tree sym_min_op1 = NULL_TREE;
2380 tree sym_max_op0 = NULL_TREE;
2381 tree sym_max_op1 = NULL_TREE;
2382 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
2383
2384 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
2385 single-symbolic ranges, try to compute the precise resulting range,
2386 but only if we know that this resulting range will also be constant
2387 or single-symbolic. */
2388 if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
2389 && (TREE_CODE (min_op0) == INTEGER_CST
2390 || (sym_min_op0
2391 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
2392 && (TREE_CODE (min_op1) == INTEGER_CST
2393 || (sym_min_op1
2394 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
2395 && (!(sym_min_op0 && sym_min_op1)
2396 || (sym_min_op0 == sym_min_op1
2397 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
2398 && (TREE_CODE (max_op0) == INTEGER_CST
2399 || (sym_max_op0
2400 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
2401 && (TREE_CODE (max_op1) == INTEGER_CST
2402 || (sym_max_op1
2403 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
2404 && (!(sym_max_op0 && sym_max_op1)
2405 || (sym_max_op0 == sym_max_op1
2406 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
2407 {
2408 const signop sgn = TYPE_SIGN (expr_type);
2409 const unsigned int prec = TYPE_PRECISION (expr_type);
2410 wide_int type_min, type_max, wmin, wmax;
2411 int min_ovf = 0;
2412 int max_ovf = 0;
2413
2414 /* Get the lower and upper bounds of the type. */
2415 if (TYPE_OVERFLOW_WRAPS (expr_type))
2416 {
2417 type_min = wi::min_value (prec, sgn);
2418 type_max = wi::max_value (prec, sgn);
2419 }
2420 else
2421 {
2422 type_min = vrp_val_min (expr_type);
2423 type_max = vrp_val_max (expr_type);
2424 }
2425
2426 /* Combine the lower bounds, if any. */
2427 if (min_op0 && min_op1)
2428 {
2429 if (minus_p)
2430 {
2431 wmin = wi::sub (min_op0, min_op1);
2432
2433 /* Check for overflow. */
2434 if (wi::cmp (0, min_op1, sgn)
2435 != wi::cmp (wmin, min_op0, sgn))
2436 min_ovf = wi::cmp (min_op0, min_op1, sgn);
2437 }
2438 else
2439 {
2440 wmin = wi::add (min_op0, min_op1);
2441
2442 /* Check for overflow. */
2443 if (wi::cmp (min_op1, 0, sgn)
2444 != wi::cmp (wmin, min_op0, sgn))
2445 min_ovf = wi::cmp (min_op0, wmin, sgn);
2446 }
2447 }
2448 else if (min_op0)
2449 wmin = min_op0;
2450 else if (min_op1)
2451 wmin = minus_p ? wi::neg (min_op1) : min_op1;
2452 else
2453 wmin = wi::shwi (0, prec);
2454
2455 /* Combine the upper bounds, if any. */
2456 if (max_op0 && max_op1)
2457 {
2458 if (minus_p)
2459 {
2460 wmax = wi::sub (max_op0, max_op1);
2461
2462 /* Check for overflow. */
2463 if (wi::cmp (0, max_op1, sgn)
2464 != wi::cmp (wmax, max_op0, sgn))
2465 max_ovf = wi::cmp (max_op0, max_op1, sgn);
2466 }
2467 else
2468 {
2469 wmax = wi::add (max_op0, max_op1);
2470
2471 if (wi::cmp (max_op1, 0, sgn)
2472 != wi::cmp (wmax, max_op0, sgn))
2473 max_ovf = wi::cmp (max_op0, wmax, sgn);
2474 }
2475 }
2476 else if (max_op0)
2477 wmax = max_op0;
2478 else if (max_op1)
2479 wmax = minus_p ? wi::neg (max_op1) : max_op1;
2480 else
2481 wmax = wi::shwi (0, prec);
2482
2483 /* Check for type overflow. */
2484 if (min_ovf == 0)
2485 {
2486 if (wi::cmp (wmin, type_min, sgn) == -1)
2487 min_ovf = -1;
2488 else if (wi::cmp (wmin, type_max, sgn) == 1)
2489 min_ovf = 1;
2490 }
2491 if (max_ovf == 0)
2492 {
2493 if (wi::cmp (wmax, type_min, sgn) == -1)
2494 max_ovf = -1;
2495 else if (wi::cmp (wmax, type_max, sgn) == 1)
2496 max_ovf = 1;
2497 }
2498
2499 /* If we have overflow for the constant part and the resulting
2500 range will be symbolic, drop to VR_VARYING. */
2501 if ((min_ovf && sym_min_op0 != sym_min_op1)
2502 || (max_ovf && sym_max_op0 != sym_max_op1))
2503 {
2504 set_value_range_to_varying (vr);
2505 return;
2506 }
2507
2508 if (TYPE_OVERFLOW_WRAPS (expr_type))
2509 {
2510 /* If overflow wraps, truncate the values and adjust the
2511 range kind and bounds appropriately. */
2512 wide_int tmin = wide_int::from (wmin, prec, sgn);
2513 wide_int tmax = wide_int::from (wmax, prec, sgn);
2514 if (min_ovf == max_ovf)
2515 {
2516 /* No overflow or both overflow or underflow. The
2517 range kind stays VR_RANGE. */
2518 min = wide_int_to_tree (expr_type, tmin);
2519 max = wide_int_to_tree (expr_type, tmax);
2520 }
2521 else if (min_ovf == -1 && max_ovf == 1)
2522 {
2523 /* Underflow and overflow, drop to VR_VARYING. */
2524 set_value_range_to_varying (vr);
2525 return;
2526 }
2527 else
2528 {
2529 /* Min underflow or max overflow. The range kind
2530 changes to VR_ANTI_RANGE. */
2531 bool covers = false;
2532 wide_int tem = tmin;
2533 gcc_assert ((min_ovf == -1 && max_ovf == 0)
2534 || (max_ovf == 1 && min_ovf == 0));
2535 type = VR_ANTI_RANGE;
2536 tmin = tmax + 1;
2537 if (wi::cmp (tmin, tmax, sgn) < 0)
2538 covers = true;
2539 tmax = tem - 1;
2540 if (wi::cmp (tmax, tem, sgn) > 0)
2541 covers = true;
2542 /* If the anti-range would cover nothing, drop to varying.
2543 Likewise if the anti-range bounds are outside of the
2544 types values. */
2545 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
2546 {
2547 set_value_range_to_varying (vr);
2548 return;
2549 }
2550 min = wide_int_to_tree (expr_type, tmin);
2551 max = wide_int_to_tree (expr_type, tmax);
2552 }
2553 }
2554 else
2555 {
2556 /* If overflow does not wrap, saturate to the types min/max
2557 value. */
2558 if (min_ovf == -1)
2559 {
2560 if (needs_overflow_infinity (expr_type)
2561 && supports_overflow_infinity (expr_type))
2562 min = negative_overflow_infinity (expr_type);
2563 else
2564 min = wide_int_to_tree (expr_type, type_min);
2565 }
2566 else if (min_ovf == 1)
2567 {
2568 if (needs_overflow_infinity (expr_type)
2569 && supports_overflow_infinity (expr_type))
2570 min = positive_overflow_infinity (expr_type);
2571 else
2572 min = wide_int_to_tree (expr_type, type_max);
2573 }
2574 else
2575 min = wide_int_to_tree (expr_type, wmin);
2576
2577 if (max_ovf == -1)
2578 {
2579 if (needs_overflow_infinity (expr_type)
2580 && supports_overflow_infinity (expr_type))
2581 max = negative_overflow_infinity (expr_type);
2582 else
2583 max = wide_int_to_tree (expr_type, type_min);
2584 }
2585 else if (max_ovf == 1)
2586 {
2587 if (needs_overflow_infinity (expr_type)
2588 && supports_overflow_infinity (expr_type))
2589 max = positive_overflow_infinity (expr_type);
2590 else
2591 max = wide_int_to_tree (expr_type, type_max);
2592 }
2593 else
2594 max = wide_int_to_tree (expr_type, wmax);
2595 }
2596
2597 if (needs_overflow_infinity (expr_type)
2598 && supports_overflow_infinity (expr_type))
2599 {
2600 if ((min_op0 && is_negative_overflow_infinity (min_op0))
2601 || (min_op1
2602 && (minus_p
2603 ? is_positive_overflow_infinity (min_op1)
2604 : is_negative_overflow_infinity (min_op1))))
2605 min = negative_overflow_infinity (expr_type);
2606 if ((max_op0 && is_positive_overflow_infinity (max_op0))
2607 || (max_op1
2608 && (minus_p
2609 ? is_negative_overflow_infinity (max_op1)
2610 : is_positive_overflow_infinity (max_op1))))
2611 max = positive_overflow_infinity (expr_type);
2612 }
2613
2614 /* If the result lower bound is constant, we're done;
2615 otherwise, build the symbolic lower bound. */
2616 if (sym_min_op0 == sym_min_op1)
2617 ;
2618 else if (sym_min_op0)
2619 min = build_symbolic_expr (expr_type, sym_min_op0,
2620 neg_min_op0, min);
2621 else if (sym_min_op1)
2622 min = build_symbolic_expr (expr_type, sym_min_op1,
2623 neg_min_op1 ^ minus_p, min);
2624
2625 /* Likewise for the upper bound. */
2626 if (sym_max_op0 == sym_max_op1)
2627 ;
2628 else if (sym_max_op0)
2629 max = build_symbolic_expr (expr_type, sym_max_op0,
2630 neg_max_op0, max);
2631 else if (sym_max_op1)
2632 max = build_symbolic_expr (expr_type, sym_max_op1,
2633 neg_max_op1 ^ minus_p, max);
2634 }
2635 else
2636 {
2637 /* For other cases, for example if we have a PLUS_EXPR with two
2638 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2639 to compute a precise range for such a case.
2640 ??? General even mixed range kind operations can be expressed
2641 by for example transforming ~[3, 5] + [1, 2] to range-only
2642 operations and a union primitive:
2643 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2644 [-INF+1, 4] U [6, +INF(OVF)]
2645 though usually the union is not exactly representable with
2646 a single range or anti-range as the above is
2647 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2648 but one could use a scheme similar to equivalences for this. */
2649 set_value_range_to_varying (vr);
2650 return;
2651 }
2652 }
2653 else if (code == MIN_EXPR
2654 || code == MAX_EXPR)
2655 {
2656 if (vr0.type == VR_RANGE
2657 && !symbolic_range_p (&vr0))
2658 {
2659 type = VR_RANGE;
2660 if (vr1.type == VR_RANGE
2661 && !symbolic_range_p (&vr1))
2662 {
2663 /* For operations that make the resulting range directly
2664 proportional to the original ranges, apply the operation to
2665 the same end of each range. */
2666 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2667 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2668 }
2669 else if (code == MIN_EXPR)
2670 {
2671 min = vrp_val_min (expr_type);
2672 max = vr0.max;
2673 }
2674 else if (code == MAX_EXPR)
2675 {
2676 min = vr0.min;
2677 max = vrp_val_max (expr_type);
2678 }
2679 }
2680 else if (vr1.type == VR_RANGE
2681 && !symbolic_range_p (&vr1))
2682 {
2683 type = VR_RANGE;
2684 if (code == MIN_EXPR)
2685 {
2686 min = vrp_val_min (expr_type);
2687 max = vr1.max;
2688 }
2689 else if (code == MAX_EXPR)
2690 {
2691 min = vr1.min;
2692 max = vrp_val_max (expr_type);
2693 }
2694 }
2695 else
2696 {
2697 set_value_range_to_varying (vr);
2698 return;
2699 }
2700 }
2701 else if (code == MULT_EXPR)
2702 {
2703 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2704 drop to varying. This test requires 2*prec bits if both
2705 operands are signed and 2*prec + 2 bits if either is not. */
2706
2707 signop sign = TYPE_SIGN (expr_type);
2708 unsigned int prec = TYPE_PRECISION (expr_type);
2709
2710 if (range_int_cst_p (&vr0)
2711 && range_int_cst_p (&vr1)
2712 && TYPE_OVERFLOW_WRAPS (expr_type))
2713 {
2714 typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
2715 typedef generic_wide_int
2716 <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
2717 vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
2718 vrp_int size = sizem1 + 1;
2719
2720 /* Extend the values using the sign of the result to PREC2.
2721 From here on out, everthing is just signed math no matter
2722 what the input types were. */
2723 vrp_int min0 = vrp_int_cst (vr0.min);
2724 vrp_int max0 = vrp_int_cst (vr0.max);
2725 vrp_int min1 = vrp_int_cst (vr1.min);
2726 vrp_int max1 = vrp_int_cst (vr1.max);
2727 /* Canonicalize the intervals. */
2728 if (sign == UNSIGNED)
2729 {
2730 if (wi::ltu_p (size, min0 + max0))
2731 {
2732 min0 -= size;
2733 max0 -= size;
2734 }
2735
2736 if (wi::ltu_p (size, min1 + max1))
2737 {
2738 min1 -= size;
2739 max1 -= size;
2740 }
2741 }
2742
2743 vrp_int prod0 = min0 * min1;
2744 vrp_int prod1 = min0 * max1;
2745 vrp_int prod2 = max0 * min1;
2746 vrp_int prod3 = max0 * max1;
2747
2748 /* Sort the 4 products so that min is in prod0 and max is in
2749 prod3. */
2750 /* min0min1 > max0max1 */
2751 if (wi::gts_p (prod0, prod3))
2752 std::swap (prod0, prod3);
2753
2754 /* min0max1 > max0min1 */
2755 if (wi::gts_p (prod1, prod2))
2756 std::swap (prod1, prod2);
2757
2758 if (wi::gts_p (prod0, prod1))
2759 std::swap (prod0, prod1);
2760
2761 if (wi::gts_p (prod2, prod3))
2762 std::swap (prod2, prod3);
2763
2764 /* diff = max - min. */
2765 prod2 = prod3 - prod0;
2766 if (wi::geu_p (prod2, sizem1))
2767 {
2768 /* the range covers all values. */
2769 set_value_range_to_varying (vr);
2770 return;
2771 }
2772
2773 /* The following should handle the wrapping and selecting
2774 VR_ANTI_RANGE for us. */
2775 min = wide_int_to_tree (expr_type, prod0);
2776 max = wide_int_to_tree (expr_type, prod3);
2777 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2778 return;
2779 }
2780
2781 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2782 drop to VR_VARYING. It would take more effort to compute a
2783 precise range for such a case. For example, if we have
2784 op0 == 65536 and op1 == 65536 with their ranges both being
2785 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2786 we cannot claim that the product is in ~[0,0]. Note that we
2787 are guaranteed to have vr0.type == vr1.type at this
2788 point. */
2789 if (vr0.type == VR_ANTI_RANGE
2790 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2791 {
2792 set_value_range_to_varying (vr);
2793 return;
2794 }
2795
2796 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2797 return;
2798 }
2799 else if (code == RSHIFT_EXPR
2800 || code == LSHIFT_EXPR)
2801 {
2802 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2803 then drop to VR_VARYING. Outside of this range we get undefined
2804 behavior from the shift operation. We cannot even trust
2805 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2806 shifts, and the operation at the tree level may be widened. */
2807 if (range_int_cst_p (&vr1)
2808 && compare_tree_int (vr1.min, 0) >= 0
2809 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2810 {
2811 if (code == RSHIFT_EXPR)
2812 {
2813 /* Even if vr0 is VARYING or otherwise not usable, we can derive
2814 useful ranges just from the shift count. E.g.
2815 x >> 63 for signed 64-bit x is always [-1, 0]. */
2816 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2817 {
2818 vr0.type = type = VR_RANGE;
2819 vr0.min = vrp_val_min (expr_type);
2820 vr0.max = vrp_val_max (expr_type);
2821 }
2822 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2823 return;
2824 }
2825 /* We can map lshifts by constants to MULT_EXPR handling. */
2826 else if (code == LSHIFT_EXPR
2827 && range_int_cst_singleton_p (&vr1))
2828 {
2829 bool saved_flag_wrapv;
2830 value_range vr1p = VR_INITIALIZER;
2831 vr1p.type = VR_RANGE;
2832 vr1p.min = (wide_int_to_tree
2833 (expr_type,
2834 wi::set_bit_in_zero (tree_to_shwi (vr1.min),
2835 TYPE_PRECISION (expr_type))));
2836 vr1p.max = vr1p.min;
2837 /* We have to use a wrapping multiply though as signed overflow
2838 on lshifts is implementation defined in C89. */
2839 saved_flag_wrapv = flag_wrapv;
2840 flag_wrapv = 1;
2841 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2842 &vr0, &vr1p);
2843 flag_wrapv = saved_flag_wrapv;
2844 return;
2845 }
2846 else if (code == LSHIFT_EXPR
2847 && range_int_cst_p (&vr0))
2848 {
2849 int prec = TYPE_PRECISION (expr_type);
2850 int overflow_pos = prec;
2851 int bound_shift;
2852 wide_int low_bound, high_bound;
2853 bool uns = TYPE_UNSIGNED (expr_type);
2854 bool in_bounds = false;
2855
2856 if (!uns)
2857 overflow_pos -= 1;
2858
2859 bound_shift = overflow_pos - tree_to_shwi (vr1.max);
2860 /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
2861 overflow. However, for that to happen, vr1.max needs to be
2862 zero, which means vr1 is a singleton range of zero, which
2863 means it should be handled by the previous LSHIFT_EXPR
2864 if-clause. */
2865 wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2866 wide_int complement = ~(bound - 1);
2867
2868 if (uns)
2869 {
2870 low_bound = bound;
2871 high_bound = complement;
2872 if (wi::ltu_p (vr0.max, low_bound))
2873 {
2874 /* [5, 6] << [1, 2] == [10, 24]. */
2875 /* We're shifting out only zeroes, the value increases
2876 monotonically. */
2877 in_bounds = true;
2878 }
2879 else if (wi::ltu_p (high_bound, vr0.min))
2880 {
2881 /* [0xffffff00, 0xffffffff] << [1, 2]
2882 == [0xfffffc00, 0xfffffffe]. */
2883 /* We're shifting out only ones, the value decreases
2884 monotonically. */
2885 in_bounds = true;
2886 }
2887 }
2888 else
2889 {
2890 /* [-1, 1] << [1, 2] == [-4, 4]. */
2891 low_bound = complement;
2892 high_bound = bound;
2893 if (wi::lts_p (vr0.max, high_bound)
2894 && wi::lts_p (low_bound, vr0.min))
2895 {
2896 /* For non-negative numbers, we're shifting out only
2897 zeroes, the value increases monotonically.
2898 For negative numbers, we're shifting out only ones, the
2899 value decreases monotomically. */
2900 in_bounds = true;
2901 }
2902 }
2903
2904 if (in_bounds)
2905 {
2906 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2907 return;
2908 }
2909 }
2910 }
2911 set_value_range_to_varying (vr);
2912 return;
2913 }
2914 else if (code == TRUNC_DIV_EXPR
2915 || code == FLOOR_DIV_EXPR
2916 || code == CEIL_DIV_EXPR
2917 || code == EXACT_DIV_EXPR
2918 || code == ROUND_DIV_EXPR)
2919 {
2920 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2921 {
2922 /* For division, if op1 has VR_RANGE but op0 does not, something
2923 can be deduced just from that range. Say [min, max] / [4, max]
2924 gives [min / 4, max / 4] range. */
2925 if (vr1.type == VR_RANGE
2926 && !symbolic_range_p (&vr1)
2927 && range_includes_zero_p (vr1.min, vr1.max) == 0)
2928 {
2929 vr0.type = type = VR_RANGE;
2930 vr0.min = vrp_val_min (expr_type);
2931 vr0.max = vrp_val_max (expr_type);
2932 }
2933 else
2934 {
2935 set_value_range_to_varying (vr);
2936 return;
2937 }
2938 }
2939
2940 /* For divisions, if flag_non_call_exceptions is true, we must
2941 not eliminate a division by zero. */
2942 if (cfun->can_throw_non_call_exceptions
2943 && (vr1.type != VR_RANGE
2944 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2945 {
2946 set_value_range_to_varying (vr);
2947 return;
2948 }
2949
2950 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2951 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2952 include 0. */
2953 if (vr0.type == VR_RANGE
2954 && (vr1.type != VR_RANGE
2955 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2956 {
2957 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2958 int cmp;
2959
2960 min = NULL_TREE;
2961 max = NULL_TREE;
2962 if (TYPE_UNSIGNED (expr_type)
2963 || value_range_nonnegative_p (&vr1))
2964 {
2965 /* For unsigned division or when divisor is known
2966 to be non-negative, the range has to cover
2967 all numbers from 0 to max for positive max
2968 and all numbers from min to 0 for negative min. */
2969 cmp = compare_values (vr0.max, zero);
2970 if (cmp == -1)
2971 {
2972 /* When vr0.max < 0, vr1.min != 0 and value
2973 ranges for dividend and divisor are available. */
2974 if (vr1.type == VR_RANGE
2975 && !symbolic_range_p (&vr0)
2976 && !symbolic_range_p (&vr1)
2977 && !compare_values (vr1.min, zero))
2978 max = int_const_binop (code, vr0.max, vr1.min);
2979 else
2980 max = zero;
2981 }
2982 else if (cmp == 0 || cmp == 1)
2983 max = vr0.max;
2984 else
2985 type = VR_VARYING;
2986 cmp = compare_values (vr0.min, zero);
2987 if (cmp == 1)
2988 {
2989 /* For unsigned division when value ranges for dividend
2990 and divisor are available. */
2991 if (vr1.type == VR_RANGE
2992 && !symbolic_range_p (&vr0)
2993 && !symbolic_range_p (&vr1))
2994 min = int_const_binop (code, vr0.min, vr1.max);
2995 else
2996 min = zero;
2997 }
2998 else if (cmp == 0 || cmp == -1)
2999 min = vr0.min;
3000 else
3001 type = VR_VARYING;
3002 }
3003 else
3004 {
3005 /* Otherwise the range is -max .. max or min .. -min
3006 depending on which bound is bigger in absolute value,
3007 as the division can change the sign. */
3008 abs_extent_range (vr, vr0.min, vr0.max);
3009 return;
3010 }
3011 if (type == VR_VARYING)
3012 {
3013 set_value_range_to_varying (vr);
3014 return;
3015 }
3016 }
3017 else
3018 {
3019 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
3020 return;
3021 }
3022 }
3023 else if (code == TRUNC_MOD_EXPR)
3024 {
3025 if (range_is_null (&vr1))
3026 {
3027 set_value_range_to_undefined (vr);
3028 return;
3029 }
3030 /* ABS (A % B) < ABS (B) and either
3031 0 <= A % B <= A or A <= A % B <= 0. */
3032 type = VR_RANGE;
3033 signop sgn = TYPE_SIGN (expr_type);
3034 unsigned int prec = TYPE_PRECISION (expr_type);
3035 wide_int wmin, wmax, tmp;
3036 wide_int zero = wi::zero (prec);
3037 wide_int one = wi::one (prec);
3038 if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
3039 {
3040 wmax = wi::sub (vr1.max, one);
3041 if (sgn == SIGNED)
3042 {
3043 tmp = wi::sub (wi::minus_one (prec), vr1.min);
3044 wmax = wi::smax (wmax, tmp);
3045 }
3046 }
3047 else
3048 {
3049 wmax = wi::max_value (prec, sgn);
3050 /* X % INT_MIN may be INT_MAX. */
3051 if (sgn == UNSIGNED)
3052 wmax = wmax - one;
3053 }
3054
3055 if (sgn == UNSIGNED)
3056 wmin = zero;
3057 else
3058 {
3059 wmin = -wmax;
3060 if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
3061 {
3062 tmp = vr0.min;
3063 if (wi::gts_p (tmp, zero))
3064 tmp = zero;
3065 wmin = wi::smax (wmin, tmp);
3066 }
3067 }
3068
3069 if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
3070 {
3071 tmp = vr0.max;
3072 if (sgn == SIGNED && wi::neg_p (tmp))
3073 tmp = zero;
3074 wmax = wi::min (wmax, tmp, sgn);
3075 }
3076
3077 min = wide_int_to_tree (expr_type, wmin);
3078 max = wide_int_to_tree (expr_type, wmax);
3079 }
3080 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
3081 {
3082 bool int_cst_range0, int_cst_range1;
3083 wide_int may_be_nonzero0, may_be_nonzero1;
3084 wide_int must_be_nonzero0, must_be_nonzero1;
3085
3086 int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
3087 &may_be_nonzero0,
3088 &must_be_nonzero0);
3089 int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
3090 &may_be_nonzero1,
3091 &must_be_nonzero1);
3092
3093 type = VR_RANGE;
3094 if (code == BIT_AND_EXPR)
3095 {
3096 min = wide_int_to_tree (expr_type,
3097 must_be_nonzero0 & must_be_nonzero1);
3098 wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
3099 /* If both input ranges contain only negative values we can
3100 truncate the result range maximum to the minimum of the
3101 input range maxima. */
3102 if (int_cst_range0 && int_cst_range1
3103 && tree_int_cst_sgn (vr0.max) < 0
3104 && tree_int_cst_sgn (vr1.max) < 0)
3105 {
3106 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3107 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3108 }
3109 /* If either input range contains only non-negative values
3110 we can truncate the result range maximum to the respective
3111 maximum of the input range. */
3112 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
3113 wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3114 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
3115 wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3116 max = wide_int_to_tree (expr_type, wmax);
3117 }
3118 else if (code == BIT_IOR_EXPR)
3119 {
3120 max = wide_int_to_tree (expr_type,
3121 may_be_nonzero0 | may_be_nonzero1);
3122 wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
3123 /* If the input ranges contain only positive values we can
3124 truncate the minimum of the result range to the maximum
3125 of the input range minima. */
3126 if (int_cst_range0 && int_cst_range1
3127 && tree_int_cst_sgn (vr0.min) >= 0
3128 && tree_int_cst_sgn (vr1.min) >= 0)
3129 {
3130 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3131 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3132 }
3133 /* If either input range contains only negative values
3134 we can truncate the minimum of the result range to the
3135 respective minimum range. */
3136 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
3137 wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3138 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
3139 wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3140 min = wide_int_to_tree (expr_type, wmin);
3141 }
3142 else if (code == BIT_XOR_EXPR)
3143 {
3144 wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
3145 | ~(may_be_nonzero0 | may_be_nonzero1));
3146 wide_int result_one_bits
3147 = (must_be_nonzero0.and_not (may_be_nonzero1)
3148 | must_be_nonzero1.and_not (may_be_nonzero0));
3149 max = wide_int_to_tree (expr_type, ~result_zero_bits);
3150 min = wide_int_to_tree (expr_type, result_one_bits);
3151 /* If the range has all positive or all negative values the
3152 result is better than VARYING. */
3153 if (tree_int_cst_sgn (min) < 0
3154 || tree_int_cst_sgn (max) >= 0)
3155 ;
3156 else
3157 max = min = NULL_TREE;
3158 }
3159 }
3160 else
3161 gcc_unreachable ();
3162
3163 /* If either MIN or MAX overflowed, then set the resulting range to
3164 VARYING. But we do accept an overflow infinity representation. */
3165 if (min == NULL_TREE
3166 || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min))
3167 || max == NULL_TREE
3168 || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max)))
3169 {
3170 set_value_range_to_varying (vr);
3171 return;
3172 }
3173
3174 /* We punt if:
3175 1) [-INF, +INF]
3176 2) [-INF, +-INF(OVF)]
3177 3) [+-INF(OVF), +INF]
3178 4) [+-INF(OVF), +-INF(OVF)]
3179 We learn nothing when we have INF and INF(OVF) on both sides.
3180 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3181 overflow. */
3182 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3183 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3184 {
3185 set_value_range_to_varying (vr);
3186 return;
3187 }
3188
3189 cmp = compare_values (min, max);
3190 if (cmp == -2 || cmp == 1)
3191 {
3192 /* If the new range has its limits swapped around (MIN > MAX),
3193 then the operation caused one of them to wrap around, mark
3194 the new range VARYING. */
3195 set_value_range_to_varying (vr);
3196 }
3197 else
3198 set_value_range (vr, type, min, max, NULL);
3199 }
3200
3201 /* Extract range information from a binary expression OP0 CODE OP1 based on
3202 the ranges of each of its operands with resulting type EXPR_TYPE.
3203 The resulting range is stored in *VR. */
3204
3205 static void
3206 extract_range_from_binary_expr (value_range *vr,
3207 enum tree_code code,
3208 tree expr_type, tree op0, tree op1)
3209 {
3210 value_range vr0 = VR_INITIALIZER;
3211 value_range vr1 = VR_INITIALIZER;
3212
3213 /* Get value ranges for each operand. For constant operands, create
3214 a new value range with the operand to simplify processing. */
3215 if (TREE_CODE (op0) == SSA_NAME)
3216 vr0 = *(get_value_range (op0));
3217 else if (is_gimple_min_invariant (op0))
3218 set_value_range_to_value (&vr0, op0, NULL);
3219 else
3220 set_value_range_to_varying (&vr0);
3221
3222 if (TREE_CODE (op1) == SSA_NAME)
3223 vr1 = *(get_value_range (op1));
3224 else if (is_gimple_min_invariant (op1))
3225 set_value_range_to_value (&vr1, op1, NULL);
3226 else
3227 set_value_range_to_varying (&vr1);
3228
3229 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3230
3231 /* Try harder for PLUS and MINUS if the range of one operand is symbolic
3232 and based on the other operand, for example if it was deduced from a
3233 symbolic comparison. When a bound of the range of the first operand
3234 is invariant, we set the corresponding bound of the new range to INF
3235 in order to avoid recursing on the range of the second operand. */
3236 if (vr->type == VR_VARYING
3237 && (code == PLUS_EXPR || code == MINUS_EXPR)
3238 && TREE_CODE (op1) == SSA_NAME
3239 && vr0.type == VR_RANGE
3240 && symbolic_range_based_on_p (&vr0, op1))
3241 {
3242 const bool minus_p = (code == MINUS_EXPR);
3243 value_range n_vr1 = VR_INITIALIZER;
3244
3245 /* Try with VR0 and [-INF, OP1]. */
3246 if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
3247 set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
3248
3249 /* Try with VR0 and [OP1, +INF]. */
3250 else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
3251 set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
3252
3253 /* Try with VR0 and [OP1, OP1]. */
3254 else
3255 set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
3256
3257 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
3258 }
3259
3260 if (vr->type == VR_VARYING
3261 && (code == PLUS_EXPR || code == MINUS_EXPR)
3262 && TREE_CODE (op0) == SSA_NAME
3263 && vr1.type == VR_RANGE
3264 && symbolic_range_based_on_p (&vr1, op0))
3265 {
3266 const bool minus_p = (code == MINUS_EXPR);
3267 value_range n_vr0 = VR_INITIALIZER;
3268
3269 /* Try with [-INF, OP0] and VR1. */
3270 if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
3271 set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
3272
3273 /* Try with [OP0, +INF] and VR1. */
3274 else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
3275 set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
3276
3277 /* Try with [OP0, OP0] and VR1. */
3278 else
3279 set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
3280
3281 extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
3282 }
3283 }
3284
3285 /* Extract range information from a unary operation CODE based on
3286 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3287 The resulting range is stored in *VR. */
3288
3289 static void
3290 extract_range_from_unary_expr_1 (value_range *vr,
3291 enum tree_code code, tree type,
3292 value_range *vr0_, tree op0_type)
3293 {
/* Work on a local copy of the operand range; VR0_ is not modified.  */
3294 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3295
3296 /* VRP only operates on integral and pointer types. */
3297 if (!(INTEGRAL_TYPE_P (op0_type)
3298 || POINTER_TYPE_P (op0_type))
3299 || !(INTEGRAL_TYPE_P (type)
3300 || POINTER_TYPE_P (type)))
3301 {
3302 set_value_range_to_varying (vr);
3303 return;
3304 }
3305
3306 /* If VR0 is UNDEFINED, so is the result. */
3307 if (vr0.type == VR_UNDEFINED)
3308 {
3309 set_value_range_to_undefined (vr);
3310 return;
3311 }
3312
3313 /* Handle operations that we express in terms of others. */
3314 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
3315 {
3316 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
3317 copy_value_range (vr, &vr0);
3318 return;
3319 }
3320 else if (code == NEGATE_EXPR)
3321 {
3322 /* -X is simply 0 - X, so re-use existing code that also handles
3323 anti-ranges fine. */
3324 value_range zero = VR_INITIALIZER;
3325 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3326 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3327 return;
3328 }
3329 else if (code == BIT_NOT_EXPR)
3330 {
3331 /* ~X is simply -1 - X, so re-use existing code that also handles
3332 anti-ranges fine. */
3333 value_range minusone = VR_INITIALIZER;
3334 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3335 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3336 type, &minusone, &vr0);
3337 return;
3338 }
3339
3340 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3341 and express op ~[] as (op []') U (op []''). */
3342 if (vr0.type == VR_ANTI_RANGE
3343 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3344 {
/* Recurse on each piece of the anti-range and union the results via
   vrp_meet; the pieces are plain ranges, so recursion depth is one.  */
3345 extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3346 if (vrtem1.type != VR_UNDEFINED)
3347 {
3348 value_range vrres = VR_INITIALIZER;
3349 extract_range_from_unary_expr_1 (&vrres, code, type,
3350 &vrtem1, op0_type);
3351 vrp_meet (vr, &vrres);
3352 }
3353 return;
3354 }
3355
3356 if (CONVERT_EXPR_CODE_P (code))
3357 {
3358 tree inner_type = op0_type;
3359 tree outer_type = type;
3360
3361 /* If the expression evaluates to a pointer, we are only interested in
3362 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3363 if (POINTER_TYPE_P (type))
3364 {
3365 if (range_is_nonnull (&vr0))
3366 set_value_range_to_nonnull (vr, type);
3367 else if (range_is_null (&vr0))
3368 set_value_range_to_null (vr, type);
3369 else
3370 set_value_range_to_varying (vr);
3371 return;
3372 }
3373
3374 /* If VR0 is varying and we increase the type precision, assume
3375 a full range for the following transformation. */
3376 if (vr0.type == VR_VARYING
3377 && INTEGRAL_TYPE_P (inner_type)
3378 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3379 {
3380 vr0.type = VR_RANGE;
3381 vr0.min = TYPE_MIN_VALUE (inner_type);
3382 vr0.max = TYPE_MAX_VALUE (inner_type);
3383 }
3384
3385 /* If VR0 is a constant range or anti-range and the conversion is
3386 not truncating we can convert the min and max values and
3387 canonicalize the resulting range. Otherwise we can do the
3388 conversion if the size of the range is less than what the
3389 precision of the target type can represent and the range is
3390 not an anti-range. */
3391 if ((vr0.type == VR_RANGE
3392 || vr0.type == VR_ANTI_RANGE)
3393 && TREE_CODE (vr0.min) == INTEGER_CST
3394 && TREE_CODE (vr0.max) == INTEGER_CST
3395 && (!is_overflow_infinity (vr0.min)
3396 || (vr0.type == VR_RANGE
3397 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3398 && needs_overflow_infinity (outer_type)
3399 && supports_overflow_infinity (outer_type)))
3400 && (!is_overflow_infinity (vr0.max)
3401 || (vr0.type == VR_RANGE
3402 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3403 && needs_overflow_infinity (outer_type)
3404 && supports_overflow_infinity (outer_type)))
3405 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3406 || (vr0.type == VR_RANGE
3407 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3408 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3409 size_int (TYPE_PRECISION (outer_type)))))))
3410 {
3411 tree new_min, new_max;
3412 if (is_overflow_infinity (vr0.min))
3413 new_min = negative_overflow_infinity (outer_type);
3414 else
3415 new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
3416 0, false);
3417 if (is_overflow_infinity (vr0.max))
3418 new_max = positive_overflow_infinity (outer_type);
3419 else
3420 new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
3421 0, false);
3422 set_and_canonicalize_value_range (vr, vr0.type,
3423 new_min, new_max, NULL);
3424 return;
3425 }
3426
3427 set_value_range_to_varying (vr);
3428 return;
3429 }
3430 else if (code == ABS_EXPR)
3431 {
3432 tree min, max;
3433 int cmp;
3434
3435 /* Pass through vr0 in the easy cases. */
3436 if (TYPE_UNSIGNED (type)
3437 || value_range_nonnegative_p (&vr0))
3438 {
3439 copy_value_range (vr, &vr0);
3440 return;
3441 }
3442
3443 /* For the remaining varying or symbolic ranges we can't do anything
3444 useful. */
3445 if (vr0.type == VR_VARYING
3446 || symbolic_range_p (&vr0))
3447 {
3448 set_value_range_to_varying (vr);
3449 return;
3450 }
3451
3452 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3453 useful range. */
3454 if (!TYPE_OVERFLOW_UNDEFINED (type)
3455 && ((vr0.type == VR_RANGE
3456 && vrp_val_is_min (vr0.min))
3457 || (vr0.type == VR_ANTI_RANGE
3458 && !vrp_val_is_min (vr0.min))))
3459 {
3460 set_value_range_to_varying (vr);
3461 return;
3462 }
3463
3464 /* ABS_EXPR may flip the range around, if the original range
3465 included negative values. */
3466 if (is_overflow_infinity (vr0.min))
3467 min = positive_overflow_infinity (type);
3468 else if (!vrp_val_is_min (vr0.min))
3469 min = fold_unary_to_constant (code, type, vr0.min);
3470 else if (!needs_overflow_infinity (type))
3471 min = TYPE_MAX_VALUE (type);
3472 else if (supports_overflow_infinity (type))
3473 min = positive_overflow_infinity (type);
3474 else
3475 {
3476 set_value_range_to_varying (vr);
3477 return;
3478 }
3479
3480 if (is_overflow_infinity (vr0.max))
3481 max = positive_overflow_infinity (type);
3482 else if (!vrp_val_is_min (vr0.max))
3483 max = fold_unary_to_constant (code, type, vr0.max);
3484 else if (!needs_overflow_infinity (type))
3485 max = TYPE_MAX_VALUE (type);
3486 else if (supports_overflow_infinity (type)
3487 /* We shouldn't generate [+INF, +INF] as set_value_range
3488 doesn't like this and ICEs. */
3489 && !is_positive_overflow_infinity (min))
3490 max = positive_overflow_infinity (type);
3491 else
3492 {
3493 set_value_range_to_varying (vr);
3494 return;
3495 }
3496
/* cmp == 1 below means the folded bounds came out reversed (MIN > MAX),
   i.e. the original range contained negative values.  */
3497 cmp = compare_values (min, max);
3498
3499 /* If a VR_ANTI_RANGEs contains zero, then we have
3500 ~[-INF, min(MIN, MAX)]. */
3501 if (vr0.type == VR_ANTI_RANGE)
3502 {
3503 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3504 {
3505 /* Take the lower of the two values. */
3506 if (cmp != 1)
3507 max = min;
3508
3509 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3510 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3511 flag_wrapv is set and the original anti-range doesn't include
3512 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3513 if (TYPE_OVERFLOW_WRAPS (type))
3514 {
3515 tree type_min_value = TYPE_MIN_VALUE (type);
3516
3517 min = (vr0.min != type_min_value
3518 ? int_const_binop (PLUS_EXPR, type_min_value,
3519 build_int_cst (TREE_TYPE (type_min_value), 1))
3520 : type_min_value);
3521 }
3522 else
3523 {
3524 if (overflow_infinity_range_p (&vr0))
3525 min = negative_overflow_infinity (type);
3526 else
3527 min = TYPE_MIN_VALUE (type);
3528 }
3529 }
3530 else
3531 {
3532 /* All else has failed, so create the range [0, INF], even for
3533 flag_wrapv since TYPE_MIN_VALUE is in the original
3534 anti-range. */
3535 vr0.type = VR_RANGE;
3536 min = build_int_cst (type, 0);
3537 if (needs_overflow_infinity (type))
3538 {
3539 if (supports_overflow_infinity (type))
3540 max = positive_overflow_infinity (type);
3541 else
3542 {
3543 set_value_range_to_varying (vr);
3544 return;
3545 }
3546 }
3547 else
3548 max = TYPE_MAX_VALUE (type);
3549 }
3550 }
3551
3552 /* If the range contains zero then we know that the minimum value in the
3553 range will be zero. */
3554 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3555 {
3556 if (cmp == 1)
3557 max = min;
3558 min = build_int_cst (type, 0);
3559 }
3560 else
3561 {
3562 /* If the range was reversed, swap MIN and MAX. */
3563 if (cmp == 1)
3564 std::swap (min, max);
3565 }
3566
3567 cmp = compare_values (min, max);
3568 if (cmp == -2 || cmp == 1)
3569 {
3570 /* If the new range has its limits swapped around (MIN > MAX),
3571 then the operation caused one of them to wrap around, mark
3572 the new range VARYING. */
3573 set_value_range_to_varying (vr);
3574 }
3575 else
3576 set_value_range (vr, vr0.type, min, max, NULL);
3577 return;
3578 }
3579
3580 /* For unhandled operations fall back to varying. */
3581 set_value_range_to_varying (vr);
3582 return;
3583 }
3584
3585
3586 /* Extract range information from a unary expression CODE OP0 based on
3587 the range of its operand with resulting type TYPE.
3588 The resulting range is stored in *VR. */
3589
3590 static void
3591 extract_range_from_unary_expr (value_range *vr, enum tree_code code,
3592 tree type, tree op0)
3593 {
3594 value_range vr0 = VR_INITIALIZER;
3595
3596 /* Get value ranges for the operand. For constant operands, create
3597 a new value range with the operand to simplify processing. */
3598 if (TREE_CODE (op0) == SSA_NAME)
3599 vr0 = *(get_value_range (op0));
3600 else if (is_gimple_min_invariant (op0))
3601 set_value_range_to_value (&vr0, op0, NULL);
3602 else
3603 set_value_range_to_varying (&vr0);
3604
3605 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3606 }
3607
3608
3609 /* Extract range information from a conditional expression STMT based on
3610 the ranges of each of its operands and the expression code. */
3611
3612 static void
3613 extract_range_from_cond_expr (value_range *vr, gassign *stmt)
3614 {
3615 tree op0, op1;
3616 value_range vr0 = VR_INITIALIZER;
3617 value_range vr1 = VR_INITIALIZER;
3618
3619 /* Get value ranges for each operand. For constant operands, create
3620 a new value range with the operand to simplify processing. */
3621 op0 = gimple_assign_rhs2 (stmt);
3622 if (TREE_CODE (op0) == SSA_NAME)
3623 vr0 = *(get_value_range (op0));
3624 else if (is_gimple_min_invariant (op0))
3625 set_value_range_to_value (&vr0, op0, NULL);
3626 else
3627 set_value_range_to_varying (&vr0);
3628
3629 op1 = gimple_assign_rhs3 (stmt);
3630 if (TREE_CODE (op1) == SSA_NAME)
3631 vr1 = *(get_value_range (op1));
3632 else if (is_gimple_min_invariant (op1))
3633 set_value_range_to_value (&vr1, op1, NULL);
3634 else
3635 set_value_range_to_varying (&vr1);
3636
3637 /* The resulting value range is the union of the operand ranges */
3638 copy_value_range (vr, &vr0);
3639 vrp_meet (vr, &vr1);
3640 }
3641
3642
3643 /* Extract range information from a comparison expression EXPR based
3644 on the range of its operand and the expression code. */
3645
3646 static void
3647 extract_range_from_comparison (value_range *vr, enum tree_code code,
3648 tree type, tree op0, tree op1)
3649 {
3650 bool sop = false;
3651 tree val;
3652
3653 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3654 NULL);
3655
3656 /* A disadvantage of using a special infinity as an overflow
3657 representation is that we lose the ability to record overflow
3658 when we don't have an infinity. So we have to ignore a result
3659 which relies on overflow. */
3660
3661 if (val && !is_overflow_infinity (val) && !sop)
3662 {
3663 /* Since this expression was found on the RHS of an assignment,
3664 its type may be different from _Bool. Convert VAL to EXPR's
3665 type. */
3666 val = fold_convert (type, val);
3667 if (is_gimple_min_invariant (val))
3668 set_value_range_to_value (vr, val, vr->equiv);
3669 else
3670 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3671 }
3672 else
3673 /* The result of a comparison is always true or false. */
3674 set_value_range_to_truthvalue (vr, type);
3675 }
3676
3677 /* Helper function for simplify_internal_call_using_ranges and
3678 extract_range_basic. Return true if OP0 SUBCODE OP1 for
3679 SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
3680 always overflow. Set *OVF to true if it is known to always
3681 overflow. */
3682
3683 static bool
3684 check_for_binary_op_overflow (enum tree_code subcode, tree type,
3685 tree op0, tree op1, bool *ovf)
3686 {
3687 value_range vr0 = VR_INITIALIZER;
3688 value_range vr1 = VR_INITIALIZER;
3689 if (TREE_CODE (op0) == SSA_NAME)
3690 vr0 = *get_value_range (op0);
3691 else if (TREE_CODE (op0) == INTEGER_CST)
3692 set_value_range_to_value (&vr0, op0, NULL);
3693 else
3694 set_value_range_to_varying (&vr0);
3695
3696 if (TREE_CODE (op1) == SSA_NAME)
3697 vr1 = *get_value_range (op1);
3698 else if (TREE_CODE (op1) == INTEGER_CST)
3699 set_value_range_to_value (&vr1, op1, NULL);
3700 else
3701 set_value_range_to_varying (&vr1);
3702
3703 if (!range_int_cst_p (&vr0)
3704 || TREE_OVERFLOW (vr0.min)
3705 || TREE_OVERFLOW (vr0.max))
3706 {
3707 vr0.min = vrp_val_min (TREE_TYPE (op0));
3708 vr0.max = vrp_val_max (TREE_TYPE (op0));
3709 }
3710 if (!range_int_cst_p (&vr1)
3711 || TREE_OVERFLOW (vr1.min)
3712 || TREE_OVERFLOW (vr1.max))
3713 {
3714 vr1.min = vrp_val_min (TREE_TYPE (op1));
3715 vr1.max = vrp_val_max (TREE_TYPE (op1));
3716 }
3717 *ovf = arith_overflowed_p (subcode, type, vr0.min,
3718 subcode == MINUS_EXPR ? vr1.max : vr1.min);
3719 if (arith_overflowed_p (subcode, type, vr0.max,
3720 subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
3721 return false;
3722 if (subcode == MULT_EXPR)
3723 {
3724 if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
3725 || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
3726 return false;
3727 }
3728 if (*ovf)
3729 {
3730 /* So far we found that there is an overflow on the boundaries.
3731 That doesn't prove that there is an overflow even for all values
3732 in between the boundaries. For that compute widest_int range
3733 of the result and see if it doesn't overlap the range of
3734 type. */
3735 widest_int wmin, wmax;
3736 widest_int w[4];
3737 int i;
3738 w[0] = wi::to_widest (vr0.min);
3739 w[1] = wi::to_widest (vr0.max);
3740 w[2] = wi::to_widest (vr1.min);
3741 w[3] = wi::to_widest (vr1.max);
3742 for (i = 0; i < 4; i++)
3743 {
3744 widest_int wt;
3745 switch (subcode)
3746 {
3747 case PLUS_EXPR:
3748 wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
3749 break;
3750 case MINUS_EXPR:
3751 wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
3752 break;
3753 case MULT_EXPR:
3754 wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
3755 break;
3756 default:
3757 gcc_unreachable ();
3758 }
3759 if (i == 0)
3760 {
3761 wmin = wt;
3762 wmax = wt;
3763 }
3764 else
3765 {
3766 wmin = wi::smin (wmin, wt);
3767 wmax = wi::smax (wmax, wt);
3768 }
3769 }
3770 /* The result of op0 CODE op1 is known to be in range
3771 [wmin, wmax]. */
3772 widest_int wtmin = wi::to_widest (vrp_val_min (type));
3773 widest_int wtmax = wi::to_widest (vrp_val_max (type));
3774 /* If all values in [wmin, wmax] are smaller than
3775 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
3776 the arithmetic operation will always overflow. */
3777 if (wi::lts_p (wmax, wtmin) || wi::gts_p (wmin, wtmax))
3778 return true;
3779 return false;
3780 }
3781 return true;
3782 }
3783
3784 /* Try to derive a nonnegative or nonzero range out of STMT relying
3785 primarily on generic routines in fold in conjunction with range data.
3786 Store the result in *VR */
3787
3788 static void
3789 extract_range_basic (value_range *vr, gimple *stmt)
3790 {
3791 bool sop = false;
3792 tree type = gimple_expr_type (stmt);
3793
3794 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3795 {
3796 tree fndecl = gimple_call_fndecl (stmt), arg;
/* MINI == -2 is used below as a sentinel meaning "give up unless the
   argument is proved non-zero".  */
3797 int mini, maxi, zerov = 0, prec;
3798
3799 switch (DECL_FUNCTION_CODE (fndecl))
3800 {
3801 case BUILT_IN_CONSTANT_P:
3802 /* If the call is __builtin_constant_p and the argument is a
3803 function parameter resolve it to false. This avoids bogus
3804 array bound warnings.
3805 ??? We could do this as early as inlining is finished. */
3806 arg = gimple_call_arg (stmt, 0);
3807 if (TREE_CODE (arg) == SSA_NAME
3808 && SSA_NAME_IS_DEFAULT_DEF (arg)
3809 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3810 {
3811 set_value_range_to_null (vr, type);
3812 return;
3813 }
3814 break;
3815 /* Both __builtin_ffs* and __builtin_popcount return
3816 [0, prec]. */
3817 CASE_INT_FN (BUILT_IN_FFS):
3818 CASE_INT_FN (BUILT_IN_POPCOUNT):
3819 arg = gimple_call_arg (stmt, 0);
3820 prec = TYPE_PRECISION (TREE_TYPE (arg));
3821 mini = 0;
3822 maxi = prec;
3823 if (TREE_CODE (arg) == SSA_NAME)
3824 {
3825 value_range *vr0 = get_value_range (arg);
3826 /* If arg is non-zero, then ffs or popcount
3827 are non-zero. */
3828 if (((vr0->type == VR_RANGE
3829 && range_includes_zero_p (vr0->min, vr0->max) == 0)
3830 || (vr0->type == VR_ANTI_RANGE
3831 && range_includes_zero_p (vr0->min, vr0->max) == 1))
3832 && !is_overflow_infinity (vr0->min)
3833 && !is_overflow_infinity (vr0->max))
3834 mini = 1;
3835 /* If some high bits are known to be zero,
3836 we can decrease the maximum. */
3837 if (vr0->type == VR_RANGE
3838 && TREE_CODE (vr0->max) == INTEGER_CST
3839 && !operand_less_p (vr0->min,
3840 build_zero_cst (TREE_TYPE (vr0->min)))
3841 && !is_overflow_infinity (vr0->max))
3842 maxi = tree_floor_log2 (vr0->max) + 1;
3843 }
3844 goto bitop_builtin;
3845 /* __builtin_parity* returns [0, 1]. */
3846 CASE_INT_FN (BUILT_IN_PARITY):
3847 mini = 0;
3848 maxi = 1;
3849 goto bitop_builtin;
3850 /* __builtin_c[lt]z* return [0, prec-1], except for
3851 when the argument is 0, but that is undefined behavior.
3852 On many targets where the CLZ RTL or optab value is defined
3853 for 0 the value is prec, so include that in the range
3854 by default. */
3855 CASE_INT_FN (BUILT_IN_CLZ):
3856 arg = gimple_call_arg (stmt, 0);
3857 prec = TYPE_PRECISION (TREE_TYPE (arg));
3858 mini = 0;
3859 maxi = prec;
3860 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3861 != CODE_FOR_nothing
3862 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3863 zerov)
3864 /* Handle only the single common value. */
3865 && zerov != prec)
3866 /* Magic value to give up, unless vr0 proves
3867 arg is non-zero. */
3868 mini = -2;
3869 if (TREE_CODE (arg) == SSA_NAME)
3870 {
3871 value_range *vr0 = get_value_range (arg);
3872 /* From clz of VR_RANGE minimum we can compute
3873 result maximum. */
3874 if (vr0->type == VR_RANGE
3875 && TREE_CODE (vr0->min) == INTEGER_CST
3876 && !is_overflow_infinity (vr0->min))
3877 {
3878 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3879 if (maxi != prec)
3880 mini = 0;
3881 }
3882 else if (vr0->type == VR_ANTI_RANGE
3883 && integer_zerop (vr0->min)
3884 && !is_overflow_infinity (vr0->min))
3885 {
3886 maxi = prec - 1;
3887 mini = 0;
3888 }
3889 if (mini == -2)
3890 break;
3891 /* From clz of VR_RANGE maximum we can compute
3892 result minimum. */
3893 if (vr0->type == VR_RANGE
3894 && TREE_CODE (vr0->max) == INTEGER_CST
3895 && !is_overflow_infinity (vr0->max))
3896 {
3897 mini = prec - 1 - tree_floor_log2 (vr0->max);
3898 if (mini == prec)
3899 break;
3900 }
3901 }
3902 if (mini == -2)
3903 break;
3904 goto bitop_builtin;
3905 /* __builtin_ctz* return [0, prec-1], except for
3906 when the argument is 0, but that is undefined behavior.
3907 If there is a ctz optab for this mode and
3908 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3909 otherwise just assume 0 won't be seen. */
3910 CASE_INT_FN (BUILT_IN_CTZ):
3911 arg = gimple_call_arg (stmt, 0);
3912 prec = TYPE_PRECISION (TREE_TYPE (arg));
3913 mini = 0;
3914 maxi = prec - 1;
3915 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3916 != CODE_FOR_nothing
3917 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3918 zerov))
3919 {
3920 /* Handle only the two common values. */
3921 if (zerov == -1)
3922 mini = -1;
3923 else if (zerov == prec)
3924 maxi = prec;
3925 else
3926 /* Magic value to give up, unless vr0 proves
3927 arg is non-zero. */
3928 mini = -2;
3929 }
3930 if (TREE_CODE (arg) == SSA_NAME)
3931 {
3932 value_range *vr0 = get_value_range (arg);
3933 /* If arg is non-zero, then use [0, prec - 1]. */
3934 if (((vr0->type == VR_RANGE
3935 && integer_nonzerop (vr0->min))
3936 || (vr0->type == VR_ANTI_RANGE
3937 && integer_zerop (vr0->min)))
3938 && !is_overflow_infinity (vr0->min))
3939 {
3940 mini = 0;
3941 maxi = prec - 1;
3942 }
3943 /* If some high bits are known to be zero,
3944 we can decrease the result maximum. */
3945 if (vr0->type == VR_RANGE
3946 && TREE_CODE (vr0->max) == INTEGER_CST
3947 && !is_overflow_infinity (vr0->max))
3948 {
3949 maxi = tree_floor_log2 (vr0->max);
3950 /* For vr0 [0, 0] give up. */
3951 if (maxi == -1)
3952 break;
3953 }
3954 }
3955 if (mini == -2)
3956 break;
3957 goto bitop_builtin;
3958 /* __builtin_clrsb* returns [0, prec-1]. */
3959 CASE_INT_FN (BUILT_IN_CLRSB):
3960 arg = gimple_call_arg (stmt, 0);
3961 prec = TYPE_PRECISION (TREE_TYPE (arg));
3962 mini = 0;
3963 maxi = prec - 1;
3964 goto bitop_builtin;
3965 bitop_builtin:
3966 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
3967 build_int_cst (type, maxi), NULL);
3968 return;
3969 default:
3970 break;
3971 }
3972 }
/* Internal function calls: UBSAN overflow checks are evaluated as
   wrapping arithmetic; OpenACC dimension queries have known bounds.  */
3973 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
3974 {
3975 enum tree_code subcode = ERROR_MARK;
3976 unsigned ifn_code = gimple_call_internal_fn (stmt);
3977
3978 switch (ifn_code)
3979 {
3980 case IFN_UBSAN_CHECK_ADD:
3981 subcode = PLUS_EXPR;
3982 break;
3983 case IFN_UBSAN_CHECK_SUB:
3984 subcode = MINUS_EXPR;
3985 break;
3986 case IFN_UBSAN_CHECK_MUL:
3987 subcode = MULT_EXPR;
3988 break;
3989 case IFN_GOACC_DIM_SIZE:
3990 case IFN_GOACC_DIM_POS:
3991 /* Optimizing these two internal functions helps the loop
3992 optimizer eliminate outer comparisons. Size is [1,N]
3993 and pos is [0,N-1]. */
3994 {
3995 bool is_pos = ifn_code == IFN_GOACC_DIM_POS;
3996 int axis = get_oacc_ifn_dim_arg (stmt);
3997 int size = get_oacc_fn_dim_size (current_function_decl, axis);
3998
3999 if (!size)
4000 /* If it's dynamic, the backend might know a hardware
4001 limitation. */
4002 size = targetm.goacc.dim_limit (axis);
4003
/* Note: this TYPE shadows the function-level TYPE; the range type is
   taken from the call's lhs here.  */
4004 tree type = TREE_TYPE (gimple_call_lhs (stmt));
4005 set_value_range (vr, VR_RANGE,
4006 build_int_cst (type, is_pos ? 0 : 1),
4007 size ? build_int_cst (type, size - is_pos)
4008 : vrp_val_max (type), NULL);
4009 }
4010 return;
4011 default:
4012 break;
4013 }
4014 if (subcode != ERROR_MARK)
4015 {
4016 bool saved_flag_wrapv = flag_wrapv;
4017 /* Pretend the arithmetics is wrapping. If there is
4018 any overflow, we'll complain, but will actually do
4019 wrapping operation. */
4020 flag_wrapv = 1;
4021 extract_range_from_binary_expr (vr, subcode, type,
4022 gimple_call_arg (stmt, 0),
4023 gimple_call_arg (stmt, 1));
4024 flag_wrapv = saved_flag_wrapv;
4025
4026 /* If for both arguments vrp_valueize returned non-NULL,
4027 this should have been already folded and if not, it
4028 wasn't folded because of overflow. Avoid removing the
4029 UBSAN_CHECK_* calls in that case. */
4030 if (vr->type == VR_RANGE
4031 && (vr->min == vr->max
4032 || operand_equal_p (vr->min, vr->max, 0)))
4033 set_value_range_to_varying (vr);
4034 return;
4035 }
4036 }
4037 /* Handle extraction of the two results (result of arithmetics and
4038 a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
4039 internal function. */
4040 else if (is_gimple_assign (stmt)
4041 && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
4042 || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
4043 && INTEGRAL_TYPE_P (type))
4044 {
4045 enum tree_code code = gimple_assign_rhs_code (stmt);
4046 tree op = gimple_assign_rhs1 (stmt);
4047 if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
4048 {
4049 gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
4050 if (is_gimple_call (g) && gimple_call_internal_p (g))
4051 {
4052 enum tree_code subcode = ERROR_MARK;
4053 switch (gimple_call_internal_fn (g))
4054 {
4055 case IFN_ADD_OVERFLOW:
4056 subcode = PLUS_EXPR;
4057 break;
4058 case IFN_SUB_OVERFLOW:
4059 subcode = MINUS_EXPR;
4060 break;
4061 case IFN_MUL_OVERFLOW:
4062 subcode = MULT_EXPR;
4063 break;
4064 default:
4065 break;
4066 }
4067 if (subcode != ERROR_MARK)
4068 {
4069 tree op0 = gimple_call_arg (g, 0);
4070 tree op1 = gimple_call_arg (g, 1);
/* IMAGPART_EXPR extracts the overflow flag ([0, 1], or a constant when
   overflow is decidable); REALPART_EXPR extracts the wrapped result.  */
4071 if (code == IMAGPART_EXPR)
4072 {
4073 bool ovf = false;
4074 if (check_for_binary_op_overflow (subcode, type,
4075 op0, op1, &ovf))
4076 set_value_range_to_value (vr,
4077 build_int_cst (type, ovf),
4078 NULL);
4079 else
4080 set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
4081 build_int_cst (type, 1), NULL);
4082 }
4083 else if (types_compatible_p (type, TREE_TYPE (op0))
4084 && types_compatible_p (type, TREE_TYPE (op1)))
4085 {
4086 bool saved_flag_wrapv = flag_wrapv;
4087 /* Pretend the arithmetics is wrapping. If there is
4088 any overflow, IMAGPART_EXPR will be set. */
4089 flag_wrapv = 1;
4090 extract_range_from_binary_expr (vr, subcode, type,
4091 op0, op1);
4092 flag_wrapv = saved_flag_wrapv;
4093 }
4094 else
4095 {
4096 value_range vr0 = VR_INITIALIZER;
4097 value_range vr1 = VR_INITIALIZER;
4098 bool saved_flag_wrapv = flag_wrapv;
4099 /* Pretend the arithmetics is wrapping. If there is
4100 any overflow, IMAGPART_EXPR will be set. */
4101 flag_wrapv = 1;
4102 extract_range_from_unary_expr (&vr0, NOP_EXPR,
4103 type, op0);
4104 extract_range_from_unary_expr (&vr1, NOP_EXPR,
4105 type, op1);
4106 extract_range_from_binary_expr_1 (vr, subcode, type,
4107 &vr0, &vr1);
4108 flag_wrapv = saved_flag_wrapv;
4109 }
4110 return;
4111 }
4112 }
4113 }
4114 }
/* Generic fallbacks: ask fold whether STMT is known nonnegative or
   nonzero; otherwise the range is VARYING.  */
4115 if (INTEGRAL_TYPE_P (type)
4116 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
4117 set_value_range_to_nonnegative (vr, type,
4118 sop || stmt_overflow_infinity (stmt));
4119 else if (vrp_stmt_computes_nonzero (stmt, &sop)
4120 && !sop)
4121 set_value_range_to_nonnull (vr, type);
4122 else
4123 set_value_range_to_varying (vr);
4124 }
4125
4126
4127 /* Try to compute a useful range out of assignment STMT and store it
4128 in *VR. */
4129
4130 static void
4131 extract_range_from_assignment (value_range *vr, gassign *stmt)
4132 {
4133 enum tree_code code = gimple_assign_rhs_code (stmt);
4134
4135 if (code == ASSERT_EXPR)
4136 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
4137 else if (code == SSA_NAME)
4138 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
4139 else if (TREE_CODE_CLASS (code) == tcc_binary)
4140 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
4141 gimple_expr_type (stmt),
4142 gimple_assign_rhs1 (stmt),
4143 gimple_assign_rhs2 (stmt));
4144 else if (TREE_CODE_CLASS (code) == tcc_unary)
4145 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
4146 gimple_expr_type (stmt),
4147 gimple_assign_rhs1 (stmt));
4148 else if (code == COND_EXPR)
4149 extract_range_from_cond_expr (vr, stmt);
4150 else if (TREE_CODE_CLASS (code) == tcc_comparison)
4151 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
4152 gimple_expr_type (stmt),
4153 gimple_assign_rhs1 (stmt),
4154 gimple_assign_rhs2 (stmt));
4155 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
4156 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
4157 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
4158 else
4159 set_value_range_to_varying (vr);
4160
4161 if (vr->type == VR_VARYING)
4162 extract_range_basic (vr, stmt);
4163 }
4164
/* Given a range VR, a LOOP and a variable VAR, determine whether it
   would be profitable to adjust VR using scalar evolution information
   for VAR.  If so, update VR with the new limits.  STMT is the
   statement at which VAR is defined (a loop-header PHI).  */

static void
adjust_range_with_scev (value_range *vr, struct loop *loop,
			gimple *stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  /* Refine INIT and STEP to singleton constants when their value
     ranges pin them down to one value.  */
  init = initial_condition_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
				true))
    return;

  /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
     negative_overflow_infinity and positive_overflow_infinity,
     because we have concluded that the loop probably does not
     wrap.  */

  type = TREE_TYPE (var);
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      widest_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range maxvr = VR_INITIALIZER;
	  signop sgn = TYPE_SIGN (TREE_TYPE (step));
	  bool overflow;

	  /* Total evolution distance is STEP * number-of-iterations.  */
	  widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
				     &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
	      && (sgn == UNSIGNED
		  || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
	    {
	      tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */

      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  If we think it does, probably because of an
	     overflow due to arithmetic on a different INF value,
	     reset now.  */
	  if (is_negative_overflow_infinity (min)
	      || compare_values (min, tmin) == -1)
	    min = tmin;

	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  if (is_positive_overflow_infinity (max)
	      || compare_values (tmax, max) == -1)
	    max = tmax;
	}
    }
  else
    return;

  /* If we just created an invalid range with the minimum
     greater than the maximum, we fail conservatively.
     This should happen only in unreachable
     parts of code, or for invalid programs.  */
  if (compare_values (min, max) == 1
      || (is_negative_overflow_infinity (min)
	  && is_positive_overflow_infinity (max)))
    return;

  set_value_range (vr, VR_RANGE, min, max, vr->equiv);
}
4336
4337
/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:

   - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
   all the values in the ranges.

   - Return BOOLEAN_FALSE_NODE if the comparison always returns false.

   - Return NULL_TREE if it is not always possible to determine the
   value of the comparison.

   Also set *STRICT_OVERFLOW_P to indicate whether a range with an
   overflow infinity was used in the test.  */


static tree
compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* Ranges built from overflow infinities are only usable if the
     -fstrict-overflow machinery allows it (this also records the
     use in *STRICT_OVERFLOW_P).  */
  if (!usable_range_p (vr0, strict_overflow_p)
      || !usable_range_p (vr1, strict_overflow_p))
    return NULL_TREE;

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      std::swap (vr0, vr1);
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  /* -2 means the values could not be compared at compile time;
	     any other result proves the singletons differ.  */
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V0_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All comparison codes were normalized to EQ/NE/LT/LE above.  */
  gcc_unreachable ();
}
4496
4497
/* Given a value range VR, a value VAL and a comparison code COMP, return
   BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
   values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
   always returns false.  Return NULL_TREE if it is not always
   possible to determine the value of the comparison.  Also set
   *STRICT_OVERFLOW_P to indicate whether a range with an overflow
   infinity was used in the test.  */

static tree
compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
			  bool *strict_overflow_p)
{
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if  VAL_1 <= VAL <= VAL_2.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* Overflow-infinity ranges are only usable under -fstrict-overflow;
     usable_range_p also records the use in *STRICT_OVERFLOW_P.  */
  if (!usable_range_p (vr, strict_overflow_p))
    return NULL_TREE;

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  /* -1/1 mean strictly ordered; 2 means known-unequal with
	     unknown ordering (see compare_values).  All prove VAL is
	     not the singleton.  */
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* VAL strictly outside [VR->MIN, VR->MAX] can never be equal.  */
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* Every tree comparison code was handled above.  */
  gcc_unreachable ();
}
4625
4626
4627 /* Debugging dumps. */
4628
4629 void dump_value_range (FILE *, value_range *);
4630 void debug_value_range (value_range *);
4631 void dump_all_value_ranges (FILE *);
4632 void debug_all_value_ranges (void);
4633 void dump_vr_equiv (FILE *, bitmap);
4634 void debug_vr_equiv (bitmap);
4635
4636
4637 /* Dump value range VR to FILE. */
4638
4639 void
4640 dump_value_range (FILE *file, value_range *vr)
4641 {
4642 if (vr == NULL)
4643 fprintf (file, "[]");
4644 else if (vr->type == VR_UNDEFINED)
4645 fprintf (file, "UNDEFINED");
4646 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4647 {
4648 tree type = TREE_TYPE (vr->min);
4649
4650 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4651
4652 if (is_negative_overflow_infinity (vr->min))
4653 fprintf (file, "-INF(OVF)");
4654 else if (INTEGRAL_TYPE_P (type)
4655 && !TYPE_UNSIGNED (type)
4656 && vrp_val_is_min (vr->min))
4657 fprintf (file, "-INF");
4658 else
4659 print_generic_expr (file, vr->min, 0);
4660
4661 fprintf (file, ", ");
4662
4663 if (is_positive_overflow_infinity (vr->max))
4664 fprintf (file, "+INF(OVF)");
4665 else if (INTEGRAL_TYPE_P (type)
4666 && vrp_val_is_max (vr->max))
4667 fprintf (file, "+INF");
4668 else
4669 print_generic_expr (file, vr->max, 0);
4670
4671 fprintf (file, "]");
4672
4673 if (vr->equiv)
4674 {
4675 bitmap_iterator bi;
4676 unsigned i, c = 0;
4677
4678 fprintf (file, " EQUIVALENCES: { ");
4679
4680 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4681 {
4682 print_generic_expr (file, ssa_name (i), 0);
4683 fprintf (file, " ");
4684 c++;
4685 }
4686
4687 fprintf (file, "} (%u elements)", c);
4688 }
4689 }
4690 else if (vr->type == VR_VARYING)
4691 fprintf (file, "VARYING");
4692 else
4693 fprintf (file, "INVALID RANGE");
4694 }
4695
4696
4697 /* Dump value range VR to stderr. */
4698
4699 DEBUG_FUNCTION void
4700 debug_value_range (value_range *vr)
4701 {
4702 dump_value_range (stderr, vr);
4703 fprintf (stderr, "\n");
4704 }
4705
4706
4707 /* Dump value ranges of all SSA_NAMEs to FILE. */
4708
4709 void
4710 dump_all_value_ranges (FILE *file)
4711 {
4712 size_t i;
4713
4714 for (i = 0; i < num_vr_values; i++)
4715 {
4716 if (vr_value[i])
4717 {
4718 print_generic_expr (file, ssa_name (i), 0);
4719 fprintf (file, ": ");
4720 dump_value_range (file, vr_value[i]);
4721 fprintf (file, "\n");
4722 }
4723 }
4724
4725 fprintf (file, "\n");
4726 }
4727
4728
4729 /* Dump all value ranges to stderr. */
4730
4731 DEBUG_FUNCTION void
4732 debug_all_value_ranges (void)
4733 {
4734 dump_all_value_ranges (stderr);
4735 }
4736
4737
4738 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4739 create a new SSA name N and return the assertion assignment
4740 'N = ASSERT_EXPR <V, V OP W>'. */
4741
4742 static gimple *
4743 build_assert_expr_for (tree cond, tree v)
4744 {
4745 tree a;
4746 gassign *assertion;
4747
4748 gcc_assert (TREE_CODE (v) == SSA_NAME
4749 && COMPARISON_CLASS_P (cond));
4750
4751 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4752 assertion = gimple_build_assign (NULL_TREE, a);
4753
4754 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
4755 operand of the ASSERT_EXPR. Create it so the new name and the old one
4756 are registered in the replacement table so that we can fix the SSA web
4757 after adding all the ASSERT_EXPRs. */
4758 create_new_def_for (v, assertion, NULL);
4759
4760 return assertion;
4761 }
4762
4763
/* Return true if STMT is a GIMPLE_COND whose predicate involves
   floating point values (VRP does not handle float comparisons).
   NOTE: the original comment said "Return false", which contradicts
   the code below — it returns the result of FLOAT_TYPE_P.  */

static inline bool
fp_predicate (gimple *stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);

  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}
4774
4775 /* If the range of values taken by OP can be inferred after STMT executes,
4776 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4777 describes the inferred range. Return true if a range could be
4778 inferred. */
4779
4780 static bool
4781 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
4782 {
4783 *val_p = NULL_TREE;
4784 *comp_code_p = ERROR_MARK;
4785
4786 /* Do not attempt to infer anything in names that flow through
4787 abnormal edges. */
4788 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4789 return false;
4790
4791 /* Similarly, don't infer anything from statements that may throw
4792 exceptions. ??? Relax this requirement? */
4793 if (stmt_could_throw_p (stmt))
4794 return false;
4795
4796 /* If STMT is the last statement of a basic block with no normal
4797 successors, there is no point inferring anything about any of its
4798 operands. We would not be able to find a proper insertion point
4799 for the assertion, anyway. */
4800 if (stmt_ends_bb_p (stmt))
4801 {
4802 edge_iterator ei;
4803 edge e;
4804
4805 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
4806 if (!(e->flags & EDGE_ABNORMAL))
4807 break;
4808 if (e == NULL)
4809 return false;
4810 }
4811
4812 if (infer_nonnull_range (stmt, op))
4813 {
4814 *val_p = build_int_cst (TREE_TYPE (op), 0);
4815 *comp_code_p = NE_EXPR;
4816 return true;
4817 }
4818
4819 return false;
4820 }
4821
4822
4823 void dump_asserts_for (FILE *, tree);
4824 void debug_asserts_for (tree);
4825 void dump_all_asserts (FILE *);
4826 void debug_all_asserts (void);
4827
4828 /* Dump all the registered assertions for NAME to FILE. */
4829
4830 void
4831 dump_asserts_for (FILE *file, tree name)
4832 {
4833 assert_locus *loc;
4834
4835 fprintf (file, "Assertions to be inserted for ");
4836 print_generic_expr (file, name, 0);
4837 fprintf (file, "\n");
4838
4839 loc = asserts_for[SSA_NAME_VERSION (name)];
4840 while (loc)
4841 {
4842 fprintf (file, "\t");
4843 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4844 fprintf (file, "\n\tBB #%d", loc->bb->index);
4845 if (loc->e)
4846 {
4847 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4848 loc->e->dest->index);
4849 dump_edge_info (file, loc->e, dump_flags, 0);
4850 }
4851 fprintf (file, "\n\tPREDICATE: ");
4852 print_generic_expr (file, name, 0);
4853 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
4854 print_generic_expr (file, loc->val, 0);
4855 fprintf (file, "\n\n");
4856 loc = loc->next;
4857 }
4858
4859 fprintf (file, "\n");
4860 }
4861
4862
4863 /* Dump all the registered assertions for NAME to stderr. */
4864
4865 DEBUG_FUNCTION void
4866 debug_asserts_for (tree name)
4867 {
4868 dump_asserts_for (stderr, name);
4869 }
4870
4871
4872 /* Dump all the registered assertions for all the names to FILE. */
4873
4874 void
4875 dump_all_asserts (FILE *file)
4876 {
4877 unsigned i;
4878 bitmap_iterator bi;
4879
4880 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4881 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4882 dump_asserts_for (file, ssa_name (i));
4883 fprintf (file, "\n");
4884 }
4885
4886
4887 /* Dump all the registered assertions for all the names to stderr. */
4888
4889 DEBUG_FUNCTION void
4890 debug_all_asserts (void)
4891 {
4892 dump_all_asserts (stderr);
4893 }
4894
4895
4896 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4897 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4898 E->DEST, then register this location as a possible insertion point
4899 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4900
4901 BB, E and SI provide the exact insertion point for the new
4902 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4903 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4904 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4905 must not be NULL. */
4906
4907 static void
4908 register_new_assert_for (tree name, tree expr,
4909 enum tree_code comp_code,
4910 tree val,
4911 basic_block bb,
4912 edge e,
4913 gimple_stmt_iterator si)
4914 {
4915 assert_locus *n, *loc, *last_loc;
4916 basic_block dest_bb;
4917
4918 gcc_checking_assert (bb == NULL || e == NULL);
4919
4920 if (e == NULL)
4921 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4922 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4923
4924 /* Never build an assert comparing against an integer constant with
4925 TREE_OVERFLOW set. This confuses our undefined overflow warning
4926 machinery. */
4927 if (TREE_OVERFLOW_P (val))
4928 val = drop_tree_overflow (val);
4929
4930 /* The new assertion A will be inserted at BB or E. We need to
4931 determine if the new location is dominated by a previously
4932 registered location for A. If we are doing an edge insertion,
4933 assume that A will be inserted at E->DEST. Note that this is not
4934 necessarily true.
4935
4936 If E is a critical edge, it will be split. But even if E is
4937 split, the new block will dominate the same set of blocks that
4938 E->DEST dominates.
4939
4940 The reverse, however, is not true, blocks dominated by E->DEST
4941 will not be dominated by the new block created to split E. So,
4942 if the insertion location is on a critical edge, we will not use
4943 the new location to move another assertion previously registered
4944 at a block dominated by E->DEST. */
4945 dest_bb = (bb) ? bb : e->dest;
4946
4947 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4948 VAL at a block dominating DEST_BB, then we don't need to insert a new
4949 one. Similarly, if the same assertion already exists at a block
4950 dominated by DEST_BB and the new location is not on a critical
4951 edge, then update the existing location for the assertion (i.e.,
4952 move the assertion up in the dominance tree).
4953
4954 Note, this is implemented as a simple linked list because there
4955 should not be more than a handful of assertions registered per
4956 name. If this becomes a performance problem, a table hashed by
4957 COMP_CODE and VAL could be implemented. */
4958 loc = asserts_for[SSA_NAME_VERSION (name)];
4959 last_loc = loc;
4960 while (loc)
4961 {
4962 if (loc->comp_code == comp_code
4963 && (loc->val == val
4964 || operand_equal_p (loc->val, val, 0))
4965 && (loc->expr == expr
4966 || operand_equal_p (loc->expr, expr, 0)))
4967 {
4968 /* If E is not a critical edge and DEST_BB
4969 dominates the existing location for the assertion, move
4970 the assertion up in the dominance tree by updating its
4971 location information. */
4972 if ((e == NULL || !EDGE_CRITICAL_P (e))
4973 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4974 {
4975 loc->bb = dest_bb;
4976 loc->e = e;
4977 loc->si = si;
4978 return;
4979 }
4980 }
4981
4982 /* Update the last node of the list and move to the next one. */
4983 last_loc = loc;
4984 loc = loc->next;
4985 }
4986
4987 /* If we didn't find an assertion already registered for
4988 NAME COMP_CODE VAL, add a new one at the end of the list of
4989 assertions associated with NAME. */
4990 n = XNEW (struct assert_locus);
4991 n->bb = dest_bb;
4992 n->e = e;
4993 n->si = si;
4994 n->comp_code = comp_code;
4995 n->val = val;
4996 n->expr = expr;
4997 n->next = NULL;
4998
4999 if (last_loc)
5000 last_loc->next = n;
5001 else
5002 asserts_for[SSA_NAME_VERSION (name)] = n;
5003
5004 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
5005 }
5006
5007 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
5008 Extract a suitable test code and value and store them into *CODE_P and
5009 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
5010
5011 If no extraction was possible, return FALSE, otherwise return TRUE.
5012
5013 If INVERT is true, then we invert the result stored into *CODE_P. */
5014
5015 static bool
5016 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
5017 tree cond_op0, tree cond_op1,
5018 bool invert, enum tree_code *code_p,
5019 tree *val_p)
5020 {
5021 enum tree_code comp_code;
5022 tree val;
5023
5024 /* Otherwise, we have a comparison of the form NAME COMP VAL
5025 or VAL COMP NAME. */
5026 if (name == cond_op1)
5027 {
5028 /* If the predicate is of the form VAL COMP NAME, flip
5029 COMP around because we need to register NAME as the
5030 first operand in the predicate. */
5031 comp_code = swap_tree_comparison (cond_code);
5032 val = cond_op0;
5033 }
5034 else
5035 {
5036 /* The comparison is of the form NAME COMP VAL, so the
5037 comparison code remains unchanged. */
5038 comp_code = cond_code;
5039 val = cond_op1;
5040 }
5041
5042 /* Invert the comparison code as necessary. */
5043 if (invert)
5044 comp_code = invert_tree_comparison (comp_code, 0);
5045
5046 /* VRP does not handle float types. */
5047 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
5048 return false;
5049
5050 /* Do not register always-false predicates.
5051 FIXME: this works around a limitation in fold() when dealing with
5052 enumerations. Given 'enum { N1, N2 } x;', fold will not
5053 fold 'if (x > N2)' to 'if (0)'. */
5054 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
5055 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
5056 {
5057 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
5058 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
5059
5060 if (comp_code == GT_EXPR
5061 && (!max
5062 || compare_values (val, max) == 0))
5063 return false;
5064
5065 if (comp_code == LT_EXPR
5066 && (!min
5067 || compare_values (val, min) == 0))
5068 return false;
5069 }
5070 *code_p = comp_code;
5071 *val_p = val;
5072 return true;
5073 }
5074
/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static wide_int
masked_increment (const wide_int &val_in, const wide_int &mask,
		  const wide_int &sgnbit, unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  /* Work in the unsigned domain; SGNBIT is xor'ed back on return.  */
  wide_int val = val_in ^ sgnbit;
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      /* Only bits set in MASK are candidates for incrementing.  */
      if ((res & bit) == 0)
	continue;
      /* Round VAL up past this bit position and clear everything
	 below it, then keep only bits allowed by MASK.  */
      res = bit - 1;
      res = (val + bit).and_not (res);
      res &= mask;
      /* The smallest candidate strictly greater than VAL wins.  */
      if (wi::gtu_p (res, val))
	return res ^ sgnbit;
    }
  /* No RES > VAL with (RES & MASK) == RES exists; return VAL.  */
  return val ^ sgnbit;
}
5102
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by BSI.
   Invert the condition COND if INVERT is true.

   COND is given decomposed as COND_CODE applied to COND_OP0 and
   COND_OP1.  Besides an assertion for NAME itself, this walks NAME's
   def chain and use list to derive additional assertions for related
   SSA names (through additions, casts, shifts and bitwise-ands).  */

static void
register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
			    enum tree_code cond_code,
			    tree cond_op0, tree cond_op1, bool invert)
{
  tree val;
  enum tree_code comp_code;

  /* Normalize the condition into NAME COMP_CODE VAL; bail out if the
     condition does not constrain NAME against a value.  */
  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0,
						cond_op1,
						invert, &comp_code, &val))
    return;

  /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
     reachable from E.  */
  if (live_on_edge (e, name)
      && !has_single_use (name))
    register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);

  /* In the case of NAME <= CST and NAME being defined as
     NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
     and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
     This catches range and anti-range tests.  */
  if ((comp_code == LE_EXPR
       || comp_code == GT_EXPR)
      && TREE_CODE (val) == INTEGER_CST
      && TYPE_UNSIGNED (TREE_TYPE (val)))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;

      /* Extract CST2 from the (optional) addition.  */
      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && TREE_CODE (cst2) == INTEGER_CST)
	    def_stmt = SSA_NAME_DEF_STMT (name2);
	}

      /* Extract NAME2 from the (optional) sign-changing cast.
	 Only same-precision sign changes are handled; truncations
	 and extensions would alter the value being tested.  */
      if (gimple_assign_cast_p (def_stmt))
	{
	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
	    name3 = gimple_assign_rhs1 (def_stmt);
	}

      /* If name3 is used later, create an ASSERT_EXPR for it.  */
      if (name3 != NULL_TREE
	  && TREE_CODE (name3) == SSA_NAME
	  && (cst2 == NULL_TREE
	      || TREE_CODE (cst2) == INTEGER_CST)
	  && INTEGRAL_TYPE_P (TREE_TYPE (name3))
	  && live_on_edge (e, name3)
	  && !has_single_use (name3))
	{
	  tree tmp;

	  /* Build an expression for the range test.  */
	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Adding assert for ");
	      print_generic_expr (dump_file, name3, 0);
	      fprintf (dump_file, " from ");
	      print_generic_expr (dump_file, tmp, 0);
	      fprintf (dump_file, "\n");
	    }

	  register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
	}

      /* If name2 is used later, create an ASSERT_EXPR for it.  */
      if (name2 != NULL_TREE
	  && TREE_CODE (name2) == SSA_NAME
	  && TREE_CODE (cst2) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	  && live_on_edge (e, name2)
	  && !has_single_use (name2))
	{
	  tree tmp;

	  /* Build an expression for the range test.  */
	  tmp = name2;
	  if (TREE_TYPE (name) != TREE_TYPE (name2))
	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Adding assert for ");
	      print_generic_expr (dump_file, name2, 0);
	      fprintf (dump_file, " from ");
	      print_generic_expr (dump_file, tmp, 0);
	      fprintf (dump_file, "\n");
	    }

	  register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
	}
    }

  /* In the case of post-in/decrement tests like if (i++) ... and uses
     of the in/decremented value on the edge the extra name we want to
     assert for is not on the def chain of the name compared.  Instead
     it is in the set of use stmts.
     Similar cases happen for conversions that were simplified through
     fold_{sign_changed,widened}_comparison.  */
  if ((comp_code == NE_EXPR
       || comp_code == EQ_EXPR)
      && TREE_CODE (val) == INTEGER_CST)
    {
      imm_use_iterator ui;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
	{
	  if (!is_gimple_assign (use_stmt))
	    continue;

	  /* Cut off to use-stmts that are dominating the predecessor.  */
	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
	    continue;

	  tree name2 = gimple_assign_lhs (use_stmt);
	  if (TREE_CODE (name2) != SSA_NAME
	      || !live_on_edge (e, name2))
	    continue;

	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
	  tree cst;
	  if (code == PLUS_EXPR
	      || code == MINUS_EXPR)
	    {
	      /* NAME2 = NAME +- CST2 implies NAME2 ==/!= VAL +- CST2.  */
	      cst = gimple_assign_rhs2 (use_stmt);
	      if (TREE_CODE (cst) != INTEGER_CST)
		continue;
	      cst = int_const_binop (code, val, cst);
	    }
	  else if (CONVERT_EXPR_CODE_P (code))
	    {
	      /* For truncating conversions we cannot record
		 an inequality.  */
	      if (comp_code == NE_EXPR
		  && (TYPE_PRECISION (TREE_TYPE (name2))
		      < TYPE_PRECISION (TREE_TYPE (name))))
		continue;
	      cst = fold_convert (TREE_TYPE (name2), val);
	    }
	  else
	    continue;

	  if (TREE_OVERFLOW_P (cst))
	    cst = drop_tree_overflow (cst);
	  register_new_assert_for (name2, name2, comp_code, cst,
				   NULL, e, bsi);
	}
    }

  /* Below, derive asserts for NAME defined as a cast, right shift or
     bitwise-and of another SSA name (NAMES[0] is the directly used
     operand, NAMES[1] the operand behind one more cast).  */
  if (TREE_CODE_CLASS (comp_code) == tcc_comparison
      && TREE_CODE (val) == INTEGER_CST)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
      tree val2 = NULL_TREE;
      unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
      wide_int mask = wi::zero (prec);
      unsigned int nprec = prec;
      enum tree_code rhs_code = ERROR_MARK;

      if (is_gimple_assign (def_stmt))
	rhs_code = gimple_assign_rhs_code (def_stmt);

      /* Add asserts for NAME cmp CST and NAME being defined
	 as NAME = (int) NAME2.  */
      if (!TYPE_UNSIGNED (TREE_TYPE (val))
	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
	  && gimple_assign_cast_p (def_stmt))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (CONVERT_EXPR_CODE_P (rhs_code)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TYPE_UNSIGNED (TREE_TYPE (name2))
	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
		  || !tree_int_cst_equal (val,
					  TYPE_MIN_VALUE (TREE_TYPE (val))))
	      && live_on_edge (e, name2)
	      && !has_single_use (name2))
	    {
	      tree tmp, cst;
	      enum tree_code new_comp_code = comp_code;

	      cst = fold_convert (TREE_TYPE (name2),
				  TYPE_MIN_VALUE (TREE_TYPE (val)));
	      /* Build an expression for the range test.  */
	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
				 fold_convert (TREE_TYPE (name2), val));
	      /* Strict comparisons are rewritten as non-strict ones
		 against CST - 1 so a single LE/GT form suffices.  */
	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
		{
		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
				     build_int_cst (TREE_TYPE (name2), 1));
		}

	      if (dump_file)
		{
		  fprintf (dump_file, "Adding assert for ");
		  print_generic_expr (dump_file, name2, 0);
		  fprintf (dump_file, " from ");
		  print_generic_expr (dump_file, tmp, 0);
		  fprintf (dump_file, "\n");
		}

	      register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
				       e, bsi);
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 >> CST2.

	 Extract CST2 from the right shift.  */
      if (rhs_code == RSHIFT_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && tree_fits_uhwi_p (cst2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
	      && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
	      && live_on_edge (e, name2)
	      && !has_single_use (name2))
	    {
	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
	    }
	}
      /* Only usable if shifting VAL back right reproduces it, i.e.
	 the left shift did not lose significant bits.  */
      if (val2 != NULL_TREE
	  && TREE_CODE (val2) == INTEGER_CST
	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
					    TREE_TYPE (val),
					    val2, cst2), val))
	{
	  enum tree_code new_comp_code = comp_code;
	  tree tmp, new_val;

	  tmp = name2;
	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
	    {
	      /* NAME >> CST2 ==/!= VAL becomes an unsigned range test
		 of NAME2 - (VAL << CST2) against the low-bit mask.  */
	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
		{
		  tree type = build_nonstandard_integer_type (prec, 1);
		  tmp = build1 (NOP_EXPR, type, name2);
		  val2 = fold_convert (type, val2);
		}
	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
	    }
	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
	    {
	      wide_int minval
		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      new_val = val2;
	      /* A bound equal to the type minimum carries no
		 information; drop the assert.  */
	      if (minval == new_val)
		new_val = NULL_TREE;
	    }
	  else
	    {
	      wide_int maxval
		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      mask |= val2;
	      if (mask == maxval)
		new_val = NULL_TREE;
	      else
		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
	    }

	  if (new_val)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Adding assert for ");
		  print_generic_expr (dump_file, name2, 0);
		  fprintf (dump_file, " from ");
		  print_generic_expr (dump_file, tmp, 0);
		  fprintf (dump_file, "\n");
		}

	      register_new_assert_for (name2, tmp, new_comp_code, new_val,
				       NULL, e, bsi);
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 & CST2.

	 Extract CST2 from the and.

	 Also handle
	 NAME = (unsigned) NAME2;
	 casts where NAME's type is unsigned and has smaller precision
	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
      names[0] = NULL_TREE;
      names[1] = NULL_TREE;
      cst2 = NULL_TREE;
      if (rhs_code == BIT_AND_EXPR
	  || (CONVERT_EXPR_CODE_P (rhs_code)
	      && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
	      && TYPE_UNSIGNED (TREE_TYPE (val))
	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
		 > prec))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (rhs_code == BIT_AND_EXPR)
	    cst2 = gimple_assign_rhs2 (def_stmt);
	  else
	    {
	      /* Model the narrowing cast as NAME2 & TYPE_MAX of the
		 narrow type; the range math below runs at NAME2's
		 wider precision NPREC.  */
	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
	    }
	  if (TREE_CODE (name2) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TREE_CODE (cst2) == INTEGER_CST
	      && !integer_zerop (cst2)
	      && (nprec > 1
		  || TYPE_UNSIGNED (TREE_TYPE (val))))
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
	      if (gimple_assign_cast_p (def_stmt2))
		{
		  names[1] = gimple_assign_rhs1 (def_stmt2);
		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
		      || (TYPE_PRECISION (TREE_TYPE (name2))
			  != TYPE_PRECISION (TREE_TYPE (names[1])))
		      || !live_on_edge (e, names[1])
		      || has_single_use (names[1]))
		    names[1] = NULL_TREE;
		}
	      if (live_on_edge (e, name2)
		  && !has_single_use (name2))
		names[0] = name2;
	    }
	}
      if (names[0] || names[1])
	{
	  /* Compute an unsigned range [MINV, MAXV] for NAME2 from
	     (NAME2 & CST2) COMP_CODE VAL, working at precision NPREC.
	     SGNBIT biases signed values into unsigned order.  */
	  wide_int minv, maxv, valv, cst2v;
	  wide_int tem, sgnbit;
	  bool valid_p = false, valn, cst2n;
	  enum tree_code ccode = comp_code;

	  valv = wide_int::from (val, nprec, UNSIGNED);
	  cst2v = wide_int::from (cst2, nprec, UNSIGNED);
	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
	  /* If CST2 doesn't have most significant bit set,
	     but VAL is negative, we have comparison like
	     if ((x & 0x123) > -4) (always true).  Just give up.  */
	  if (!cst2n && valn)
	    ccode = ERROR_MARK;
	  if (cst2n)
	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	  else
	    sgnbit = wi::zero (nprec);
	  minv = valv & cst2v;
	  switch (ccode)
	    {
	    case EQ_EXPR:
	      /* Minimum unsigned value for equality is VAL & CST2
		 (should be equal to VAL, otherwise we probably should
		 have folded the comparison into false) and
		 maximum unsigned value is VAL | ~CST2.  */
	      maxv = valv | ~cst2v;
	      valid_p = true;
	      break;

	    case NE_EXPR:
	      tem = valv | ~cst2v;
	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
	      if (valv == 0)
		{
		  cst2n = false;
		  sgnbit = wi::zero (nprec);
		  goto gt_expr;
		}
	      /* If (VAL | ~CST2) is all ones, handle it as
		 (X & CST2) < VAL.  */
	      if (tem == -1)
		{
		  cst2n = false;
		  valn = false;
		  sgnbit = wi::zero (nprec);
		  goto lt_expr;
		}
	      if (!cst2n && wi::neg_p (cst2v))
		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	      if (sgnbit != 0)
		{
		  if (valv == sgnbit)
		    {
		      cst2n = true;
		      valn = true;
		      goto gt_expr;
		    }
		  if (tem == wi::mask (nprec - 1, false, nprec))
		    {
		      cst2n = true;
		      goto lt_expr;
		    }
		  if (!cst2n)
		    sgnbit = wi::zero (nprec);
		}
	      break;

	    case GE_EXPR:
	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
		 is VAL and maximum unsigned value is ~0.  For signed
		 comparison, if CST2 doesn't have most significant bit
		 set, handle it similarly.  If CST2 has MSB set,
		 the minimum is the same, and maximum is ~0U/2.  */
	      if (minv != valv)
		{
		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
		     VAL.  */
		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (minv == valv)
		    break;
		}
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case GT_EXPR:
	    gt_expr:
	      /* Find out smallest MINV where MINV > VAL
		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
	      if (minv == valv)
		break;
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case LE_EXPR:
	      /* Minimum unsigned value for <= is 0 and maximum
		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		maxv = valv;
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		  maxv -= 1;
		}
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    case LT_EXPR:
	    lt_expr:
	      /* Minimum unsigned value for < is 0 and maximum
		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		{
		  if (valv == sgnbit)
		    break;
		  maxv = valv;
		}
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		}
	      maxv -= 1;
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    default:
	      break;
	    }
	  /* (MAXV - MINV) == -1 means the range covers everything,
	     so the assert would be useless.  */
	  if (valid_p
	      && (maxv - minv) != -1)
	    {
	      tree tmp, new_val, type;
	      int i;

	      for (i = 0; i < 2; i++)
		if (names[i])
		  {
		    wide_int maxv2 = maxv;
		    tmp = names[i];
		    type = TREE_TYPE (names[i]);
		    if (!TYPE_UNSIGNED (type))
		      {
			type = build_nonstandard_integer_type (nprec, 1);
			tmp = build1 (NOP_EXPR, type, names[i]);
		      }
		    /* Rebase a [MINV, MAXV] range onto [0, MAXV - MINV]
		       so it can be expressed as a single <= test.  */
		    if (minv != 0)
		      {
			tmp = build2 (PLUS_EXPR, type, tmp,
				      wide_int_to_tree (type, -minv));
			maxv2 = maxv - minv;
		      }
		    new_val = wide_int_to_tree (type, maxv2);

		    if (dump_file)
		      {
			fprintf (dump_file, "Adding assert for ");
			print_generic_expr (dump_file, names[i], 0);
			fprintf (dump_file, " from ");
			print_generic_expr (dump_file, tmp, 0);
			fprintf (dump_file, "\n");
		      }

		    register_new_assert_for (names[i], tmp, LE_EXPR,
					     new_val, NULL, e, bsi);
		  }
	    }
	}
    }
}
5659
5660 /* OP is an operand of a truth value expression which is known to have
5661 a particular value. Register any asserts for OP and for any
5662 operands in OP's defining statement.
5663
5664 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5665 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5666
5667 static void
5668 register_edge_assert_for_1 (tree op, enum tree_code code,
5669 edge e, gimple_stmt_iterator bsi)
5670 {
5671 gimple *op_def;
5672 tree val;
5673 enum tree_code rhs_code;
5674
5675 /* We only care about SSA_NAMEs. */
5676 if (TREE_CODE (op) != SSA_NAME)
5677 return;
5678
5679 /* We know that OP will have a zero or nonzero value. If OP is used
5680 more than once go ahead and register an assert for OP. */
5681 if (live_on_edge (e, op)
5682 && !has_single_use (op))
5683 {
5684 val = build_int_cst (TREE_TYPE (op), 0);
5685 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5686 }
5687
5688 /* Now look at how OP is set. If it's set from a comparison,
5689 a truth operation or some bit operations, then we may be able
5690 to register information about the operands of that assignment. */
5691 op_def = SSA_NAME_DEF_STMT (op);
5692 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5693 return;
5694
5695 rhs_code = gimple_assign_rhs_code (op_def);
5696
5697 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5698 {
5699 bool invert = (code == EQ_EXPR ? true : false);
5700 tree op0 = gimple_assign_rhs1 (op_def);
5701 tree op1 = gimple_assign_rhs2 (op_def);
5702
5703 if (TREE_CODE (op0) == SSA_NAME)
5704 register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert);
5705 if (TREE_CODE (op1) == SSA_NAME)
5706 register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert);
5707 }
5708 else if ((code == NE_EXPR
5709 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5710 || (code == EQ_EXPR
5711 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5712 {
5713 /* Recurse on each operand. */
5714 tree op0 = gimple_assign_rhs1 (op_def);
5715 tree op1 = gimple_assign_rhs2 (op_def);
5716 if (TREE_CODE (op0) == SSA_NAME
5717 && has_single_use (op0))
5718 register_edge_assert_for_1 (op0, code, e, bsi);
5719 if (TREE_CODE (op1) == SSA_NAME
5720 && has_single_use (op1))
5721 register_edge_assert_for_1 (op1, code, e, bsi);
5722 }
5723 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5724 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5725 {
5726 /* Recurse, flipping CODE. */
5727 code = invert_tree_comparison (code, false);
5728 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5729 }
5730 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5731 {
5732 /* Recurse through the copy. */
5733 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5734 }
5735 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5736 {
5737 /* Recurse through the type conversion, unless it is a narrowing
5738 conversion or conversion from non-integral type. */
5739 tree rhs = gimple_assign_rhs1 (op_def);
5740 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5741 && (TYPE_PRECISION (TREE_TYPE (rhs))
5742 <= TYPE_PRECISION (TREE_TYPE (op))))
5743 register_edge_assert_for_1 (rhs, code, e, bsi);
5744 }
5745 }
5746
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by
   SI.  COND is given decomposed as COND_CODE applied to COND_OP0 and
   COND_OP1; the condition is implicitly inverted when E is the
   false edge of the jump.  */

static void
register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
			  enum tree_code cond_code, tree cond_op0,
			  tree cond_op1)
{
  tree val;
  enum tree_code comp_code;
  bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return;

  /* Normalize the condition into NAME COMP_CODE VAL, accounting for
     the else edge; give up if NAME is not constrained by COND.  */
  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0, cond_op1,
						is_else_edge,
						&comp_code, &val))
    return;

  /* Register ASSERT_EXPRs for name.  */
  register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
			      cond_op1, is_else_edge);

  /* If COND is effectively an equality test of an SSA_NAME against
     the value zero or one, then we may be able to assert values
     for SSA_NAMEs which flow into COND.  */

  /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
     statement of NAME we can assert both operands of the BIT_AND_EXPR
     have nonzero value.  */
  if (((comp_code == EQ_EXPR && integer_onep (val))
       || (comp_code == NE_EXPR && integer_zerop (val))))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);

      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  register_edge_assert_for_1 (op0, NE_EXPR, e, si);
	  register_edge_assert_for_1 (op1, NE_EXPR, e, si);
	}
    }

  /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
     statement of NAME we can assert both operands of the BIT_IOR_EXPR
     have zero value.  */
  if (((comp_code == EQ_EXPR && integer_zerop (val))
       || (comp_code == NE_EXPR && integer_onep (val))))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);

      /* For BIT_IOR_EXPR only if NAME == 0 both operands have
	 necessarily zero value, or if type-precision is one.  */
      if (is_gimple_assign (def_stmt)
	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
		  || comp_code == EQ_EXPR)))
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
	  register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
	}
    }
}
5820
5821
5822 /* Determine whether the outgoing edges of BB should receive an
5823 ASSERT_EXPR for each of the operands of BB's LAST statement.
5824 The last statement of BB must be a COND_EXPR.
5825
5826 If any of the sub-graphs rooted at BB have an interesting use of
5827 the predicate operands, an assert location node is added to the
5828 list of assertions for the corresponding operands. */
5829
5830 static void
5831 find_conditional_asserts (basic_block bb, gcond *last)
5832 {
5833 gimple_stmt_iterator bsi;
5834 tree op;
5835 edge_iterator ei;
5836 edge e;
5837 ssa_op_iter iter;
5838
5839 bsi = gsi_for_stmt (last);
5840
5841 /* Look for uses of the operands in each of the sub-graphs
5842 rooted at BB. We need to check each of the outgoing edges
5843 separately, so that we know what kind of ASSERT_EXPR to
5844 insert. */
5845 FOR_EACH_EDGE (e, ei, bb->succs)
5846 {
5847 if (e->dest == bb)
5848 continue;
5849
5850 /* Register the necessary assertions for each operand in the
5851 conditional predicate. */
5852 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5853 register_edge_assert_for (op, e, bsi,
5854 gimple_cond_code (last),
5855 gimple_cond_lhs (last),
5856 gimple_cond_rhs (last));
5857 }
5858 }
5859
/* Pairs a switch case label with its destination block, so labels can
   be sorted by destination (see compare_case_labels).  */

struct case_info
{
  tree expr;        /* The CASE_LABEL_EXPR of the switch case.  */
  basic_block bb;   /* Basic block the case label jumps to.  */
};
5865
5866 /* Compare two case labels sorting first by the destination bb index
5867 and then by the case value. */
5868
5869 static int
5870 compare_case_labels (const void *p1, const void *p2)
5871 {
5872 const struct case_info *ci1 = (const struct case_info *) p1;
5873 const struct case_info *ci2 = (const struct case_info *) p2;
5874 int idx1 = ci1->bb->index;
5875 int idx2 = ci2->bb->index;
5876
5877 if (idx1 < idx2)
5878 return -1;
5879 else if (idx1 == idx2)
5880 {
5881 /* Make sure the default label is first in a group. */
5882 if (!CASE_LOW (ci1->expr))
5883 return -1;
5884 else if (!CASE_LOW (ci2->expr))
5885 return 1;
5886 else
5887 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5888 CASE_LOW (ci2->expr));
5889 }
5890 else
5891 return 1;
5892 }
5893
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */

static void
find_switch_asserts (basic_block bb, gswitch *last)
{
  gimple_stmt_iterator bsi;
  tree op;
  edge e;
  struct case_info *ci;
  size_t n = gimple_switch_num_labels (last);
#if GCC_VERSION >= 4000
  unsigned int idx;
#else
  /* Work around GCC 3.4 bug (PR 37086).  */
  volatile unsigned int idx;
#endif

  bsi = gsi_for_stmt (last);
  op = gimple_switch_index (last);
  /* Only an SSA name index can carry assertions.  */
  if (TREE_CODE (op) != SSA_NAME)
    return;

  /* Build a vector of case labels sorted by destination label.  */
  ci = XNEWVEC (struct case_info, n);
  for (idx = 0; idx < n; ++idx)
    {
      ci[idx].expr = gimple_switch_label (last, idx);
      ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
    }
  qsort (ci, n, sizeof (struct case_info), compare_case_labels);

  for (idx = 0; idx < n; ++idx)
    {
      tree min, max;
      tree cl = ci[idx].expr;
      basic_block cbb = ci[idx].bb;

      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* If there are multiple case labels with the same destination
	 we need to combine them to a single value range for the edge.
	 This relies on the sort above placing same-destination labels
	 adjacently and in ascending value order.  */
      if (idx + 1 < n && cbb == ci[idx + 1].bb)
	{
	  /* Skip labels until the last of the group.  */
	  do {
	    ++idx;
	  } while (idx < n && cbb == ci[idx].bb);
	  --idx;

	  /* Pick up the maximum of the case label range.  */
	  if (CASE_HIGH (ci[idx].expr))
	    max = CASE_HIGH (ci[idx].expr);
	  else
	    max = CASE_LOW (ci[idx].expr);
	}

      /* Nothing to do if the range includes the default label until we
	 can register anti-ranges.  */
      if (min == NULL_TREE)
	continue;

      /* Find the edge to register the assert expr on.  */
      e = find_edge (bb, cbb);

      /* Register the necessary assertions for the operand in the
	 SWITCH_EXPR.  A single-value case asserts OP == MIN; a range
	 case asserts MIN <= OP and OP <= MAX.  */
      register_edge_assert_for (op, e, bsi,
				max ? GE_EXPR : EQ_EXPR,
				op, fold_convert (TREE_TYPE (op), min));
      if (max)
	register_edge_assert_for (op, e, bsi, LE_EXPR, op,
				  fold_convert (TREE_TYPE (op), max));
    }

  XDELETEVEC (ci);
}
5977
5978
5979 /* Traverse all the statements in block BB looking for statements that
5980 may generate useful assertions for the SSA names in their operand.
5981 If a statement produces a useful assertion A for name N_i, then the
5982 list of assertions already generated for N_i is scanned to
5983 determine if A is actually needed.
5984
5985 If N_i already had the assertion A at a location dominating the
5986 current location, then nothing needs to be done. Otherwise, the
5987 new location for A is recorded instead.
5988
5989 1- For every statement S in BB, all the variables used by S are
5990 added to bitmap FOUND_IN_SUBGRAPH.
5991
5992 2- If statement S uses an operand N in a way that exposes a known
5993 value range for N, then if N was not already generated by an
5994 ASSERT_EXPR, create a new assert location for N. For instance,
5995 if N is a pointer and the statement dereferences it, we can
5996 assume that N is not NULL.
5997
5998 3- COND_EXPRs are a special case of #2. We can derive range
5999 information from the predicate but need to insert different
6000 ASSERT_EXPRs for each of the sub-graphs rooted at the
6001 conditional block. If the last statement of BB is a conditional
6002 expression of the form 'X op Y', then
6003
6004 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
6005
6006 b) If the conditional is the only entry point to the sub-graph
6007 corresponding to the THEN_CLAUSE, recurse into it. On
6008 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
6009 an ASSERT_EXPR is added for the corresponding variable.
6010
6011 c) Repeat step (b) on the ELSE_CLAUSE.
6012
6013 d) Mark X and Y in FOUND_IN_SUBGRAPH.
6014
6015 For instance,
6016
6017 if (a == 9)
6018 b = a;
6019 else
6020 b = c + 1;
6021
6022 In this case, an assertion on the THEN clause is useful to
6023 determine that 'a' is always 9 on that edge. However, an assertion
6024 on the ELSE clause would be unnecessary.
6025
6026 4- If BB does not end in a conditional expression, then we recurse
6027 into BB's dominator children.
6028
6029 At the end of the recursive traversal, every SSA name will have a
6030 list of locations where ASSERT_EXPRs should be added. When a new
6031 location for name N is found, it is registered by calling
6032 register_new_assert_for. That function keeps track of all the
6033 registered assertions to prevent adding unnecessary assertions.
6034 For instance, if a pointer P_4 is dereferenced more than once in a
6035 dominator tree, only the location dominating all the dereference of
6036 P_4 will receive an ASSERT_EXPR. */
6037
static void
find_assert_locations_1 (basic_block bb, sbitmap live)
{
  gimple *last;

  last = last_stmt (bb);

  /* If BB's last statement is a conditional statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_COND
      && !fp_predicate (last)
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    find_conditional_asserts (bb, as_a <gcond *> (last));

  /* If BB's last statement is a switch statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_SWITCH
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    find_switch_asserts (bb, as_a <gswitch *> (last));

  /* Traverse all the statements in BB marking used names and looking
     for statements that may infer assertions for their used operands.
     The walk is backwards (from gsi_last_bb via gsi_prev) so that LIVE
     reflects liveness below the current statement: uses are added to
     LIVE and definitions removed as each statement is processed.  */
  for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
       gsi_prev (&si))
    {
      gimple *stmt;
      tree op;
      ssa_op_iter i;

      stmt = gsi_stmt (si);

      /* Debug statements must not influence where asserts go.  */
      if (is_gimple_debug (stmt))
	continue;

      /* See if we can derive an assertion for any of STMT's operands.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	{
	  tree value;
	  enum tree_code comp_code;

	  /* If op is not live beyond this stmt, do not bother to insert
	     asserts for it.  */
	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
	    continue;

	  /* If OP is used in such a way that we can infer a value
	     range for it, and we don't find a previous assertion for
	     it, create a new assertion location node for OP.  */
	  if (infer_value_range (stmt, op, &comp_code, &value))
	    {
	      /* If we are able to infer a nonzero value range for OP,
		 then walk backwards through the use-def chain to see if OP
		 was set via a typecast.

		 If so, then we can also infer a nonzero value range
		 for the operand of the NOP_EXPR.  */
	      if (comp_code == NE_EXPR && integer_zerop (value))
		{
		  tree t = op;
		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);

		  while (is_gimple_assign (def_stmt)
			 && CONVERT_EXPR_CODE_P
			      (gimple_assign_rhs_code (def_stmt))
			 && TREE_CODE
			      (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
			 && POINTER_TYPE_P
			      (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		    {
		      t = gimple_assign_rhs1 (def_stmt);
		      def_stmt = SSA_NAME_DEF_STMT (t);

		      /* Note we want to register the assert for the
			 operand of the NOP_EXPR after SI, not after the
			 conversion.  */
		      if (! has_single_use (t))
			register_new_assert_for (t, t, comp_code, value,
						 bb, NULL, si);
		    }
		}

	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
	    }
	}

      /* Update live.  Uses become live above this statement, the
	 statement's own definitions do not.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	bitmap_set_bit (live, SSA_NAME_VERSION (op));
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
    }

  /* Traverse all PHI nodes in BB, updating live.  */
  for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
       gsi_next (&si))
    {
      use_operand_p arg_p;
      ssa_op_iter i;
      gphi *phi = si.phi ();
      tree res = gimple_phi_result (phi);

      /* Virtual operands carry no value range information.  */
      if (virtual_operand_p (res))
	continue;

      FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
	{
	  tree arg = USE_FROM_PTR (arg_p);
	  if (TREE_CODE (arg) == SSA_NAME)
	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
	}

      bitmap_clear_bit (live, SSA_NAME_VERSION (res));
    }
}
6154
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.  */

static void
find_assert_locations (void)
{
  /* RPO holds the block indices in reverse post-order; BB_RPO is the
     inverse map (block index -> RPO position); LAST_RPO records, for
     each block, the RPO position of the earliest-visited block that
     still needs this block's live bitmap.  */
  int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
  int rpo_cnt, i;

  /* One lazily-allocated liveness bitmap per basic block; a NULL
     entry means no live names have been recorded for it yet.  */
  live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
  for (i = 0; i < rpo_cnt; ++i)
    bb_rpo[rpo[i]] = i;

  /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
     the order we compute liveness and insert asserts we otherwise
     fail to insert asserts into the loop latch.  */
  loop_p loop;
  FOR_EACH_LOOP (loop, 0)
    {
      i = loop->latch->index;
      unsigned int j = single_succ_edge (loop->latch)->dest_idx;
      for (gphi_iterator gsi = gsi_start_phis (loop->header);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;
	  tree arg = gimple_phi_arg_def (phi, j);
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      if (live[i] == NULL)
		{
		  live[i] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[i]);
		}
	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
	    }
	}
    }

  /* Walk the blocks in reverse RPO so that all (non-back-edge) uses
     are seen before their defs, computing liveness backwards.  */
  for (i = rpo_cnt - 1; i >= 0; --i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
      edge e;
      edge_iterator ei;

      if (!live[rpo[i]])
	{
	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
	  bitmap_clear (live[rpo[i]]);
	}

      /* Process BB and update the live information with uses in
	 this block.  */
      find_assert_locations_1 (bb, live[rpo[i]]);

      /* Merge liveness into the predecessor blocks and free it.  */
      if (!bitmap_empty_p (live[rpo[i]]))
	{
	  int pred_rpo = i;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int pred = e->src->index;
	      /* Back edges and the entry block never receive merged
		 liveness.  */
	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
		continue;

	      if (!live[pred])
		{
		  live[pred] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[pred]);
		}
	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);

	      if (bb_rpo[pred] < pred_rpo)
		pred_rpo = bb_rpo[pred];
	    }

	  /* Record the RPO number of the last visited block that needs
	     live information from this block.  */
	  last_rpo[rpo[i]] = pred_rpo;
	}
      else
	{
	  /* Nothing live: release the bitmap immediately.  */
	  sbitmap_free (live[rpo[i]]);
	  live[rpo[i]] = NULL;
	}

      /* We can free all successors live bitmaps if all their
	 predecessors have been visited already.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (last_rpo[e->dest->index] == i
	    && live[e->dest->index])
	  {
	    sbitmap_free (live[e->dest->index]);
	    live[e->dest->index] = NULL;
	  }
    }

  /* Release the order arrays and any bitmaps that survived (possible
     when the above early-free conditions never triggered).  */
  XDELETEVEC (rpo);
  XDELETEVEC (bb_rpo);
  XDELETEVEC (last_rpo);
  for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);
}
6264
6265 /* Create an ASSERT_EXPR for NAME and insert it in the location
6266 indicated by LOC. Return true if we made any edge insertions. */
6267
6268 static bool
6269 process_assert_insertions_for (tree name, assert_locus *loc)
6270 {
6271 /* Build the comparison expression NAME_i COMP_CODE VAL. */
6272 gimple *stmt;
6273 tree cond;
6274 gimple *assert_stmt;
6275 edge_iterator ei;
6276 edge e;
6277
6278 /* If we have X <=> X do not insert an assert expr for that. */
6279 if (loc->expr == loc->val)
6280 return false;
6281
6282 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6283 assert_stmt = build_assert_expr_for (cond, name);
6284 if (loc->e)
6285 {
6286 /* We have been asked to insert the assertion on an edge. This
6287 is used only by COND_EXPR and SWITCH_EXPR assertions. */
6288 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6289 || (gimple_code (gsi_stmt (loc->si))
6290 == GIMPLE_SWITCH));
6291
6292 gsi_insert_on_edge (loc->e, assert_stmt);
6293 return true;
6294 }
6295
6296 /* Otherwise, we can insert right after LOC->SI iff the
6297 statement must not be the last statement in the block. */
6298 stmt = gsi_stmt (loc->si);
6299 if (!stmt_ends_bb_p (stmt))
6300 {
6301 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6302 return false;
6303 }
6304
6305 /* If STMT must be the last statement in BB, we can only insert new
6306 assertions on the non-abnormal edge out of BB. Note that since
6307 STMT is not control flow, there may only be one non-abnormal edge
6308 out of BB. */
6309 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6310 if (!(e->flags & EDGE_ABNORMAL))
6311 {
6312 gsi_insert_on_edge (e, assert_stmt);
6313 return true;
6314 }
6315
6316 gcc_unreachable ();
6317 }
6318
6319
6320 /* Process all the insertions registered for every name N_i registered
6321 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6322 found in ASSERTS_FOR[i]. */
6323
6324 static void
6325 process_assert_insertions (void)
6326 {
6327 unsigned i;
6328 bitmap_iterator bi;
6329 bool update_edges_p = false;
6330 int num_asserts = 0;
6331
6332 if (dump_file && (dump_flags & TDF_DETAILS))
6333 dump_all_asserts (dump_file);
6334
6335 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6336 {
6337 assert_locus *loc = asserts_for[i];
6338 gcc_assert (loc);
6339
6340 while (loc)
6341 {
6342 assert_locus *next = loc->next;
6343 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6344 free (loc);
6345 loc = next;
6346 num_asserts++;
6347 }
6348 }
6349
6350 if (update_edges_p)
6351 gsi_commit_edge_inserts ();
6352
6353 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6354 num_asserts);
6355 }
6356
6357
6358 /* Traverse the flowgraph looking for conditional jumps to insert range
6359 expressions. These range expressions are meant to provide information
6360 to optimizations that need to reason in terms of value ranges. They
6361 will not be expanded into RTL. For instance, given:
6362
6363 x = ...
6364 y = ...
6365 if (x < y)
6366 y = x - 2;
6367 else
6368 x = y + 3;
6369
6370 this pass will transform the code into:
6371
6372 x = ...
6373 y = ...
6374 if (x < y)
6375 {
6376 x = ASSERT_EXPR <x, x < y>
6377 y = x - 2
6378 }
6379 else
6380 {
6381 y = ASSERT_EXPR <y, x >= y>
6382 x = y + 3
6383 }
6384
6385 The idea is that once copy and constant propagation have run, other
6386 optimizations will be able to determine what ranges of values can 'x'
6387 take in different paths of the code, simply by checking the reaching
6388 definition of 'x'. */
6389
6390 static void
6391 insert_range_assertions (void)
6392 {
6393 need_assert_for = BITMAP_ALLOC (NULL);
6394 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
6395
6396 calculate_dominance_info (CDI_DOMINATORS);
6397
6398 find_assert_locations ();
6399 if (!bitmap_empty_p (need_assert_for))
6400 {
6401 process_assert_insertions ();
6402 update_ssa (TODO_update_ssa_no_phi);
6403 }
6404
6405 if (dump_file && (dump_flags & TDF_DETAILS))
6406 {
6407 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6408 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6409 }
6410
6411 free (asserts_for);
6412 BITMAP_FREE (need_assert_for);
6413 }
6414
/* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
   arrays and "struct" hacks.  If VRP can determine that the
   array subscript is a constant, check if it is outside valid
   range.  If the array subscript is a RANGE, warn if it is
   non-overlapping with valid range.
   IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR
   (taking the address of one-past-the-end is valid).  */

static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
  value_range *vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound, up_bound_p1;
  tree base;

  /* Each REF is warned about at most once.  */
  if (TREE_NO_WARNING (ref))
    return;

  low_sub = up_sub = TREE_OPERAND (ref, 1);
  up_bound = array_ref_up_bound (ref);

  /* Can not check flexible arrays.  */
  if (!up_bound
      || TREE_CODE (up_bound) != INTEGER_CST)
    return;

  /* Accesses to trailing arrays via pointers may access storage
     beyond the types array bounds.  */
  base = get_base_address (ref);
  if ((warn_array_bounds < 2)
      && base && TREE_CODE (base) == MEM_REF)
    {
      tree cref, next = NULL_TREE;

      if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
	return;

      /* Look for a FIELD_DECL following the array's field; NEXT stays
	 NULL when the array is the record's last field (the "struct
	 hack") or the field lives in a union.  */
      cref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
	for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
	     next && TREE_CODE (next) != FIELD_DECL;
	     next = DECL_CHAIN (next))
	  ;

      /* If this is the last field in a struct type or a field in a
	 union type do not warn.  */
      if (!next)
	return;
    }

  low_bound = array_ref_low_bound (ref);
  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
				 build_int_cst (TREE_TYPE (up_bound), 1));

  /* Empty array: every subscript is out of bounds.  */
  if (tree_int_cst_equal (low_bound, up_bound_p1))
    {
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }

  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
	{
	  /* Note the deliberate swap for a VR_RANGE: LOW_SUB gets the
	     range MAXIMUM and UP_SUB the MINIMUM, so the checks below
	     fire only when the entire range lies out of bounds.  */
	  low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
	  up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
	}
    }

  if (vr && vr->type == VR_ANTI_RANGE)
    {
      /* An anti-range ~[low_sub, up_sub] is fully outside the array
	 only when the excluded interval covers all valid indices.  */
      if (TREE_CODE (up_sub) == INTEGER_CST
	  && (ignore_off_by_one
	      ? tree_int_cst_lt (up_bound, up_sub)
	      : tree_int_cst_le (up_bound, up_sub))
	  && TREE_CODE (low_sub) == INTEGER_CST
	  && tree_int_cst_le (low_sub, low_bound))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is outside array bounds");
	  TREE_NO_WARNING (ref) = 1;
	}
    }
  else if (TREE_CODE (up_sub) == INTEGER_CST
	   && (ignore_off_by_one
	       ? !tree_int_cst_le (up_sub, up_bound_p1)
	       : !tree_int_cst_le (up_sub, up_bound)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
	   && tree_int_cst_lt (low_sub, low_bound))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is below array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
}
6530
/* Searches if the expr T, located at LOCATION computes
   address of an ARRAY_REF, and call check_array_ref on it.  */

static void
search_for_addr_array (tree t, location_t location)
{
  /* Check each ARRAY_REFs in the reference chain.  */
  do
    {
      /* Inside an address computation a one-past-the-end subscript
	 is valid, hence ignore_off_by_one.  */
      if (TREE_CODE (t) == ARRAY_REF)
	check_array_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t));

  /* Additionally handle &array accessed via MEM_REF with a constant
     offset, i.e. *(&a + CST) style accesses.  */
  if (TREE_CODE (t) == MEM_REF
      && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
      && !TREE_NO_WARNING (t))
    {
      tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
      tree low_bound, up_bound, el_sz;
      offset_int idx;
      /* Only handle a one-dimensional array with a usable domain.  */
      if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
	return;

      low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
      if (!low_bound
	  || TREE_CODE (low_bound) != INTEGER_CST
	  || !up_bound
	  || TREE_CODE (up_bound) != INTEGER_CST
	  || !el_sz
	  || TREE_CODE (el_sz) != INTEGER_CST)
	return;

      /* Convert the byte offset into an element index.  */
      idx = mem_ref_offset (t);
      idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
      if (wi::lts_p (idx, 0))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is below array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
      /* Indices up to one past the last element are accepted (taking
	 the address of the end of the array is valid).  */
      else if (wi::gts_p (idx, (wi::to_offset (up_bound)
				- wi::to_offset (low_bound) + 1)))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is above array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
    }
}
6599
6600 /* walk_tree() callback that checks if *TP is
6601 an ARRAY_REF inside an ADDR_EXPR (in which an array
6602 subscript one outside the valid range is allowed). Call
6603 check_array_ref for each ARRAY_REF found. The location is
6604 passed in DATA. */
6605
6606 static tree
6607 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6608 {
6609 tree t = *tp;
6610 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6611 location_t location;
6612
6613 if (EXPR_HAS_LOCATION (t))
6614 location = EXPR_LOCATION (t);
6615 else
6616 {
6617 location_t *locp = (location_t *) wi->info;
6618 location = *locp;
6619 }
6620
6621 *walk_subtree = TRUE;
6622
6623 if (TREE_CODE (t) == ARRAY_REF)
6624 check_array_ref (location, t, false /*ignore_off_by_one*/);
6625
6626 else if (TREE_CODE (t) == ADDR_EXPR)
6627 {
6628 search_for_addr_array (t, location);
6629 *walk_subtree = FALSE;
6630 }
6631
6632 return NULL_TREE;
6633 }
6634
6635 /* Walk over all statements of all reachable BBs and call check_array_bounds
6636 on them. */
6637
6638 static void
6639 check_all_array_refs (void)
6640 {
6641 basic_block bb;
6642 gimple_stmt_iterator si;
6643
6644 FOR_EACH_BB_FN (bb, cfun)
6645 {
6646 edge_iterator ei;
6647 edge e;
6648 bool executable = false;
6649
6650 /* Skip blocks that were found to be unreachable. */
6651 FOR_EACH_EDGE (e, ei, bb->preds)
6652 executable |= !!(e->flags & EDGE_EXECUTABLE);
6653 if (!executable)
6654 continue;
6655
6656 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6657 {
6658 gimple *stmt = gsi_stmt (si);
6659 struct walk_stmt_info wi;
6660 if (!gimple_has_location (stmt)
6661 || is_gimple_debug (stmt))
6662 continue;
6663
6664 memset (&wi, 0, sizeof (wi));
6665
6666 location_t loc = gimple_location (stmt);
6667 wi.info = &loc;
6668
6669 walk_gimple_op (gsi_stmt (si),
6670 check_array_bounds,
6671 &wi);
6672 }
6673 }
6674 }
6675
6676 /* Return true if all imm uses of VAR are either in STMT, or
6677 feed (optionally through a chain of single imm uses) GIMPLE_COND
6678 in basic block COND_BB. */
6679
6680 static bool
6681 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
6682 {
6683 use_operand_p use_p, use2_p;
6684 imm_use_iterator iter;
6685
6686 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
6687 if (USE_STMT (use_p) != stmt)
6688 {
6689 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
6690 if (is_gimple_debug (use_stmt))
6691 continue;
6692 while (is_gimple_assign (use_stmt)
6693 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
6694 && single_imm_use (gimple_assign_lhs (use_stmt),
6695 &use2_p, &use_stmt2))
6696 use_stmt = use_stmt2;
6697 if (gimple_code (use_stmt) != GIMPLE_COND
6698 || gimple_bb (use_stmt) != cond_bb)
6699 return false;
6700 }
6701 return true;
6702 }
6703
/* Handle
   _4 = x_3 & 31;
   if (_4 != 0)
     goto <bb 6>;
   else
     goto <bb 7>;
   <bb 6>:
   __builtin_unreachable ();
   <bb 7>:
   x_5 = ASSERT_EXPR <x_3, ...>;
   If x_3 has no other immediate uses (checked by caller),
   var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
   from the non-zero bitmask.  */

static void
maybe_set_nonzero_bits (basic_block bb, tree var)
{
  edge e = single_pred_edge (bb);
  basic_block cond_bb = e->src;
  gimple *stmt = last_stmt (cond_bb);
  tree cst;

  /* Match the controlling condition: (X & CST) == 0 must hold on the
     edge into BB; which comparison code that requires depends on
     whether we came in on the true or the false edge.  */
  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_COND
      || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
				     ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (stmt)))
    return;

  /* The compared name must be defined as VAR & INTEGER_CST.  */
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
    return;
  if (gimple_assign_rhs1 (stmt) != var)
    {
      gimple *stmt2;

      /* Alternatively accept a precision-preserving conversion of VAR
	 feeding the BIT_AND_EXPR.  */
      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return;
      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (!gimple_assign_cast_p (stmt2)
	  || gimple_assign_rhs1 (stmt2) != var
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
	      != TYPE_PRECISION (TREE_TYPE (var))))
	return;
    }
  /* The masked-off bits are known zero in BB; clear them in VAR's
     nonzero-bits mask.  */
  cst = gimple_assign_rhs2 (stmt);
  set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
}
6756
/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [3, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.  */

static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;
  /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
     a basic block preceded by GIMPLE_COND branching to it and
     __builtin_trap, -1 if not yet checked, 0 otherwise.  */
  int is_unreachable;

  /* Note that the BSI iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current BSI.  */
  FOR_EACH_BB_FN (bb, cfun)
    for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
      {
	gimple *stmt = gsi_stmt (si);
	gimple *use_stmt;

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree lhs = gimple_assign_lhs (stmt);
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;
	    use_operand_p use_p;
	    imm_use_iterator iter;

	    var = ASSERT_EXPR_VAR (rhs);
	    gcc_assert (TREE_CODE (var) == SSA_NAME);

	    if (!POINTER_TYPE_P (TREE_TYPE (lhs))
		&& SSA_NAME_RANGE_INFO (lhs))
	      {
		/* Lazily check whether BB is the fallthru of an
		   unreachable-guarding condition; cache the answer
		   for the rest of the block.  */
		if (is_unreachable == -1)
		  {
		    is_unreachable = 0;
		    if (single_pred_p (bb)
			&& assert_unreachable_fallthru_edge_p
						      (single_pred_edge (bb)))
		      is_unreachable = 1;
		  }
		/* Handle
		   if (x_7 >= 10 && x_7 < 20)
		     __builtin_unreachable ();
		   x_8 = ASSERT_EXPR <x_7, ...>;
		   if the only uses of x_7 are in the ASSERT_EXPR and
		   in the condition.  In that case, we can copy the
		   range info from x_8 computed in this pass also
		   for x_7.  */
		if (is_unreachable
		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
							  single_pred (bb)))
		  {
		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
		    maybe_set_nonzero_bits (bb, var);
		  }
	      }

	    /* Propagate the RHS into every use of the LHS.  */
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, var);

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  {
	    /* Any real (non-debug) statement ends the leading run of
	       ASSERT_EXPRs the unreachable-guard optimization looks
	       at.  */
	    if (!is_gimple_debug (gsi_stmt (si)))
	      is_unreachable = 0;
	    gsi_next (&si);
	  }
      }
}
6859
6860
6861 /* Return true if STMT is interesting for VRP. */
6862
6863 static bool
6864 stmt_interesting_for_vrp (gimple *stmt)
6865 {
6866 if (gimple_code (stmt) == GIMPLE_PHI)
6867 {
6868 tree res = gimple_phi_result (stmt);
6869 return (!virtual_operand_p (res)
6870 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6871 || POINTER_TYPE_P (TREE_TYPE (res))));
6872 }
6873 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6874 {
6875 tree lhs = gimple_get_lhs (stmt);
6876
6877 /* In general, assignments with virtual operands are not useful
6878 for deriving ranges, with the obvious exception of calls to
6879 builtin functions. */
6880 if (lhs && TREE_CODE (lhs) == SSA_NAME
6881 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6882 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6883 && (is_gimple_call (stmt)
6884 || !gimple_vuse (stmt)))
6885 return true;
6886 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
6887 switch (gimple_call_internal_fn (stmt))
6888 {
6889 case IFN_ADD_OVERFLOW:
6890 case IFN_SUB_OVERFLOW:
6891 case IFN_MUL_OVERFLOW:
6892 /* These internal calls return _Complex integer type,
6893 but are interesting to VRP nevertheless. */
6894 if (lhs && TREE_CODE (lhs) == SSA_NAME)
6895 return true;
6896 break;
6897 default:
6898 break;
6899 }
6900 }
6901 else if (gimple_code (stmt) == GIMPLE_COND
6902 || gimple_code (stmt) == GIMPLE_SWITCH)
6903 return true;
6904
6905 return false;
6906 }
6907
6908
6909 /* Initialize local data structures for VRP. */
6910
6911 static void
6912 vrp_initialize (void)
6913 {
6914 basic_block bb;
6915
6916 values_propagated = false;
6917 num_vr_values = num_ssa_names;
6918 vr_value = XCNEWVEC (value_range *, num_vr_values);
6919 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6920
6921 FOR_EACH_BB_FN (bb, cfun)
6922 {
6923 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6924 gsi_next (&si))
6925 {
6926 gphi *phi = si.phi ();
6927 if (!stmt_interesting_for_vrp (phi))
6928 {
6929 tree lhs = PHI_RESULT (phi);
6930 set_value_range_to_varying (get_value_range (lhs));
6931 prop_set_simulate_again (phi, false);
6932 }
6933 else
6934 prop_set_simulate_again (phi, true);
6935 }
6936
6937 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
6938 gsi_next (&si))
6939 {
6940 gimple *stmt = gsi_stmt (si);
6941
6942 /* If the statement is a control insn, then we do not
6943 want to avoid simulating the statement once. Failure
6944 to do so means that those edges will never get added. */
6945 if (stmt_ends_bb_p (stmt))
6946 prop_set_simulate_again (stmt, true);
6947 else if (!stmt_interesting_for_vrp (stmt))
6948 {
6949 ssa_op_iter i;
6950 tree def;
6951 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
6952 set_value_range_to_varying (get_value_range (def));
6953 prop_set_simulate_again (stmt, false);
6954 }
6955 else
6956 prop_set_simulate_again (stmt, true);
6957 }
6958 }
6959 }
6960
6961 /* Return the singleton value-range for NAME or NAME. */
6962
6963 static inline tree
6964 vrp_valueize (tree name)
6965 {
6966 if (TREE_CODE (name) == SSA_NAME)
6967 {
6968 value_range *vr = get_value_range (name);
6969 if (vr->type == VR_RANGE
6970 && (vr->min == vr->max
6971 || operand_equal_p (vr->min, vr->max, 0)))
6972 return vr->min;
6973 }
6974 return name;
6975 }
6976
6977 /* Return the singleton value-range for NAME if that is a constant
6978 but signal to not follow SSA edges. */
6979
6980 static inline tree
6981 vrp_valueize_1 (tree name)
6982 {
6983 if (TREE_CODE (name) == SSA_NAME)
6984 {
6985 /* If the definition may be simulated again we cannot follow
6986 this SSA edge as the SSA propagator does not necessarily
6987 re-visit the use. */
6988 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
6989 if (!gimple_nop_p (def_stmt)
6990 && prop_simulate_again_p (def_stmt))
6991 return NULL_TREE;
6992 value_range *vr = get_value_range (name);
6993 if (range_int_cst_singleton_p (vr))
6994 return vr->min;
6995 }
6996 return name;
6997 }
6998
/* Visit assignment STMT.  If it produces an interesting range, record
   the SSA name in *OUTPUT_P.  */

static enum ssa_prop_result
vrp_visit_assignment_or_call (gimple *stmt, tree *output_p)
{
  tree def, lhs;
  ssa_op_iter iter;
  enum gimple_code code = gimple_code (stmt);
  lhs = gimple_get_lhs (stmt);

  /* We only keep track of ranges in integral and pointer types.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	   /* It is valid to have NULL MIN/MAX values on a type.  See
	      build_range_type.  */
	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
    {
      value_range new_vr = VR_INITIALIZER;

      /* Try folding the statement to a constant first.  */
      tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
						 vrp_valueize_1);
      if (tem && is_gimple_min_invariant (tem))
	set_value_range_to_value (&new_vr, tem, NULL);
      /* Then dispatch to value-range extracting functions.  */
      else if (code == GIMPLE_CALL)
	extract_range_basic (&new_vr, stmt);
      else
	extract_range_from_assignment (&new_vr, as_a <gassign *> (stmt));

      /* Only report a change when the stored range actually moved.  */
      if (update_value_range (lhs, &new_vr))
	{
	  *output_p = lhs;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &new_vr);
	      fprintf (dump_file, "\n");
	    }

	  if (new_vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}

      return SSA_PROP_NOT_INTERESTING;
    }
  else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))
      {
      case IFN_ADD_OVERFLOW:
      case IFN_SUB_OVERFLOW:
      case IFN_MUL_OVERFLOW:
	/* These internal calls return _Complex integer type,
	   which VRP does not track, but the immediate uses
	   thereof might be interesting.  */
	if (lhs && TREE_CODE (lhs) == SSA_NAME)
	  {
	    imm_use_iterator iter;
	    use_operand_p use_p;
	    enum ssa_prop_result res = SSA_PROP_VARYING;

	    set_value_range_to_varying (get_value_range (lhs));

	    /* Scan the REALPART_EXPR/IMAGPART_EXPR extractions of
	       the _Complex result and recompute their ranges.  */
	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
	      {
		gimple *use_stmt = USE_STMT (use_p);
		if (!is_gimple_assign (use_stmt))
		  continue;
		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
		  continue;
		tree rhs1 = gimple_assign_rhs1 (use_stmt);
		tree use_lhs = gimple_assign_lhs (use_stmt);
		if (TREE_CODE (rhs1) != rhs_code
		    || TREE_OPERAND (rhs1, 0) != lhs
		    || TREE_CODE (use_lhs) != SSA_NAME
		    || !stmt_interesting_for_vrp (use_stmt)
		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
		  continue;

		/* If there is a change in the value range for any of the
		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
		   or IMAGPART_EXPR immediate uses, but none of them have
		   a change in their value ranges, return
		   SSA_PROP_NOT_INTERESTING.  If there are no
		   {REAL,IMAG}PART_EXPR uses at all,
		   return SSA_PROP_VARYING.  */
		value_range new_vr = VR_INITIALIZER;
		extract_range_basic (&new_vr, use_stmt);
		value_range *old_vr = get_value_range (use_lhs);
		if (old_vr->type != new_vr.type
		    || !vrp_operand_equal_p (old_vr->min, new_vr.min)
		    || !vrp_operand_equal_p (old_vr->max, new_vr.max)
		    || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
		  res = SSA_PROP_INTERESTING;
		else
		  res = SSA_PROP_NOT_INTERESTING;
		BITMAP_FREE (new_vr.equiv);
		if (res == SSA_PROP_INTERESTING)
		  {
		    *output_p = lhs;
		    return res;
		  }
	      }

	    return res;
	  }
	break;
      default:
	break;
      }

  /* Every other statement produces no useful ranges.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}
7128
7129 /* Helper that gets the value range of the SSA_NAME with version I
7130 or a symbolic range containing the SSA_NAME only if the value range
7131 is varying or undefined. */
7132
7133 static inline value_range
7134 get_vr_for_comparison (int i)
7135 {
7136 value_range vr = *get_value_range (ssa_name (i));
7137
7138 /* If name N_i does not have a valid range, use N_i as its own
7139 range. This allows us to compare against names that may
7140 have N_i in their ranges. */
7141 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
7142 {
7143 vr.type = VR_RANGE;
7144 vr.min = ssa_name (i);
7145 vr.max = ssa_name (i);
7146 }
7147
7148 return vr;
7149 }
7150
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Repeat the comparison for every name in VAR's equivalence set.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  /* Track whether every successful comparison relied on
	     strict-overflow reasoning (1), or at least one did not
	     (0); -1 means no comparison succeeded yet.  */
	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
7223
7224
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */


static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  /* Scratch bitmaps allocated once and reused across calls, used in
     place of a missing equivalence set so the code below can treat
     both operands uniformly.  */
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these bits must be cleared again on every exit
     path, since E1/E2 may be the lattice's own bitmaps.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      /* Equal names satisfy exactly the reflexive comparisons.  */
      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      /* Any overflow-free answer pins the flag at 0.  */
	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
7337
7338 /* Helper function for vrp_evaluate_conditional_warnv & other
7339 optimizers. */
7340
7341 static tree
7342 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
7343 tree op0, tree op1,
7344 bool * strict_overflow_p)
7345 {
7346 value_range *vr0, *vr1;
7347
7348 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
7349 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
7350
7351 tree res = NULL_TREE;
7352 if (vr0 && vr1)
7353 res = compare_ranges (code, vr0, vr1, strict_overflow_p);
7354 if (!res && vr0)
7355 res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
7356 if (!res && vr1)
7357 res = (compare_range_with_value
7358 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
7359 return res;
7360 }
7361
7362 /* Helper function for vrp_evaluate_conditional_warnv. */
7363
7364 static tree
7365 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
7366 tree op1, bool use_equiv_p,
7367 bool *strict_overflow_p, bool *only_ranges)
7368 {
7369 tree ret;
7370 if (only_ranges)
7371 *only_ranges = true;
7372
7373 /* We only deal with integral and pointer types. */
7374 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
7375 && !POINTER_TYPE_P (TREE_TYPE (op0)))
7376 return NULL_TREE;
7377
7378 if (use_equiv_p)
7379 {
7380 if (only_ranges
7381 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
7382 (code, op0, op1, strict_overflow_p)))
7383 return ret;
7384 *only_ranges = false;
7385 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
7386 return compare_names (code, op0, op1, strict_overflow_p);
7387 else if (TREE_CODE (op0) == SSA_NAME)
7388 return compare_name_with_value (code, op0, op1, strict_overflow_p);
7389 else if (TREE_CODE (op1) == SSA_NAME)
7390 return (compare_name_with_value
7391 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
7392 }
7393 else
7394 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
7395 strict_overflow_p);
7396 return NULL_TREE;
7397 }
7398
/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional can not be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */

static tree
vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Some passes and foldings leak constants with overflow flag set
     into the IL.  Avoid doing wrong things with these and bail out.  */
  if ((TREE_CODE (op0) == INTEGER_CST
       && TREE_OVERFLOW (op0))
      || (TREE_CODE (op1) == INTEGER_CST
	  && TREE_OVERFLOW (op1)))
    return NULL_TREE;

  /* Evaluate with equivalences; SOP records whether the result relies
     on undefined signed overflow, ONLY_RANGES whether it came from the
     operands' own ranges alone.  */
  sop = false;
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
  						 &only_ranges);

  /* Issue -Wstrict-overflow if the folding relied on assuming signed
     overflow is undefined.  */
  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      if (is_gimple_min_invariant (ret))
	{
	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional to constant");
	}
      else
	{
	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional");
	}

      if (issue_strict_overflow_warning (wc))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
	}
    }

  /* -Wtype-limits only applies when the fold came from the ranges
     themselves, not from equivalences.  */
  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
	 is being compared against a constant value that is outside of
	 the natural range of OP0's type, then the predicate will
	 always fold regardless of the value of OP0.  If -Wtype-limits
	 was specified, emit a warning.  */
      tree type = TREE_TYPE (op0);
      value_range *vr0 = get_value_range (op0);

      /* OP0 covering its whole type means the constant decided it.  */
      if (vr0->type == VR_RANGE
	  && INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (vr0->min)
	  && vrp_val_is_max (vr0->max)
	  && is_gimple_min_invariant (op1))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);

	  warning_at (location, OPT_Wtype_limits,
		      integer_zerop (ret)
		      ? G_("comparison always false "
                           "due to limited range of data type")
		      : G_("comparison always true "
                           "due to limited range of data type"));
	}
    }

  return ret;
}
7492
7493
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
{
  tree val;
  bool sop;

  *taken_edge_p = NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
	...
     	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;

  /* USE_EQUIV_P is false here for the reason explained above.  */
  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
						 gimple_cond_lhs (stmt),
						 gimple_cond_rhs (stmt),
						 false, &sop, NULL);
  if (val)
    {
      if (!sop)
	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
      else
	{
	  /* During propagation we must not rely on undefined signed
	     overflow; discard the result in that case.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "\nIgnoring predicate evaluation because "
		     "it assumes that signed overflow is undefined");
	  val = NULL_TREE;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
7600
7601 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
7602 that includes the value VAL. The search is restricted to the range
7603 [START_IDX, n - 1] where n is the size of VEC.
7604
7605 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7606 returned.
7607
7608 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7609 it is placed in IDX and false is returned.
7610
7611 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
7612 returned. */
7613
7614 static bool
7615 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
7616 {
7617 size_t n = gimple_switch_num_labels (stmt);
7618 size_t low, high;
7619
7620 /* Find case label for minimum of the value range or the next one.
7621 At each iteration we are searching in [low, high - 1]. */
7622
7623 for (low = start_idx, high = n; high != low; )
7624 {
7625 tree t;
7626 int cmp;
7627 /* Note that i != high, so we never ask for n. */
7628 size_t i = (high + low) / 2;
7629 t = gimple_switch_label (stmt, i);
7630
7631 /* Cache the result of comparing CASE_LOW and val. */
7632 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7633
7634 if (cmp == 0)
7635 {
7636 /* Ranges cannot be empty. */
7637 *idx = i;
7638 return true;
7639 }
7640 else if (cmp > 0)
7641 high = i;
7642 else
7643 {
7644 low = i + 1;
7645 if (CASE_HIGH (t) != NULL
7646 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7647 {
7648 *idx = i;
7649 return true;
7650 }
7651 }
7652 }
7653
7654 *idx = high;
7655 return false;
7656 }
7657
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX. The first index is placed in MIN_IDX.  The
   last index is placed in MAX_IDX.  If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */

static bool
find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  /* The second search can start at I because labels are sorted and
     MAX >= MIN.  */
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
	 Return an empty range.  */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      /* J currently points past the last label covered by MAX.  */
      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  /* A gap of more than 1 between consecutive labels means some
	     values in [MIN, MAX] fall through to the default.  */
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
7714
/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
   MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
   Returns true if the default label is not needed.  */

static bool
find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
			size_t *max_idx1, size_t *min_idx2,
			size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (stmt);
  bool take_default;
  tree case_low, case_high;
  tree min = vr->min, max = vr->max;

  gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);

  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty.  */
  *min_idx2 = 1;
  *max_idx2 = 0;

  /* A plain range needs only the labels covered by [MIN, MAX].  */
  if (vr->type == VR_RANGE)
    {
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* VR is an anti-range ~[MIN, MAX]: the reachable labels are those
     OUTSIDE [i, j].  Set first range to all case labels.  */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX].  */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (case_low, min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (max, case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1].  */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  /* If the left piece [1, i - 1] is empty, promote the right piece to
     be the first range.  */
  if (i > j)
    {
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
7792
/* Visit switch statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
{
  tree op, val;
  value_range *vr;
  size_t i = 0, j = 0, k, l;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  /* Only SSA names carry a value range.  */
  if (TREE_CODE (op) != SSA_NAME)
    return SSA_PROP_VARYING;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op, 0);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* Symbolic or uninformative ranges cannot be compared against the
     integer case labels.  */
  if ((vr->type != VR_RANGE
       && vr->type != VR_ANTI_RANGE)
      || symbolic_range_p (vr))
    return SSA_PROP_VARYING;

  /* Find the single edge that is taken from the switch expression.
     [i, j] and [k, l] are the (up to two) label index ranges that VR
     can reach; TAKE_DEFAULT says whether the default label is also
     reachable.  */
  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);

  /* Check if the range spans no CASE_LABEL. If so, we only reach the default
     label */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
	 are all reaching the same label.  */

      val = gimple_switch_label (stmt, i);
      if (take_default
	  && CASE_LABEL (gimple_switch_default_label (stmt))
	  != CASE_LABEL (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  not a single destination for this "
		     "range\n");
	  return SSA_PROP_VARYING;
	}
      for (++i; i <= j; ++i)
        {
          if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
        }
      /* The second label range (anti-range case) must agree too.  */
      for (; k <= l; ++k)
        {
          if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
        }
    }

  *taken_edge_p = find_edge (gimple_bb (stmt),
			     label_to_block (CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val), 0);
    }

  return SSA_PROP_INTERESTING;
}
7884
7885
7886 /* Evaluate statement STMT. If the statement produces a useful range,
7887 return SSA_PROP_INTERESTING and record the SSA name with the
7888 interesting range into *OUTPUT_P.
7889
7890 If STMT is a conditional branch and we can determine its truth
7891 value, the taken edge is recorded in *TAKEN_EDGE_P.
7892
7893 If STMT produces a varying value, return SSA_PROP_VARYING. */
7894
7895 static enum ssa_prop_result
7896 vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
7897 {
7898 tree def;
7899 ssa_op_iter iter;
7900
7901 if (dump_file && (dump_flags & TDF_DETAILS))
7902 {
7903 fprintf (dump_file, "\nVisiting statement:\n");
7904 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7905 }
7906
7907 if (!stmt_interesting_for_vrp (stmt))
7908 gcc_assert (stmt_ends_bb_p (stmt));
7909 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7910 return vrp_visit_assignment_or_call (stmt, output_p);
7911 else if (gimple_code (stmt) == GIMPLE_COND)
7912 return vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
7913 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7914 return vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
7915
7916 /* All other statements produce nothing of interest for VRP, so mark
7917 their outputs varying and prevent further simulation. */
7918 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7919 set_value_range_to_varying (get_value_range (def));
7920
7921 return SSA_PROP_VARYING;
7922 }
7923
/* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.
   When no useful union exists the result is set to VR_VARYING with
   NULL bounds.  */

static void
union_ranges (enum value_range_type *vr0type,
	      tree *vr0min, tree *vr0max,
	      enum value_range_type vr1type,
	      tree vr1min, tree vr1max)
{
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range union the result is varying.  */
	  goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, result of the union
	 operation is the anti-range or if both are anti-ranges
	 it covers all.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	goto give_up;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The result is the convex hull of both ranges.  */
	  if (operand_less_p (*vr0max, vr1min) == 1)
	    {
	      /* If the result can be an anti-range, create one.  */
	      if (TREE_CODE (*vr0max) == INTEGER_CST
		  && TREE_CODE (vr1min) == INTEGER_CST
		  && vrp_val_is_min (*vr0min)
		  && vrp_val_is_max (vr1max))
		{
		  /* The hull covers the whole type except the gap
		     (*vr0max, vr1min); express that as ~[gap].  */
		  tree min = int_const_binop (PLUS_EXPR,
					      *vr0max,
					      build_int_cst (TREE_TYPE (*vr0max), 1));
		  tree max = int_const_binop (MINUS_EXPR,
					      vr1min,
					      build_int_cst (TREE_TYPE (vr1min), 1));
		  if (!operand_less_p (max, min))
		    {
		      *vr0type = VR_ANTI_RANGE;
		      *vr0min = min;
		      *vr0max = max;
		    }
		  else
		    *vr0max = vr1max;
		}
	      else
		*vr0max = vr1max;
	    }
	  else
	    {
	      /* If the result can be an anti-range, create one.  */
	      if (TREE_CODE (vr1max) == INTEGER_CST
		  && TREE_CODE (*vr0min) == INTEGER_CST
		  && vrp_val_is_min (vr1min)
		  && vrp_val_is_max (*vr0max))
		{
		  tree min = int_const_binop (PLUS_EXPR,
					      vr1max,
					      build_int_cst (TREE_TYPE (vr1max), 1));
		  tree max = int_const_binop (MINUS_EXPR,
					      *vr0min,
					      build_int_cst (TREE_TYPE (*vr0min), 1));
		  if (!operand_less_p (max, min))
		    {
		      *vr0type = VR_ANTI_RANGE;
		      *vr0min = min;
		      *vr0max = max;
		    }
		  else
		    *vr0min = vr1min;
		}
	      else
		*vr0min = vr1min;
	    }
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* The inner anti-range excludes less; it is the union.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Arbitrarily choose the right or left gap.  */
	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* The result covers everything.  */
	goto give_up;
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  *vr0type = VR_ANTI_RANGE;
	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
	    {
	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					 build_int_cst (TREE_TYPE (*vr0min), 1));
	      *vr0min = vr1min;
	    }
	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
	    {
	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					 build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	/* The result covers everything.  */
	goto give_up;
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1
	   && operand_less_p (*vr0max, vr1max) == 1)
    {
      /* [  (  ]  ) or [   ](   ) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    {
	      *vr0type = vr1type;
	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					 build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  else
	    goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1
	   && operand_less_p (vr1max, *vr0max) == 1)
    {
      /* (  [  )  ] or (   )[   ] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					 build_int_cst (TREE_TYPE (*vr0min), 1));
	    }
	  else
	    goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else
    goto give_up;

  return;

give_up:
  *vr0type = VR_VARYING;
  *vr0min = NULL_TREE;
  *vr0max = NULL_TREE;
}
8194
8195 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8196 { VR1TYPE, VR0MIN, VR0MAX } and store the result
8197 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
8198 possible such range. The resulting range is not canonicalized. */
8199
static void
intersect_ranges (enum value_range_type *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_type vr1type,
		  tree vr1min, tree vr1max)
{
  /* Classify the relative position of the two ranges by comparing their
     endpoints, then dispatch on (position, vr0type, vr1type).  */
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] — identical endpoints.  */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	/* Keep VR0: the anti-range excludes nothing from the disjoint
	   range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Symmetric case: the range wins.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Two disjoint ranges intersect to the empty set.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] — VR1 is contained in VR0.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      if (TREE_CODE (vr1max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, vr1max,
					   build_int_cst (TREE_TYPE (vr1max), 1));
	      else
		*vr0min = vr1max;
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
					   build_int_cst (TREE_TYPE (vr1min), 1));
	      else
		*vr0max = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) — VR0 is contained in VR1.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					   build_int_cst (TREE_TYPE (*vr0max), 1));
	      else
		*vr0min = *vr0max;
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					   build_int_cst (TREE_TYPE (*vr0min), 1));
	      else
		*vr0max = *vr0min;
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) — partial overlap, VR0 on the left.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       build_int_cst (TREE_TYPE (*vr0max), 1));
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] — partial overlap, VR0 on the right.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       build_int_cst (TREE_TYPE (*vr0min), 1));
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate.  */

  return;
}
8469
8470
8471 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8472 in *VR0. This may not be the smallest possible such range. */
8473
8474 static void
8475 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
8476 {
8477 value_range saved;
8478
8479 /* If either range is VR_VARYING the other one wins. */
8480 if (vr1->type == VR_VARYING)
8481 return;
8482 if (vr0->type == VR_VARYING)
8483 {
8484 copy_value_range (vr0, vr1);
8485 return;
8486 }
8487
8488 /* When either range is VR_UNDEFINED the resulting range is
8489 VR_UNDEFINED, too. */
8490 if (vr0->type == VR_UNDEFINED)
8491 return;
8492 if (vr1->type == VR_UNDEFINED)
8493 {
8494 set_value_range_to_undefined (vr0);
8495 return;
8496 }
8497
8498 /* Save the original vr0 so we can return it as conservative intersection
8499 result when our worker turns things to varying. */
8500 saved = *vr0;
8501 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8502 vr1->type, vr1->min, vr1->max);
8503 /* Make sure to canonicalize the result though as the inversion of a
8504 VR_RANGE can still be a VR_RANGE. */
8505 set_and_canonicalize_value_range (vr0, vr0->type,
8506 vr0->min, vr0->max, vr0->equiv);
8507 /* If that failed, use the saved original VR0. */
8508 if (vr0->type == VR_VARYING)
8509 {
8510 *vr0 = saved;
8511 return;
8512 }
8513 /* If the result is VR_UNDEFINED there is no need to mess with
8514 the equivalencies. */
8515 if (vr0->type == VR_UNDEFINED)
8516 return;
8517
8518 /* The resulting set of equivalences for range intersection is the union of
8519 the two sets. */
8520 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8521 bitmap_ior_into (vr0->equiv, vr1->equiv);
8522 else if (vr1->equiv && !vr0->equiv)
8523 bitmap_copy (vr0->equiv, vr1->equiv);
8524 }
8525
8526 static void
8527 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
8528 {
8529 if (dump_file && (dump_flags & TDF_DETAILS))
8530 {
8531 fprintf (dump_file, "Intersecting\n ");
8532 dump_value_range (dump_file, vr0);
8533 fprintf (dump_file, "\nand\n ");
8534 dump_value_range (dump_file, vr1);
8535 fprintf (dump_file, "\n");
8536 }
8537 vrp_intersect_ranges_1 (vr0, vr1);
8538 if (dump_file && (dump_flags & TDF_DETAILS))
8539 {
8540 fprintf (dump_file, "to\n ");
8541 dump_value_range (dump_file, vr0);
8542 fprintf (dump_file, "\n");
8543 }
8544 }
8545
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.
   Equivalences are intersected, since only names equivalent on every
   incoming path remain equivalent after the meet.  */

static void
vrp_meet_1 (value_range *vr0, value_range *vr1)
{
  value_range saved;

  /* VR_UNDEFINED is the identity of the meet: the other operand wins.  */
  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  /* VR_VARYING absorbs everything.  */
  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  /* Save VR0 so we can still derive a non-NULL result if the union
     degrades to varying.  */
  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
		vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
	 the result to VARYING, see if we can at least derive a useful
	 anti-range.  FIXME, all this nonsense about distinguishing
	 anti-ranges from ranges is necessary because of the odd
	 semantics of range_includes_zero_p and friends.  */
      if (((saved.type == VR_RANGE
	    && range_includes_zero_p (saved.min, saved.max) == 0)
	   || (saved.type == VR_ANTI_RANGE
	       && range_includes_zero_p (saved.min, saved.max) == 1))
	  && ((vr1->type == VR_RANGE
	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
	      || (vr1->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
	{
	  /* Both inputs exclude zero, so the meet is at least ~[0, 0].  */
	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

	  /* Since this meet operation did not result from the meeting of
	     two equivalent names, VR0 cannot have any equivalences.  */
	  if (vr0->equiv)
	    bitmap_clear (vr0->equiv);
	  return;
	}

      set_value_range_to_varying (vr0);
      return;
    }
  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
				    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}
8622
8623 static void
8624 vrp_meet (value_range *vr0, value_range *vr1)
8625 {
8626 if (dump_file && (dump_flags & TDF_DETAILS))
8627 {
8628 fprintf (dump_file, "Meeting\n ");
8629 dump_value_range (dump_file, vr0);
8630 fprintf (dump_file, "\nand\n ");
8631 dump_value_range (dump_file, vr1);
8632 fprintf (dump_file, "\n");
8633 }
8634 vrp_meet_1 (vr0, vr1);
8635 if (dump_file && (dump_flags & TDF_DETAILS))
8636 {
8637 fprintf (dump_file, "to\n ");
8638 dump_value_range (dump_file, vr0);
8639 fprintf (dump_file, "\n");
8640 }
8641 }
8642
8643
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.
   Returns SSA_PROP_VARYING, SSA_PROP_INTERESTING or
   SSA_PROP_NOT_INTERESTING to drive the propagation engine.  */

static enum ssa_prop_result
vrp_visit_phi_node (gphi *phi)
{
  size_t i;
  tree lhs = PHI_RESULT (phi);
  value_range *lhs_vr = get_value_range (lhs);
  value_range vr_result = VR_INITIALIZER;
  bool first = true;
  int edges, old_edges;
  struct loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  /* Meet the ranges of all arguments flowing in over executable edges.  */
  edges = 0;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "    Argument #%d (%d -> %d %sexecutable)\n",
		   (int) i, e->src->index, e->dest->index,
		   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = PHI_ARG_DEF (phi, i);
	  value_range vr_arg;

	  ++edges;

	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      vr_arg = *(get_value_range (arg));
	      /* Do not allow equivalences or symbolic ranges to leak in from
		 backedges.  That creates invalid equivalencies.
		 See PR53465 and PR54767.  */
	      if (e->flags & EDGE_DFS_BACK)
		{
		  if (vr_arg.type == VR_RANGE
		      || vr_arg.type == VR_ANTI_RANGE)
		    {
		      vr_arg.equiv = NULL;
		      if (symbolic_range_p (&vr_arg))
			{
			  /* Degrade symbolic backedge ranges to varying.  */
			  vr_arg.type = VR_VARYING;
			  vr_arg.min = NULL_TREE;
			  vr_arg.max = NULL_TREE;
			}
		    }
		}
	      else
		{
		  /* If the non-backedge arguments range is VR_VARYING then
		     we can still try recording a simple equivalence.  */
		  if (vr_arg.type == VR_VARYING)
		    {
		      vr_arg.type = VR_RANGE;
		      vr_arg.min = arg;
		      vr_arg.max = arg;
		      vr_arg.equiv = NULL;
		    }
		}
	    }
	  else
	    {
	      /* A constant argument yields the singleton range [arg, arg],
		 after stripping any overflow flag.  */
	      if (TREE_OVERFLOW_P (arg))
		arg = drop_tree_overflow (arg);

	      vr_arg.type = VR_RANGE;
	      vr_arg.min = arg;
	      vr_arg.max = arg;
	      vr_arg.equiv = NULL;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &vr_arg);
	      fprintf (dump_file, "\n");
	    }

	  if (first)
	    copy_value_range (&vr_result, &vr_arg);
	  else
	    vrp_meet (&vr_result, &vr_arg);
	  first = false;

	  /* Once varying, further arguments cannot improve the result.  */
	  if (vr_result.type == VR_VARYING)
	    break;
	}
    }

  if (vr_result.type == VR_VARYING)
    goto varying;
  else if (vr_result.type == VR_UNDEFINED)
    goto update_range;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  If the old value-range was VR_UNDEFINED
     use the updated range and iterate one more time.  */
  if (edges > 0
      && gimple_phi_num_args (phi) > 1
      && edges == old_edges
      && lhs_vr->type != VR_UNDEFINED)
    {
      /* Compare old and new ranges, fall back to varying if the
	 values are not comparable.  */
      int cmp_min = compare_values (lhs_vr->min, vr_result.min);
      if (cmp_min == -2)
	goto varying;
      int cmp_max = compare_values (lhs_vr->max, vr_result.max);
      if (cmp_max == -2)
	goto varying;

      /* For non VR_RANGE or for pointers fall back to varying if
	 the range changed.  */
      if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (cmp_min != 0 || cmp_max != 0))
	goto varying;

      /* If the new minimum is larger than the previous one
	 retain the old value.  If the new minimum value is smaller
	 than the previous one and not -INF go all the way to -INF + 1.
	 In the first case, to avoid infinite bouncing between different
	 minimums, and in the other case to avoid iterating millions of
	 times to reach -INF.  Going to -INF + 1 also lets the following
	 iteration compute whether there will be any overflow, at the
	 expense of one additional iteration.  */
      if (cmp_min < 0)
	vr_result.min = lhs_vr->min;
      else if (cmp_min > 0
	       && !vrp_val_is_min (vr_result.min))
	vr_result.min
	  = int_const_binop (PLUS_EXPR,
			     vrp_val_min (TREE_TYPE (vr_result.min)),
			     build_int_cst (TREE_TYPE (vr_result.min), 1));

      /* Similarly for the maximum value.  */
      if (cmp_max > 0)
	vr_result.max = lhs_vr->max;
      else if (cmp_max < 0
	       && !vrp_val_is_max (vr_result.max))
	/* NOTE(review): uses TREE_TYPE (vr_result.min) to build the new
	   maximum; presumably min and max always share the value's type
	   here -- confirm, or use TREE_TYPE (vr_result.max) for clarity.  */
	vr_result.max
	  = int_const_binop (MINUS_EXPR,
			     vrp_val_max (TREE_TYPE (vr_result.min)),
			     build_int_cst (TREE_TYPE (vr_result.min), 1));

      /* If we dropped either bound to +-INF then if this is a loop
	 PHI node SCEV may known more about its value-range.  */
      if ((cmp_min > 0 || cmp_min < 0
	   || cmp_max < 0 || cmp_max > 0)
	  && (l = loop_containing_stmt (phi))
	  && l->header == gimple_bb (phi))
	adjust_range_with_scev (&vr_result, l, phi, lhs);

      /* If we will end up with a (-INF, +INF) range, set it to
	 VARYING.  Same if the previous max value was invalid for
	 the type and we end up with vr_result.min > vr_result.max.  */
      if ((vrp_val_is_max (vr_result.max)
	   && vrp_val_is_min (vr_result.min))
	  || compare_values (vr_result.min,
			     vr_result.max) > 0)
	goto varying;
    }

  /* If the new range is different than the previous value, keep
     iterating.  */
update_range:
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n");
	}

      if (vr_result.type == VR_VARYING)
	return SSA_PROP_VARYING;

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;

  /* No match found.  Set the LHS to VARYING.  */
varying:
  set_value_range_to_varying (lhs_vr);
  return SSA_PROP_VARYING;
}
8857
/* Simplify boolean operations if the source is known
   to be already a boolean.  STMT is the ==/!= assignment to simplify;
   GSI points at it so replacement statements can be inserted.
   Returns true if the statement was changed.  */
static bool
simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree lhs, op0, op1;
  bool need_conversion;

  /* We handle only !=/== case here.  */
  gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);

  /* Both operands must be known to lie in [0, 1].  */
  op0 = gimple_assign_rhs1 (stmt);
  if (!op_with_boolean_value_range_p (op0))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!op_with_boolean_value_range_p (op1))
    return false;

  /* Reduce number of cases to handle to NE_EXPR.  As there is no
     BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
  if (rhs_code == EQ_EXPR)
    {
      /* A == C becomes A != (C ^ 1) for constant C in [0, 1].  */
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = int_const_binop (BIT_XOR_EXPR, op1,
			       build_int_cst (TREE_TYPE (op1), 1));
      else
	return false;
    }

  lhs = gimple_assign_lhs (stmt);
  need_conversion
    = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));

  /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1
      && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
    return false;

  /* For A != 0 we can substitute A itself.  */
  if (integer_zerop (op1))
    gimple_assign_set_rhs_with_ops (gsi,
				    need_conversion
				    ? NOP_EXPR : TREE_CODE (op0), op0);
  /* For A != B we substitute A ^ B.  Either with conversion.  */
  else if (need_conversion)
    {
      tree tem = make_ssa_name (TREE_TYPE (op0));
      gassign *newop
	= gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
      gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
    }
  /* Or without.  */
  else
    gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
  update_stmt (gsi_stmt (*gsi));

  return true;
}
8921
/* Simplify a division or modulo operator to a right shift or
   bitwise and if the first operand is unsigned or is greater
   than zero and the second operand is an exact power of two.
   For TRUNC_MOD_EXPR op0 % op1 with constant op1, optimize it
   into just op0 if op0's range is known to be a subset of
   [-op1 + 1, op1 - 1] for signed and [0, op1 - 1] for unsigned
   modulo.
   Returns true if STMT was changed.  */

static bool
simplify_div_or_mod_using_ranges (gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  value_range *vr = get_value_range (op0);

  /* First try the no-op modulo case: op0 % op1 == op0 when op0 is
     already within the result range of the modulo.  */
  if (rhs_code == TRUNC_MOD_EXPR
      && TREE_CODE (op1) == INTEGER_CST
      && tree_int_cst_sgn (op1) == 1
      && range_int_cst_p (vr)
      && tree_int_cst_lt (vr->max, op1))
    {
      if (TYPE_UNSIGNED (TREE_TYPE (op0))
	  || tree_int_cst_sgn (vr->min) >= 0
	  || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1), op1),
			      vr->min))
	{
	  /* If op0 already has the range op0 % op1 has,
	     then TRUNC_MOD_EXPR won't change anything.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  gimple_assign_set_rhs_from_tree (&gsi, op0);
	  update_stmt (stmt);
	  return true;
	}
    }

  /* The shift/mask transforms below require a power-of-two divisor.  */
  if (!integer_pow2p (op1))
    return false;

  /* VAL is set to a nonzero value when op0 is known non-negative.  */
  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    {
      val = integer_one_node;
    }
  else
    {
      bool sop = false;

      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);

      if (val
	  && sop
	  && integer_onep (val)
	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow,
		      "assuming signed overflow does not occur when "
		      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
	}
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  /* op0 / 2^k -> op0 >> k.  */
	  t = build_int_cst (integer_type_node, tree_log2 (op1));
	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}
      else
	{
	  /* op0 % 2^k -> op0 & (2^k - 1).  */
	  t = build_int_cst (TREE_TYPE (op1), 1);
	  t = int_const_binop (MINUS_EXPR, op1, t);
	  t = fold_convert (TREE_TYPE (op0), t);

	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
	  gimple_assign_set_rhs1 (stmt, op0);
	  gimple_assign_set_rhs2 (stmt, t);
	}

      update_stmt (stmt);
      return true;
    }

  return false;
}
9017
/* Simplify a min or max if the ranges of the two operands are
   disjoint.  Return true if we do simplify.  */

static bool
simplify_min_or_max_using_ranges (gimple *stmt)
{
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  bool sop = false;
  tree val;

  /* Try to decide OP0 <= OP1; if inconclusive, try the strict OP0 < OP1.
     Either answer (true or false) lets us pick one operand.  */
  val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
	 (LE_EXPR, op0, op1, &sop));
  if (!val)
    {
      sop = false;
      val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
	     (LT_EXPR, op0, op1, &sop));
    }

  if (val)
    {
      if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow,
		      "assuming signed overflow does not occur when "
		      "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
	}

      /* VAL == TRUE -> OP0 < or <= op1
	 VAL == FALSE -> OP0 > or >= op1.  */
      tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
		  == integer_zerop (val)) ? op0 : op1;
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple_assign_set_rhs_from_tree (&gsi, res);
      update_stmt (stmt);
      return true;
    }

  return false;
}
9065
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.
   Returns true if STMT was changed.  */

static bool
simplify_abs_using_ranges (gimple *stmt)
{
  tree op = gimple_assign_rhs1 (stmt);
  value_range *vr = get_value_range (op);

  if (vr)
    {
      tree val = NULL;
      bool sop = false;

      /* VAL true means op <= 0 (or < 0), false means op > 0 (or >= 0).  */
      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  /* The range is neither <= 0 nor > 0.  Now see if it is
	     either < 0 or >= 0.  */
	  sop = false;
	  val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
					  &sop);
	}

      if (val)
	{
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning_at (location, OPT_Wstrict_overflow,
			  "assuming signed overflow does not occur when "
			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
	    }

	  gimple_assign_set_rhs1 (stmt, op);
	  /* Setting the rhs code to SSA_NAME turns the statement into a
	     plain copy of OP; otherwise negate it.  */
	  if (integer_zerop (val))
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  else
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  update_stmt (stmt);
	  return true;
	}
    }

  return false;
}
9118
/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
   If all the bits that are being cleared by & are already
   known to be zero from VR, or all the bits that are being
   set by | are already known to be one from VR, the bit
   operation is redundant.
   GSI points at the statement STMT; returns true if it was replaced
   by a copy of one operand.  */

static bool
simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);
  tree op = NULL_TREE;
  value_range vr0 = VR_INITIALIZER;
  value_range vr1 = VR_INITIALIZER;
  wide_int may_be_nonzero0, may_be_nonzero1;
  wide_int must_be_nonzero0, must_be_nonzero1;
  wide_int mask;

  /* Derive ranges for both operands; bail if either is neither an
     SSA name nor an invariant.  */
  if (TREE_CODE (op0) == SSA_NAME)
    vr0 = *(get_value_range (op0));
  else if (is_gimple_min_invariant (op0))
    set_value_range_to_value (&vr0, op0, NULL);
  else
    return false;

  if (TREE_CODE (op1) == SSA_NAME)
    vr1 = *(get_value_range (op1));
  else if (is_gimple_min_invariant (op1))
    set_value_range_to_value (&vr1, op1, NULL);
  else
    return false;

  /* Compute the may-be-nonzero and must-be-nonzero bit masks.  */
  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
				  &must_be_nonzero0))
    return false;
  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
				  &must_be_nonzero1))
    return false;

  switch (gimple_assign_rhs_code (stmt))
    {
    case BIT_AND_EXPR:
      /* op0 & op1 == op0 if every possibly-set bit of op0 is known
	 set in op1 (and symmetrically for op1).  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      break;
    case BIT_IOR_EXPR:
      /* op0 | op1 == op1 if every possibly-set bit of op0 is known
	 set in op1 (and symmetrically for op0).  */
      mask = may_be_nonzero0.and_not (must_be_nonzero1);
      if (mask == 0)
	{
	  op = op1;
	  break;
	}
      mask = may_be_nonzero1.and_not (must_be_nonzero0);
      if (mask == 0)
	{
	  op = op0;
	  break;
	}
      break;
    default:
      gcc_unreachable ();
    }

  if (op == NULL_TREE)
    return false;

  /* Replace the bit operation with a copy of the surviving operand.  */
  gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
  update_stmt (gsi_stmt (*gsi));
  return true;
}
9199
/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.

   If signed overflow must be undefined for the value to satisfy
   the conditional, then set *STRICT_OVERFLOW_P to true.  */

static tree
test_for_singularity (enum tree_code cond_code, tree op0,
		      tree op1, value_range *vr,
		      bool *strict_overflow_p)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the
     conditional as it was written.  */
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      /* This should not be negative infinity; there is no overflow
	 here.  */
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      if (cond_code == LT_EXPR && !is_overflow_infinity (max))
	{
	  /* For a strict comparison the satisfying maximum is op1 - 1.  */
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
	  if (EXPR_P (max))
	    TREE_NO_WARNING (max) = 1;
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      /* This should not be positive infinity; there is no overflow
	 here.  */
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      if (cond_code == GT_EXPR && !is_overflow_infinity (min))
	{
	  /* For a strict comparison the satisfying minimum is op1 + 1.  */
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
	  if (EXPR_P (min))
	    TREE_NO_WARNING (min) = 1;
	}
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      if (compare_values (vr->min, min) == 1)
	min = vr->min;
      if (compare_values (vr->max, max) == -1)
	max = vr->max;

      /* If the new min/max values have converged to a single value,
	 then there is only one value which can satisfy the condition,
	 return that value.  */
      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
	{
	  if ((cond_code == LE_EXPR || cond_code == LT_EXPR)
	      && is_overflow_infinity (vr->max))
	    *strict_overflow_p = true;
	  if ((cond_code == GE_EXPR || cond_code == GT_EXPR)
	      && is_overflow_infinity (vr->min))
	    *strict_overflow_p = true;

	  return min;
	}
    }
  return NULL;
}
9276
/* Return whether the value range *VR fits in an integer type specified
   by DEST_PRECISION and DEST_SGN.  */

static bool
range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
{
  tree src_type;
  unsigned src_precision;
  widest_int tem;
  signop src_sgn;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
     and so is an identity transform.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  src_sgn = TYPE_SIGN (src_type);
  if ((src_precision < dest_precision
       && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
      || (src_precision == dest_precision && src_sgn == dest_sgn))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For sign changes, the MSB of the wide_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed wide_int, while a negative value cannot be represented
     by an unsigned wide_int.  */
  if (src_sgn != dest_sgn
      && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  */
  tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->min))
    return false;
  tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
  if (tem != wi::to_widest (vr->max))
    return false;

  return true;
}
9328
9329 /* Simplify a conditional using a relational operator to an equality
9330 test if the range information indicates only one value can satisfy
9331 the original conditional. */
9332
9333 static bool
9334 simplify_cond_using_ranges (gcond *stmt)
9335 {
9336 tree op0 = gimple_cond_lhs (stmt);
9337 tree op1 = gimple_cond_rhs (stmt);
9338 enum tree_code cond_code = gimple_cond_code (stmt);
9339
9340 if (cond_code != NE_EXPR
9341 && cond_code != EQ_EXPR
9342 && TREE_CODE (op0) == SSA_NAME
9343 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
9344 && is_gimple_min_invariant (op1))
9345 {
9346 value_range *vr = get_value_range (op0);
9347
9348 /* If we have range information for OP0, then we might be
9349 able to simplify this conditional. */
9350 if (vr->type == VR_RANGE)
9351 {
9352 enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
9353 bool sop = false;
9354 tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop);
9355
9356 if (new_tree
9357 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9358 {
9359 if (dump_file)
9360 {
9361 fprintf (dump_file, "Simplified relational ");
9362 print_gimple_stmt (dump_file, stmt, 0, 0);
9363 fprintf (dump_file, " into ");
9364 }
9365
9366 gimple_cond_set_code (stmt, EQ_EXPR);
9367 gimple_cond_set_lhs (stmt, op0);
9368 gimple_cond_set_rhs (stmt, new_tree);
9369
9370 update_stmt (stmt);
9371
9372 if (dump_file)
9373 {
9374 print_gimple_stmt (dump_file, stmt, 0, 0);
9375 fprintf (dump_file, "\n");
9376 }
9377
9378 if (sop && issue_strict_overflow_warning (wc))
9379 {
9380 location_t location = input_location;
9381 if (gimple_has_location (stmt))
9382 location = gimple_location (stmt);
9383
9384 warning_at (location, OPT_Wstrict_overflow,
9385 "assuming signed overflow does not occur when "
9386 "simplifying conditional");
9387 }
9388
9389 return true;
9390 }
9391
9392 /* Try again after inverting the condition. We only deal
9393 with integral types here, so no need to worry about
9394 issues with inverting FP comparisons. */
9395 sop = false;
9396 new_tree = test_for_singularity
9397 (invert_tree_comparison (cond_code, false),
9398 op0, op1, vr, &sop);
9399
9400 if (new_tree
9401 && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9402 {
9403 if (dump_file)
9404 {
9405 fprintf (dump_file, "Simplified relational ");
9406 print_gimple_stmt (dump_file, stmt, 0, 0);
9407 fprintf (dump_file, " into ");
9408 }
9409
9410 gimple_cond_set_code (stmt, NE_EXPR);
9411 gimple_cond_set_lhs (stmt, op0);
9412 gimple_cond_set_rhs (stmt, new_tree);
9413
9414 update_stmt (stmt);
9415
9416 if (dump_file)
9417 {
9418 print_gimple_stmt (dump_file, stmt, 0, 0);
9419 fprintf (dump_file, "\n");
9420 }
9421
9422 if (sop && issue_strict_overflow_warning (wc))
9423 {
9424 location_t location = input_location;
9425 if (gimple_has_location (stmt))
9426 location = gimple_location (stmt);
9427
9428 warning_at (location, OPT_Wstrict_overflow,
9429 "assuming signed overflow does not occur when "
9430 "simplifying conditional");
9431 }
9432
9433 return true;
9434 }
9435 }
9436 }
9437
9438 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
9439 see if OP0 was set by a type conversion where the source of
9440 the conversion is another SSA_NAME with a range that fits
9441 into the range of OP0's type.
9442
9443 If so, the conversion is redundant as the earlier SSA_NAME can be
9444 used for the comparison directly if we just massage the constant in the
9445 comparison. */
9446 if (TREE_CODE (op0) == SSA_NAME
9447 && TREE_CODE (op1) == INTEGER_CST)
9448 {
9449 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
9450 tree innerop;
9451
9452 if (!is_gimple_assign (def_stmt)
9453 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9454 return false;
9455
9456 innerop = gimple_assign_rhs1 (def_stmt);
9457
9458 if (TREE_CODE (innerop) == SSA_NAME
9459 && !POINTER_TYPE_P (TREE_TYPE (innerop)))
9460 {
9461 value_range *vr = get_value_range (innerop);
9462
9463 if (range_int_cst_p (vr)
9464 && range_fits_type_p (vr,
9465 TYPE_PRECISION (TREE_TYPE (op0)),
9466 TYPE_SIGN (TREE_TYPE (op0)))
9467 && int_fits_type_p (op1, TREE_TYPE (innerop))
9468 /* The range must not have overflowed, or if it did overflow
9469 we must not be wrapping/trapping overflow and optimizing
9470 with strict overflow semantics. */
9471 && ((!is_negative_overflow_infinity (vr->min)
9472 && !is_positive_overflow_infinity (vr->max))
9473 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
9474 {
9475 /* If the range overflowed and the user has asked for warnings
9476 when strict overflow semantics were used to optimize code,
9477 issue an appropriate warning. */
9478 if (cond_code != EQ_EXPR && cond_code != NE_EXPR
9479 && (is_negative_overflow_infinity (vr->min)
9480 || is_positive_overflow_infinity (vr->max))
9481 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
9482 {
9483 location_t location;
9484
9485 if (!gimple_has_location (stmt))
9486 location = input_location;
9487 else
9488 location = gimple_location (stmt);
9489 warning_at (location, OPT_Wstrict_overflow,
9490 "assuming signed overflow does not occur when "
9491 "simplifying conditional");
9492 }
9493
9494 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
9495 gimple_cond_set_lhs (stmt, innerop);
9496 gimple_cond_set_rhs (stmt, newconst);
9497 return true;
9498 }
9499 }
9500 }
9501
9502 return false;
9503 }
9504
/* Simplify a switch statement using the value range of the switch
   argument.  Unreachable case labels are dropped: the surviving labels
   are collected into a new label vector and dead outgoing edges are
   queued on TO_REMOVE_EDGES; the switch itself is rewritten later via
   TO_UPDATE_SWITCH_STMTS.  Always returns false since the statement is
   not changed in place here.  */

static bool
simplify_switch_using_ranges (gswitch *stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] and [k, l] are the inclusive label-index sub-ranges that
     remain reachable (empty when the lower bound exceeds the upper).  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
           && vr->type != VR_ANTI_RANGE)
          || symbolic_range_p (vr))
        return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
        {
          /* No matching label: keep only the default (empty [i, j]).  */
          i = 1;
          j = 0;
        }
      else
        {
          /* Exactly one label matches.  */
          j = i;
        }
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  The -1 marker in E->aux distinguishes edges
     reached by a surviving label from those that are now dead.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
                     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
        {
          /* Still reachable; clear the marker and keep it.  */
          e->aux = NULL;
          continue;
        }

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "removing unreachable case label\n");
        }
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
9603
/* Simplify an integral conversion from an SSA name in STMT.  STMT is
   lhs = (T3) middleop where middleop was itself defined by a
   conversion middleop = (T2) innerop; if dropping the intermediate
   conversion provably cannot change the result for any value in
   INNEROP's range, rewrite STMT to convert INNEROP directly.
   Returns true if STMT was changed.  */

static bool
simplify_conversion_using_ranges (gimple *stmt)
{
  tree innerop, middleop, finaltype;
  gimple *def_stmt;
  value_range *innervr;
  signop inner_sgn, middle_sgn, final_sgn;
  unsigned inner_prec, middle_prec, final_prec;
  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  */
  innervr = get_value_range (innerop);
  if (innervr->type != VR_RANGE
      || TREE_CODE (innervr->min) != INTEGER_CST
      || TREE_CODE (innervr->max) != INTEGER_CST)
    return false;

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  innermin = wi::to_widest (innervr->min);
  innermax = wi::to_widest (innervr->max);

  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if (wi::gtu_p (innermax - innermin,
                 wi::mask <widest_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
  if (inner_sgn == UNSIGNED)
    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
  else
    innermed = 0;
  /* Only use the medium value if it actually lies strictly inside
     [innermin, innermax]; otherwise fall back to the minimum.  */
  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
    innermed = innermin;

  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
  middlemax = wi::ext (innermax, middle_prec, middle_sgn);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_sgn = TYPE_SIGN (finaltype);
  if (wi::ext (middlemin, final_prec, final_sgn)
      != wi::ext (innermin, final_prec, final_sgn)
      || wi::ext (middlemed, final_prec, final_sgn)
         != wi::ext (innermed, final_prec, final_sgn)
      || wi::ext (middlemax, final_prec, final_sgn)
         != wi::ext (innermax, final_prec, final_sgn))
    return false;

  /* The middle conversion is redundant; convert INNEROP directly.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  update_stmt (stmt);
  return true;
}
9682
/* Simplify a conversion from integral SSA name to float in STMT.  If
   the value range of the integer operand fits a signed and/or narrower
   mode for which the target supports the int->float conversion, insert
   a preparatory integer conversion so the cheaper conversion can be
   used.  Returns true if STMT was changed.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
                                        gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range *vr = get_value_range (rhs1);
  machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  machine_mode mode;
  tree tem;
  gassign *conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
          != CODE_FOR_nothing)
      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
                        TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
        {
          /* If we cannot do a signed conversion to float from mode
             or if the value-range does not fit in the signed type
             try with a wider mode.  */
          if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
              && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
            break;

          mode = GET_MODE_WIDER_MODE (mode);
          /* But do not widen the input.  Instead leave that to the
             optabs expansion code.  */
          if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
            return false;
        }
      while (mode != VOIDmode);
      if (mode == VOIDmode)
        return false;
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
                         (GET_MODE_PRECISION (mode), 0));
  conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  update_stmt (stmt);

  return true;
}
9748
/* Simplify an internal fn call using ranges if possible.  Handles
   IFN_UBSAN_CHECK_{ADD,SUB,MUL} and IFN_{ADD,SUB,MUL}_OVERFLOW: when
   range information can decide whether the operation overflows, the
   call is replaced by plain arithmetic (for the *_OVERFLOW variants, a
   COMPLEX_EXPR pairing the result with the now-known overflow flag).
   Returns true if the call was replaced.  */

static bool
simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
{
  enum tree_code subcode;
  bool is_ubsan = false;
  bool ovf = false;
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_UBSAN_CHECK_ADD:
      subcode = PLUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_SUB:
      subcode = MINUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_MUL:
      subcode = MULT_EXPR;
      is_ubsan = true;
      break;
    case IFN_ADD_OVERFLOW:
      subcode = PLUS_EXPR;
      break;
    case IFN_SUB_OVERFLOW:
      subcode = MINUS_EXPR;
      break;
    case IFN_MUL_OVERFLOW:
      subcode = MULT_EXPR;
      break;
    default:
      return false;
    }

  tree op0 = gimple_call_arg (stmt, 0);
  tree op1 = gimple_call_arg (stmt, 1);
  tree type;
  if (is_ubsan)
    type = TREE_TYPE (op0);
  else if (gimple_call_lhs (stmt) == NULL_TREE)
    return false;
  else
    /* For the *_OVERFLOW fns the element type of the lhs is the
       arithmetic type (the lhs is built as a COMPLEX_EXPR below).  */
    type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
  /* Bail if overflow cannot be decided, or, for ubsan checks, if the
     operation is known to overflow (the check must then stay).  */
  if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
      || (is_ubsan && ovf))
    return false;

  gimple *g;
  location_t loc = gimple_location (stmt);
  if (is_ubsan)
    g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
  else
    {
      /* Use an unsigned type for the arithmetic when the result
         overflows or the operand types do not match the result type,
         so the emitted operation cannot itself invoke undefined
         signed overflow.  */
      int prec = TYPE_PRECISION (type);
      tree utype = type;
      if (ovf
          || !useless_type_conversion_p (type, TREE_TYPE (op0))
          || !useless_type_conversion_p (type, TREE_TYPE (op1)))
        utype = build_nonstandard_integer_type (prec, 1);
      if (TREE_CODE (op0) == INTEGER_CST)
        op0 = fold_convert (utype, op0);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
        {
          g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
          gimple_set_location (g, loc);
          gsi_insert_before (gsi, g, GSI_SAME_STMT);
          op0 = gimple_assign_lhs (g);
        }
      if (TREE_CODE (op1) == INTEGER_CST)
        op1 = fold_convert (utype, op1);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
        {
          g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
          gimple_set_location (g, loc);
          gsi_insert_before (gsi, g, GSI_SAME_STMT);
          op1 = gimple_assign_lhs (g);
        }
      g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      if (utype != type)
        {
          /* Convert the result back to the expected type.  */
          g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
                                   gimple_assign_lhs (g));
          gimple_set_location (g, loc);
          gsi_insert_before (gsi, g, GSI_SAME_STMT);
        }
      /* Pair the result with the statically-known overflow flag.  */
      g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
                               gimple_assign_lhs (g),
                               build_int_cst (type, ovf));
    }
  gimple_set_location (g, loc);
  gsi_replace (gsi, g, false);
  return true;
}
9845
/* Simplify STMT using ranges if possible.  Dispatches on the kind of
   statement at *GSI to the specialized simplifiers above; returns true
   if the statement was simplified.  */

static bool
simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  if (is_gimple_assign (stmt))
    {
      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
      tree rhs1 = gimple_assign_rhs1 (stmt);

      switch (rhs_code)
        {
        case EQ_EXPR:
        case NE_EXPR:
          /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
             if the RHS is zero or one, and the LHS are known to be boolean
             values.  */
          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_truth_ops_using_ranges (gsi, stmt);
          break;

        /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
           and BIT_AND_EXPR respectively if the first operand is greater
           than zero and the second operand is an exact power of two.
           Also optimize TRUNC_MOD_EXPR away if the second operand is
           constant and the first operand already has the right value
           range.  */
        case TRUNC_DIV_EXPR:
        case TRUNC_MOD_EXPR:
          if (TREE_CODE (rhs1) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_div_or_mod_using_ranges (stmt);
          break;

        /* Transform ABS (X) into X or -X as appropriate.  */
        case ABS_EXPR:
          if (TREE_CODE (rhs1) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_abs_using_ranges (stmt);
          break;

        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
          /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
             if all the bits being cleared are already cleared or
             all the bits being set are already set.  */
          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_bit_ops_using_ranges (gsi, stmt);
          break;

        CASE_CONVERT:
          if (TREE_CODE (rhs1) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_conversion_using_ranges (stmt);
          break;

        case FLOAT_EXPR:
          if (TREE_CODE (rhs1) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_float_conversion_using_ranges (gsi, stmt);
          break;

        case MIN_EXPR:
        case MAX_EXPR:
          return simplify_min_or_max_using_ranges (stmt);
          break;

        default:
          break;
        }
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    return simplify_cond_using_ranges (as_a <gcond *> (stmt));
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
  else if (is_gimple_call (stmt)
           && gimple_call_internal_p (stmt))
    return simplify_internal_call_using_ranges (gsi, stmt);

  return false;
}
9928
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple *stmt = gsi_stmt (*si);

  /* Comparisons can appear either as the rhs of an assignment or as
     the condition of a GIMPLE_COND.  */
  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
                                      gimple_assign_rhs1 (stmt),
                                      gimple_assign_rhs2 (stmt),
                                      stmt);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
                                    gimple_cond_lhs (cond_stmt),
                                    gimple_cond_rhs (cond_stmt),
                                    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
        /* Convert the computed truth value to the type of the lhs.  */
        val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
        {
          fprintf (dump_file, "Folding predicate ");
          print_gimple_expr (dump_file, stmt, 0, 0);
          fprintf (dump_file, " to ");
          print_generic_expr (dump_file, val, 0);
          fprintf (dump_file, "\n");
        }

      if (is_gimple_assign (stmt))
        gimple_assign_set_rhs_from_tree (si, val);
      else
        {
          gcc_assert (gimple_code (stmt) == GIMPLE_COND);
          gcond *cond_stmt = as_a <gcond *> (stmt);
          if (integer_zerop (val))
            gimple_cond_make_false (cond_stmt);
          else if (integer_onep (val))
            gimple_cond_make_true (cond_stmt);
          else
            gcc_unreachable ();
        }

      return true;
    }

  return false;
}
9990
9991 /* Callback for substitute_and_fold folding the stmt at *SI. */
9992
9993 static bool
9994 vrp_fold_stmt (gimple_stmt_iterator *si)
9995 {
9996 if (fold_predicate_in (si))
9997 return true;
9998
9999 return simplify_stmt_using_ranges (si);
10000 }
10001
/* Unwindable const/copy equivalences recorded during jump threading.
   Allocated in identify_jump_threads and released in
   finalize_jump_threads.  */
const_and_copies *equiv_stack;
10004
/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  Returns the constant STMT computes to,
   or NULL_TREE if it cannot be determined.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
    class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
{
  /* Conditions are evaluated directly against the recorded ranges.  */
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
                                     gimple_cond_lhs (cond_stmt),
                                     gimple_cond_rhs (cond_stmt),
                                     within_stmt);

  /* For assignments, re-derive the range of the lhs and return it if
     it degenerates to a single integer constant.  */
  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      value_range new_vr = VR_INITIALIZER;
      tree lhs = gimple_assign_lhs (assign_stmt);

      if (TREE_CODE (lhs) == SSA_NAME
          && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
              || POINTER_TYPE_P (TREE_TYPE (lhs))))
        {
          extract_range_from_assignment (&new_vr, assign_stmt);
          if (range_int_cst_singleton_p (&new_vr))
            return new_vr.min;
        }
    }

  return NULL_TREE;
}
10037
/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (void)
{
  basic_block bb;
  gcond *dummy;
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_IGNORE will do.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags |= EDGE_IGNORE;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  equiv_stack = new const_and_copies ();

  /* To avoid lots of silly node creation, we create a single
     conditional and just modify it in-place when attempting to
     thread jumps.  */
  dummy = gimple_build_cond (EQ_EXPR,
                             integer_zero_node, integer_zero_node,
                             NULL, NULL);

  /* Walk through all the blocks finding those which present a
     potential jump threading opportunity.  We could set this up
     as a dominator walker and record data during the walk, but
     I doubt it's worth the effort for the classes of jump
     threading opportunities we are trying to identify at this
     point in compilation.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last;

      /* If the generic jump threading code does not find this block
         interesting, then there is nothing to do.  */
      if (! potentially_threadable_block (bb))
        continue;

      last = last_stmt (bb);

      /* We're basically looking for a switch or any kind of conditional with
         integral or pointer type arguments.  Note the type of the second
         argument will be the same as the first argument, so no need to
         check it explicitly.

         We also handle the case where there are no statements in the
         block.  This come up with forwarder blocks that are not
         optimized away because they lead to a loop header.  But we do
         want to thread through them as we can sometimes thread to the
         loop exit which is obviously profitable.  */
      if (!last
          || gimple_code (last) == GIMPLE_SWITCH
          || (gimple_code (last) == GIMPLE_COND
              && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
              && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
                  || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
              && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_cond_rhs (last)))))
        {
          edge_iterator ei;

          /* We've got a block with multiple predecessors and multiple
             successors which also ends in a suitable conditional or
             switch statement.  For each predecessor, see if we can thread
             it to a specific successor.  */
          FOR_EACH_EDGE (e, ei, bb->preds)
            {
              /* Do not thread across edges marked to ignore or abnormal
                 edges in the CFG.  */
              if (e->flags & (EDGE_IGNORE | EDGE_COMPLEX))
                continue;

              thread_across_edge (dummy, e, true, equiv_stack, NULL,
                                  simplify_stmt_for_jump_threading);
            }
        }
    }

  /* Clear EDGE_IGNORE.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags &= ~EDGE_IGNORE;

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
}
10157
/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */
static void
finalize_jump_threads (void)
{
  /* Realize all the jump threads registered in identify_jump_threads.
     NOTE(review): the FALSE argument presumably disables peeling of
     loop headers -- confirm against tree-ssa-threadupdate.h.  */
  thread_through_all_blocks (false);
  /* Release the equivalence stack allocated in identify_jump_threads.  */
  delete equiv_stack;
}
10170
10171
/* Traverse all the blocks folding conditionals with known ranges.
   Also transfers computed ranges to the SSA_NAME range info and
   releases all VRP data structures.  */

static void
vrp_finalize (void)
{
  size_t i;

  values_propagated = true;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Fold statements and propagate singleton ranges as constants.  */
  substitute_and_fold (op_with_constant_singleton_value_range,
                       vrp_fold_stmt, false);

  if (warn_array_bounds && first_pass_instance)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Set value range to non pointer SSA_NAMEs.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
        tree name = ssa_name (i);

        /* Skip released names, pointers and ranges that carry no
           useful information.  */
        if (!name
            || POINTER_TYPE_P (TREE_TYPE (name))
            || (vr_value[i]->type == VR_VARYING)
            || (vr_value[i]->type == VR_UNDEFINED))
          continue;

        if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
            && (TREE_CODE (vr_value[i]->max) == INTEGER_CST)
            && (vr_value[i]->type == VR_RANGE
                || vr_value[i]->type == VR_ANTI_RANGE))
          set_range_info (name, vr_value[i]->type, vr_value[i]->min,
                          vr_value[i]->max);
      }

  /* Free allocated memory.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
        BITMAP_FREE (vr_value[i]->equiv);
        free (vr_value[i]);
      }

  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}
10234
10235
10236 /* Main entry point to VRP (Value Range Propagation). This pass is
10237 loosely based on J. R. C. Patterson, ``Accurate Static Branch
10238 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
10239 Programming Language Design and Implementation, pp. 67-78, 1995.
10240 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
10241
10242 This is essentially an SSA-CCP pass modified to deal with ranges
10243 instead of constants.
10244
10245 While propagating ranges, we may find that two or more SSA name
10246 have equivalent, though distinct ranges. For instance,
10247
10248 1 x_9 = p_3->a;
10249 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
10250 3 if (p_4 == q_2)
10251 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
10252 5 endif
10253 6 if (q_2)
10254
10255 In the code above, pointer p_5 has range [q_2, q_2], but from the
10256 code we can also determine that p_5 cannot be NULL and, if q_2 had
10257 a non-varying range, p_5's range should also be compatible with it.
10258
10259 These equivalences are created by two expressions: ASSERT_EXPR and
10260 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
10261 result of another assertion, then we can use the fact that p_5 and
10262 p_4 are equivalent when evaluating p_5's range.
10263
10264 Together with value ranges, we also propagate these equivalences
10265 between names so that we can take advantage of information from
10266 multiple ranges when doing final replacement. Note that this
10267 equivalency relation is transitive but not symmetric.
10268
10269 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
10270 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
10271 in contexts where that assertion does not hold (e.g., in line 6).
10272
10273 TODO, the main difference between this pass and Patterson's is that
10274 we do not propagate edge probabilities. We only compute whether
10275 edges can be taken or not. That is, instead of having a spectrum
10276 of jump probabilities between 0 and 1, we only deal with 0, 1 and
10277 DON'T KNOW. In the future, it may be worthwhile to propagate
10278 probabilities to aid branch prediction. */
10279
/* Main driver for the VRP pass.  Returns 0 (no additional TODO flags
   beyond those declared in pass_data_vrp).  */

static unsigned int
execute_vrp (void)
{
  int i;
  edge e;
  switch_update *su;

  /* Set up loop structures and scalar evolution; VRP runs with loops
     in loop-closed SSA form and later frees niter estimates, so both
     must be initialized here.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  /* Scratch vectors filled during propagation/simplification and
     consumed further down; released at the end of this function.  */
  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  /* Run the SSA propagation engine over the lattice of value ranges,
     then apply the results (simplification, jump-thread discovery).  */
  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  /* Recorded numbers-of-iterations estimates are discarded here;
     presumably they may have been invalidated by the transformations
     above -- NOTE(review): confirm against vrp_finalize.  */
  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Edge removal invalidated dominators and may have broken loop
     structures; request the corresponding fixups.  */
  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Release per-pass scratch state set up above.  */
  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
10358
10359 namespace {
10360
/* Pass descriptor for VRP; consumed by the pass manager via
   pass_vrp below.  */

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};
10373
10374 class pass_vrp : public gimple_opt_pass
10375 {
10376 public:
10377 pass_vrp (gcc::context *ctxt)
10378 : gimple_opt_pass (pass_data_vrp, ctxt)
10379 {}
10380
10381 /* opt_pass methods: */
10382 opt_pass * clone () { return new pass_vrp (m_ctxt); }
10383 virtual bool gate (function *) { return flag_tree_vrp != 0; }
10384 virtual unsigned int execute (function *) { return execute_vrp (); }
10385
10386 }; // class pass_vrp
10387
10388 } // anon namespace
10389
10390 gimple_opt_pass *
10391 make_pass_vrp (gcc::context *ctxt)
10392 {
10393 return new pass_vrp (ctxt);
10394 }