1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "tree-pretty-print.h"
35 #include "gimple-pretty-print.h"
36 #include "diagnostic-core.h"
37 #include "toplev.h"
38 #include "intl.h"
39 #include "cfgloop.h"
40 #include "tree-scalar-evolution.h"
41 #include "tree-ssa-propagate.h"
42 #include "tree-chrec.h"
43
44
45 /* Type of value ranges. See value_range_d for a description of these
46 types. */
47 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
48
49 /* Range of values that can be associated with an SSA_NAME after VRP
50 has executed. */
51 struct value_range_d
52 {
53 /* Lattice value represented by this range. */
54 enum value_range_type type;
55
56 /* Minimum and maximum values represented by this range. These
57 values should be interpreted as follows:
58
59 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
60 be NULL.
61
62 - If TYPE == VR_RANGE then MIN holds the minimum value and
63 MAX holds the maximum value of the range [MIN, MAX].
64
65 - If TYPE == ANTI_RANGE the variable is known to NOT
66 take any values in the range [MIN, MAX]. */
67 tree min;
68 tree max;
69
70 /* Set of SSA names whose value ranges are equivalent to this one.
71 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
72 bitmap equiv;
73 };
74
75 typedef struct value_range_d value_range_t;
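/* A few illustrative examples (values not from this file) of what these
   lattice values look like for a 32-bit signed variable:

     VR_UNDEFINED              -- the name has not been visited yet
     VR_RANGE [0, 255]         -- the value is known to lie in [0, 255]
     VR_ANTI_RANGE [0, 0]      -- the value is known to be nonzero (~[0, 0])
     VR_VARYING                -- nothing is known about the value.  */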
76
77 /* Set of SSA names found live during the RPO traversal of the function
78 for still active basic-blocks. */
79 static sbitmap *live;
80
81 /* Return true if the SSA name NAME is live on the edge E. */
82
83 static bool
84 live_on_edge (edge e, tree name)
85 {
86 return (live[e->dest->index]
87 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
88 }
89
90 /* Local functions. */
91 static int compare_values (tree val1, tree val2);
92 static int compare_values_warnv (tree val1, tree val2, bool *);
93 static void vrp_meet (value_range_t *, value_range_t *);
94 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
95 tree, tree, bool, bool *,
96 bool *);
97
98 /* Location information for ASSERT_EXPRs. Each instance of this
99 structure describes an ASSERT_EXPR for an SSA name. Since a single
100 SSA name may have more than one assertion associated with it, these
101 locations are kept in a linked list attached to the corresponding
102 SSA name. */
103 struct assert_locus_d
104 {
105 /* Basic block where the assertion would be inserted. */
106 basic_block bb;
107
108 /* Some assertions need to be inserted on an edge (e.g., assertions
109 generated by COND_EXPRs). In those cases, BB will be NULL. */
110 edge e;
111
112 /* Pointer to the statement that generated this assertion. */
113 gimple_stmt_iterator si;
114
115 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
116 enum tree_code comp_code;
117
118 /* Value being compared against. */
119 tree val;
120
121 /* Expression to compare. */
122 tree expr;
123
124 /* Next node in the linked list. */
125 struct assert_locus_d *next;
126 };
127
128 typedef struct assert_locus_d *assert_locus_t;
129
130 /* If bit I is present, it means that SSA name N_i has a list of
131 assertions that should be inserted in the IL. */
132 static bitmap need_assert_for;
133
134 /* Array of location lists where to insert assertions. ASSERTS_FOR[I]
135 holds a list of ASSERT_LOCUS_T nodes that describe where
136 ASSERT_EXPRs for SSA name N_I should be inserted. */
137 static assert_locus_t *asserts_for;
138
139 /* Value range array. After propagation, VR_VALUE[I] holds the range
140 of values that SSA name N_I may take. */
141 static value_range_t **vr_value;
142
143 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
144 number of executable edges we saw the last time we visited the
145 node. */
146 static int *vr_phi_edge_counts;
147
148 typedef struct {
149 gimple stmt;
150 tree vec;
151 } switch_update;
152
153 static VEC (edge, heap) *to_remove_edges;
154 DEF_VEC_O(switch_update);
155 DEF_VEC_ALLOC_O(switch_update, heap);
156 static VEC (switch_update, heap) *to_update_switch_stmts;
157
158
159 /* Return the maximum value for TYPE. */
160
161 static inline tree
162 vrp_val_max (const_tree type)
163 {
164 if (!INTEGRAL_TYPE_P (type))
165 return NULL_TREE;
166
167 return TYPE_MAX_VALUE (type);
168 }
169
170 /* Return the minimum value for TYPE. */
171
172 static inline tree
173 vrp_val_min (const_tree type)
174 {
175 if (!INTEGRAL_TYPE_P (type))
176 return NULL_TREE;
177
178 return TYPE_MIN_VALUE (type);
179 }
180
181 /* Return whether VAL is equal to the maximum value of its type. This
182 will be true for a positive overflow infinity. We can't do a
183 simple equality comparison with TYPE_MAX_VALUE because C typedefs
184 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
185 to the integer constant with the same value in the type. */
186
187 static inline bool
188 vrp_val_is_max (const_tree val)
189 {
190 tree type_max = vrp_val_max (TREE_TYPE (val));
191 return (val == type_max
192 || (type_max != NULL_TREE
193 && operand_equal_p (val, type_max, 0)));
194 }
195
196 /* Return whether VAL is equal to the minimum value of its type. This
197 will be true for a negative overflow infinity. */
198
199 static inline bool
200 vrp_val_is_min (const_tree val)
201 {
202 tree type_min = vrp_val_min (TREE_TYPE (val));
203 return (val == type_min
204 || (type_min != NULL_TREE
205 && operand_equal_p (val, type_min, 0)));
206 }
207
208
209 /* Return whether TYPE should use an overflow infinity distinct from
210 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
211 represent a signed overflow during VRP computations. An infinity
212 is distinct from a half-range, which will go from some number to
213 TYPE_{MIN,MAX}_VALUE. */
214
215 static inline bool
216 needs_overflow_infinity (const_tree type)
217 {
218 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
219 }
220
221 /* Return whether TYPE can support our overflow infinity
222 representation: we use the TREE_OVERFLOW flag, which only exists
223 for constants. If TYPE doesn't support this, we don't optimize
224 cases which would require signed overflow--we drop them to
225 VARYING. */
226
227 static inline bool
228 supports_overflow_infinity (const_tree type)
229 {
230 tree min = vrp_val_min (type), max = vrp_val_max (type);
231 #ifdef ENABLE_CHECKING
232 gcc_assert (needs_overflow_infinity (type));
233 #endif
234 return (min != NULL_TREE
235 && CONSTANT_CLASS_P (min)
236 && max != NULL_TREE
237 && CONSTANT_CLASS_P (max));
238 }
239
240 /* VAL is the maximum or minimum value of a type. Return a
241 corresponding overflow infinity. */
242
243 static inline tree
244 make_overflow_infinity (tree val)
245 {
246 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
247 val = copy_node (val);
248 TREE_OVERFLOW (val) = 1;
249 return val;
250 }
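/* For instance (an illustrative sketch, not code from this file): for a
   32-bit signed type, positive_overflow_infinity below yields a copy of
   TYPE_MAX_VALUE with TREE_OVERFLOW set. It still compares equal to
   TYPE_MAX_VALUE via operand_equal_p, so vrp_val_is_max holds for it,
   but the TREE_OVERFLOW bit is what lets is_positive_overflow_infinity
   tell it apart from a regular INT_MAX constant.  */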
251
252 /* Return a negative overflow infinity for TYPE. */
253
254 static inline tree
255 negative_overflow_infinity (tree type)
256 {
257 gcc_checking_assert (supports_overflow_infinity (type));
258 return make_overflow_infinity (vrp_val_min (type));
259 }
260
261 /* Return a positive overflow infinity for TYPE. */
262
263 static inline tree
264 positive_overflow_infinity (tree type)
265 {
266 gcc_checking_assert (supports_overflow_infinity (type));
267 return make_overflow_infinity (vrp_val_max (type));
268 }
269
270 /* Return whether VAL is a negative overflow infinity. */
271
272 static inline bool
273 is_negative_overflow_infinity (const_tree val)
274 {
275 return (needs_overflow_infinity (TREE_TYPE (val))
276 && CONSTANT_CLASS_P (val)
277 && TREE_OVERFLOW (val)
278 && vrp_val_is_min (val));
279 }
280
281 /* Return whether VAL is a positive overflow infinity. */
282
283 static inline bool
284 is_positive_overflow_infinity (const_tree val)
285 {
286 return (needs_overflow_infinity (TREE_TYPE (val))
287 && CONSTANT_CLASS_P (val)
288 && TREE_OVERFLOW (val)
289 && vrp_val_is_max (val));
290 }
291
292 /* Return whether VAL is a positive or negative overflow infinity. */
293
294 static inline bool
295 is_overflow_infinity (const_tree val)
296 {
297 return (needs_overflow_infinity (TREE_TYPE (val))
298 && CONSTANT_CLASS_P (val)
299 && TREE_OVERFLOW (val)
300 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
301 }
302
303 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
304
305 static inline bool
306 stmt_overflow_infinity (gimple stmt)
307 {
308 if (is_gimple_assign (stmt)
309 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
310 GIMPLE_SINGLE_RHS)
311 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
312 return false;
313 }
314
315 /* If VAL is not an overflow infinity, return VAL. Otherwise, return
316 the same value with TREE_OVERFLOW clear. This can be used to avoid
317 confusing a regular value with an overflow value. */
318
319 static inline tree
320 avoid_overflow_infinity (tree val)
321 {
322 if (!is_overflow_infinity (val))
323 return val;
324
325 if (vrp_val_is_max (val))
326 return vrp_val_max (TREE_TYPE (val));
327 else
328 {
329 gcc_checking_assert (vrp_val_is_min (val));
330 return vrp_val_min (TREE_TYPE (val));
331 }
332 }
333
334
335 /* Return true if ARG is marked with the nonnull attribute in the
336 current function signature. */
337
338 static bool
339 nonnull_arg_p (const_tree arg)
340 {
341 tree t, attrs, fntype;
342 unsigned HOST_WIDE_INT arg_num;
343
344 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
345
346 /* The static chain decl is always non null. */
347 if (arg == cfun->static_chain_decl)
348 return true;
349
350 fntype = TREE_TYPE (current_function_decl);
351 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
352
353 /* If "nonnull" wasn't specified, we know nothing about the argument. */
354 if (attrs == NULL_TREE)
355 return false;
356
357 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
358 if (TREE_VALUE (attrs) == NULL_TREE)
359 return true;
360
361 /* Get the position number for ARG in the function signature. */
362 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
363 t;
364 t = DECL_CHAIN (t), arg_num++)
365 {
366 if (t == arg)
367 break;
368 }
369
370 gcc_assert (t == arg);
371
372 /* Now see if ARG_NUM is mentioned in the nonnull list. */
373 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
374 {
375 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
376 return true;
377 }
378
379 return false;
380 }
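/* For example (a hypothetical declaration, for illustration only):

     void f (char *a, char *b) __attribute__ ((nonnull (2)));

   inside f, nonnull_arg_p would return false for the default definition
   of A and true for B, since only argument 2 appears in the "nonnull"
   list. With a bare __attribute__ ((nonnull)) every pointer argument
   would be considered non-null.  */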
381
382
383 /* Set value range VR to VR_VARYING. */
384
385 static inline void
386 set_value_range_to_varying (value_range_t *vr)
387 {
388 vr->type = VR_VARYING;
389 vr->min = vr->max = NULL_TREE;
390 if (vr->equiv)
391 bitmap_clear (vr->equiv);
392 }
393
394
395 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
396
397 static void
398 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
399 tree max, bitmap equiv)
400 {
401 #if defined ENABLE_CHECKING
402 /* Check the validity of the range. */
403 if (t == VR_RANGE || t == VR_ANTI_RANGE)
404 {
405 int cmp;
406
407 gcc_assert (min && max);
408
409 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
410 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
411
412 cmp = compare_values (min, max);
413 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
414
415 if (needs_overflow_infinity (TREE_TYPE (min)))
416 gcc_assert (!is_overflow_infinity (min)
417 || !is_overflow_infinity (max));
418 }
419
420 if (t == VR_UNDEFINED || t == VR_VARYING)
421 gcc_assert (min == NULL_TREE && max == NULL_TREE);
422
423 if (t == VR_UNDEFINED || t == VR_VARYING)
424 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
425 #endif
426
427 vr->type = t;
428 vr->min = min;
429 vr->max = max;
430
431 /* Since updating the equivalence set involves deep copying the
432 bitmaps, only do it if absolutely necessary. */
433 if (vr->equiv == NULL
434 && equiv != NULL)
435 vr->equiv = BITMAP_ALLOC (NULL);
436
437 if (equiv != vr->equiv)
438 {
439 if (equiv && !bitmap_empty_p (equiv))
440 bitmap_copy (vr->equiv, equiv);
441 else
442 bitmap_clear (vr->equiv);
443 }
444 }
445
446
447 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
448 This means adjusting T, MIN and MAX so that a wrapping range with
449 MAX < MIN, covering [MIN, type_max] U [type_min, MAX], is represented
450 as the anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
451 In corner cases where MAX+1 or MIN-1 wraps this will fall back
452 to varying.
453 This routine exists to ease canonicalization in the case where we
454 extract ranges from var + CST op limit. */
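/* An illustrative example (values not from this file): for an 8-bit
   unsigned type, the wrapping range [250, 5] (MAX < MIN) is
   canonicalized to the anti-range ~[6, 249], while an anti-range whose
   bounds are the type's extremes falls back to VR_VARYING because the
   corresponding range would be empty.  */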
455
456 static void
457 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
458 tree min, tree max, bitmap equiv)
459 {
460 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
461 if ((t != VR_RANGE
462 && t != VR_ANTI_RANGE)
463 || TREE_CODE (min) != INTEGER_CST
464 || TREE_CODE (max) != INTEGER_CST)
465 {
466 set_value_range (vr, t, min, max, equiv);
467 return;
468 }
469
470 /* MIN and MAX are in the wrong order; swapping them also requires
471 flipping the range type and adjusting the bounds. */
472 if (tree_int_cst_lt (max, min))
473 {
474 tree one = build_int_cst (TREE_TYPE (min), 1);
475 tree tmp = int_const_binop (PLUS_EXPR, max, one, 0);
476 max = int_const_binop (MINUS_EXPR, min, one, 0);
477 min = tmp;
478
479 /* There's one corner case, if we had [C+1, C] before we now have
480 that again. But this represents an empty value range, so drop
481 to varying in this case. */
482 if (tree_int_cst_lt (max, min))
483 {
484 set_value_range_to_varying (vr);
485 return;
486 }
487
488 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
489 }
490
491 /* Anti-ranges that can be represented as ranges should be so. */
492 if (t == VR_ANTI_RANGE)
493 {
494 bool is_min = vrp_val_is_min (min);
495 bool is_max = vrp_val_is_max (max);
496
497 if (is_min && is_max)
498 {
499 /* We cannot deal with empty ranges, drop to varying. */
500 set_value_range_to_varying (vr);
501 return;
502 }
503 else if (is_min
504 /* As a special exception preserve non-null ranges. */
505 && !(TYPE_UNSIGNED (TREE_TYPE (min))
506 && integer_zerop (max)))
507 {
508 tree one = build_int_cst (TREE_TYPE (max), 1);
509 min = int_const_binop (PLUS_EXPR, max, one, 0);
510 max = vrp_val_max (TREE_TYPE (max));
511 t = VR_RANGE;
512 }
513 else if (is_max)
514 {
515 tree one = build_int_cst (TREE_TYPE (min), 1);
516 max = int_const_binop (MINUS_EXPR, min, one, 0);
517 min = vrp_val_min (TREE_TYPE (min));
518 t = VR_RANGE;
519 }
520 }
521
522 set_value_range (vr, t, min, max, equiv);
523 }
524
525 /* Copy value range FROM into value range TO. */
526
527 static inline void
528 copy_value_range (value_range_t *to, value_range_t *from)
529 {
530 set_value_range (to, from->type, from->min, from->max, from->equiv);
531 }
532
533 /* Set value range VR to a single value. This function is only called
534 with values we get from statements, and exists to clear the
535 TREE_OVERFLOW flag so that we don't think we have an overflow
536 infinity when we shouldn't. */
537
538 static inline void
539 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
540 {
541 gcc_assert (is_gimple_min_invariant (val));
542 val = avoid_overflow_infinity (val);
543 set_value_range (vr, VR_RANGE, val, val, equiv);
544 }
545
546 /* Set value range VR to a non-negative range of type TYPE.
547 OVERFLOW_INFINITY indicates whether to use an overflow infinity
548 rather than TYPE_MAX_VALUE; this should be true if we determine
549 that the range is nonnegative based on the assumption that signed
550 overflow does not occur. */
551
552 static inline void
553 set_value_range_to_nonnegative (value_range_t *vr, tree type,
554 bool overflow_infinity)
555 {
556 tree zero;
557
558 if (overflow_infinity && !supports_overflow_infinity (type))
559 {
560 set_value_range_to_varying (vr);
561 return;
562 }
563
564 zero = build_int_cst (type, 0);
565 set_value_range (vr, VR_RANGE, zero,
566 (overflow_infinity
567 ? positive_overflow_infinity (type)
568 : TYPE_MAX_VALUE (type)),
569 vr->equiv);
570 }
571
572 /* Set value range VR to a non-NULL range of type TYPE. */
573
574 static inline void
575 set_value_range_to_nonnull (value_range_t *vr, tree type)
576 {
577 tree zero = build_int_cst (type, 0);
578 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
579 }
580
581
582 /* Set value range VR to a NULL range of type TYPE. */
583
584 static inline void
585 set_value_range_to_null (value_range_t *vr, tree type)
586 {
587 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
588 }
589
590
591 /* Set value range VR to a range of a truthvalue of type TYPE. */
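/* Note (rationale inferred from the code below): for a 1-bit type the
   range [0, 1] already covers every value of the type, so nothing
   useful is learned and the range drops to VR_VARYING instead.  */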
592
593 static inline void
594 set_value_range_to_truthvalue (value_range_t *vr, tree type)
595 {
596 if (TYPE_PRECISION (type) == 1)
597 set_value_range_to_varying (vr);
598 else
599 set_value_range (vr, VR_RANGE,
600 build_int_cst (type, 0), build_int_cst (type, 1),
601 vr->equiv);
602 }
603
604
605 /* Set value range VR to VR_UNDEFINED. */
606
607 static inline void
608 set_value_range_to_undefined (value_range_t *vr)
609 {
610 vr->type = VR_UNDEFINED;
611 vr->min = vr->max = NULL_TREE;
612 if (vr->equiv)
613 bitmap_clear (vr->equiv);
614 }
615
616
617 /* If abs (min) < abs (max), set VR to [-max, max]; if
618 abs (min) >= abs (max), set VR to [-min, min]. */
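/* For example (illustrative values): for the signed range [-10, 3],
   abs (min) = 10 >= abs (max) = 3, so the result is [-10, 10]; for
   [-3, 10] the result is also [-10, 10], this time via the
   [-max, max] case.  */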
619
620 static void
621 abs_extent_range (value_range_t *vr, tree min, tree max)
622 {
623 int cmp;
624
625 gcc_assert (TREE_CODE (min) == INTEGER_CST);
626 gcc_assert (TREE_CODE (max) == INTEGER_CST);
627 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
628 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
629 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
630 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
631 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
632 {
633 set_value_range_to_varying (vr);
634 return;
635 }
636 cmp = compare_values (min, max);
637 if (cmp == -1)
638 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
639 else if (cmp == 0 || cmp == 1)
640 {
641 max = min;
642 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
643 }
644 else
645 {
646 set_value_range_to_varying (vr);
647 return;
648 }
649 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
650 }
651
652
653 /* Return value range information for VAR.
654
655 If we have no value ranges recorded (i.e., VRP is not running), then
656 return NULL. Otherwise create an empty range if none existed for VAR. */
657
658 static value_range_t *
659 get_value_range (const_tree var)
660 {
661 value_range_t *vr;
662 tree sym;
663 unsigned ver = SSA_NAME_VERSION (var);
664
665 /* If we have no recorded ranges, then return NULL. */
666 if (! vr_value)
667 return NULL;
668
669 vr = vr_value[ver];
670 if (vr)
671 return vr;
672
673 /* Create a default value range. */
674 vr_value[ver] = vr = XCNEW (value_range_t);
675
676 /* Defer allocating the equivalence set. */
677 vr->equiv = NULL;
678
679 /* If VAR is a default definition, the variable can take any value
680 in VAR's type. */
681 sym = SSA_NAME_VAR (var);
682 if (SSA_NAME_IS_DEFAULT_DEF (var))
683 {
684 /* Try to use the "nonnull" attribute to create ~[0, 0]
685 anti-ranges for pointers. Note that this is only valid with
686 default definitions of PARM_DECLs. */
687 if (TREE_CODE (sym) == PARM_DECL
688 && POINTER_TYPE_P (TREE_TYPE (sym))
689 && nonnull_arg_p (sym))
690 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
691 else
692 set_value_range_to_varying (vr);
693 }
694
695 return vr;
696 }
697
698 /* Return true if VAL1 and VAL2 are equal values for VRP purposes. */
699
700 static inline bool
701 vrp_operand_equal_p (const_tree val1, const_tree val2)
702 {
703 if (val1 == val2)
704 return true;
705 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
706 return false;
707 if (is_overflow_infinity (val1))
708 return is_overflow_infinity (val2);
709 return true;
710 }
711
712 /* Return true if the bitmaps B1 and B2 are equal. */
713
714 static inline bool
715 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
716 {
717 return (b1 == b2
718 || (b1 && b2
719 && bitmap_equal_p (b1, b2)));
720 }
721
722 /* Update the value range and equivalence set for variable VAR to
723 NEW_VR. Return true if NEW_VR is different from VAR's previous
724 value.
725
726 NOTE: This function assumes that NEW_VR is a temporary value range
727 object created for the sole purpose of updating VAR's range. The
728 storage used by the equivalence set from NEW_VR will be freed by
729 this function. Do not call update_value_range when NEW_VR
730 is the range object associated with another SSA name. */
731
732 static inline bool
733 update_value_range (const_tree var, value_range_t *new_vr)
734 {
735 value_range_t *old_vr;
736 bool is_new;
737
738 /* Update the value range, if necessary. */
739 old_vr = get_value_range (var);
740 is_new = old_vr->type != new_vr->type
741 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
742 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
743 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
744
745 if (is_new)
746 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
747 new_vr->equiv);
748
749 BITMAP_FREE (new_vr->equiv);
750
751 return is_new;
752 }
753
754
755 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
756 point where equivalence processing can be turned on/off. */
757
758 static void
759 add_equivalence (bitmap *equiv, const_tree var)
760 {
761 unsigned ver = SSA_NAME_VERSION (var);
762 value_range_t *vr = vr_value[ver];
763
764 if (*equiv == NULL)
765 *equiv = BITMAP_ALLOC (NULL);
766 bitmap_set_bit (*equiv, ver);
767 if (vr && vr->equiv)
768 bitmap_ior_into (*equiv, vr->equiv);
769 }
770
771
772 /* Return true if VR is ~[0, 0]. */
773
774 static inline bool
775 range_is_nonnull (value_range_t *vr)
776 {
777 return vr->type == VR_ANTI_RANGE
778 && integer_zerop (vr->min)
779 && integer_zerop (vr->max);
780 }
781
782
783 /* Return true if VR is [0, 0]. */
784
785 static inline bool
786 range_is_null (value_range_t *vr)
787 {
788 return vr->type == VR_RANGE
789 && integer_zerop (vr->min)
790 && integer_zerop (vr->max);
791 }
792
793 /* Return true if max and min of VR are INTEGER_CST. It's not
794 necessarily a singleton. */
795
796 static inline bool
797 range_int_cst_p (value_range_t *vr)
798 {
799 return (vr->type == VR_RANGE
800 && TREE_CODE (vr->max) == INTEGER_CST
801 && TREE_CODE (vr->min) == INTEGER_CST
802 && !TREE_OVERFLOW (vr->max)
803 && !TREE_OVERFLOW (vr->min));
804 }
805
806 /* Return true if VR is an INTEGER_CST singleton. */
807
808 static inline bool
809 range_int_cst_singleton_p (value_range_t *vr)
810 {
811 return (range_int_cst_p (vr)
812 && tree_int_cst_equal (vr->min, vr->max));
813 }
814
815 /* Return true if value range VR involves at least one symbol. */
816
817 static inline bool
818 symbolic_range_p (value_range_t *vr)
819 {
820 return (!is_gimple_min_invariant (vr->min)
821 || !is_gimple_min_invariant (vr->max));
822 }
823
824 /* Return true if value range VR uses an overflow infinity. */
825
826 static inline bool
827 overflow_infinity_range_p (value_range_t *vr)
828 {
829 return (vr->type == VR_RANGE
830 && (is_overflow_infinity (vr->min)
831 || is_overflow_infinity (vr->max)));
832 }
833
834 /* Return false if we cannot make a valid comparison based on VR;
835 this will be the case if it uses an overflow infinity and overflow
836 is not undefined (i.e., -fno-strict-overflow is in effect).
837 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
838 uses an overflow infinity. */
839
840 static bool
841 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
842 {
843 gcc_assert (vr->type == VR_RANGE);
844 if (is_overflow_infinity (vr->min))
845 {
846 *strict_overflow_p = true;
847 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
848 return false;
849 }
850 if (is_overflow_infinity (vr->max))
851 {
852 *strict_overflow_p = true;
853 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
854 return false;
855 }
856 return true;
857 }
858
859
860 /* Like tree_expr_nonnegative_warnv_p, but this function uses value
861 ranges obtained so far. */
862
863 static bool
864 vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
865 {
866 return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
867 || (TREE_CODE (expr) == SSA_NAME
868 && ssa_name_nonnegative_p (expr)));
869 }
870
871 /* Return true if the result of assignment STMT is known to be non-negative.
872 If the return value is based on the assumption that signed overflow is
873 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
874 *STRICT_OVERFLOW_P.*/
875
876 static bool
877 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
878 {
879 enum tree_code code = gimple_assign_rhs_code (stmt);
880 switch (get_gimple_rhs_class (code))
881 {
882 case GIMPLE_UNARY_RHS:
883 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
884 gimple_expr_type (stmt),
885 gimple_assign_rhs1 (stmt),
886 strict_overflow_p);
887 case GIMPLE_BINARY_RHS:
888 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
889 gimple_expr_type (stmt),
890 gimple_assign_rhs1 (stmt),
891 gimple_assign_rhs2 (stmt),
892 strict_overflow_p);
893 case GIMPLE_TERNARY_RHS:
894 return false;
895 case GIMPLE_SINGLE_RHS:
896 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
897 strict_overflow_p);
898 case GIMPLE_INVALID_RHS:
899 gcc_unreachable ();
900 default:
901 gcc_unreachable ();
902 }
903 }
904
905 /* Return true if the return value of call STMT is known to be non-negative.
906 If the return value is based on the assumption that signed overflow is
907 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
908 *STRICT_OVERFLOW_P.*/
909
910 static bool
911 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
912 {
913 tree arg0 = gimple_call_num_args (stmt) > 0 ?
914 gimple_call_arg (stmt, 0) : NULL_TREE;
915 tree arg1 = gimple_call_num_args (stmt) > 1 ?
916 gimple_call_arg (stmt, 1) : NULL_TREE;
917
918 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
919 gimple_call_fndecl (stmt),
920 arg0,
921 arg1,
922 strict_overflow_p);
923 }
924
925 /* Return true if STMT is known to compute a non-negative value.
926 If the return value is based on the assumption that signed overflow is
927 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
928 *STRICT_OVERFLOW_P.*/
929
930 static bool
931 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
932 {
933 switch (gimple_code (stmt))
934 {
935 case GIMPLE_ASSIGN:
936 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
937 case GIMPLE_CALL:
938 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
939 default:
940 gcc_unreachable ();
941 }
942 }
943
944 /* Return true if the result of assignment STMT is known to be non-zero.
945 If the return value is based on the assumption that signed overflow is
946 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
947 *STRICT_OVERFLOW_P.*/
948
949 static bool
950 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
951 {
952 enum tree_code code = gimple_assign_rhs_code (stmt);
953 switch (get_gimple_rhs_class (code))
954 {
955 case GIMPLE_UNARY_RHS:
956 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
957 gimple_expr_type (stmt),
958 gimple_assign_rhs1 (stmt),
959 strict_overflow_p);
960 case GIMPLE_BINARY_RHS:
961 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
962 gimple_expr_type (stmt),
963 gimple_assign_rhs1 (stmt),
964 gimple_assign_rhs2 (stmt),
965 strict_overflow_p);
966 case GIMPLE_TERNARY_RHS:
967 return false;
968 case GIMPLE_SINGLE_RHS:
969 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
970 strict_overflow_p);
971 case GIMPLE_INVALID_RHS:
972 gcc_unreachable ();
973 default:
974 gcc_unreachable ();
975 }
976 }
977
978 /* Return true if STMT is known to compute a non-zero value.
979 If the return value is based on the assumption that signed overflow is
980 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
981 *STRICT_OVERFLOW_P.*/
982
983 static bool
984 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
985 {
986 switch (gimple_code (stmt))
987 {
988 case GIMPLE_ASSIGN:
989 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
990 case GIMPLE_CALL:
991 return gimple_alloca_call_p (stmt);
992 default:
993 gcc_unreachable ();
994 }
995 }
996
997 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
998 obtained so far. */
999
1000 static bool
1001 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1002 {
1003 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1004 return true;
1005
1006 /* If we have an expression of the form &X->a, then the expression
1007 is nonnull if X is nonnull. */
1008 if (is_gimple_assign (stmt)
1009 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1010 {
1011 tree expr = gimple_assign_rhs1 (stmt);
1012 tree base = get_base_address (TREE_OPERAND (expr, 0));
1013
1014 if (base != NULL_TREE
1015 && TREE_CODE (base) == MEM_REF
1016 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1017 {
1018 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1019 if (range_is_nonnull (vr))
1020 return true;
1021 }
1022 }
1023
1024 return false;
1025 }
1026
1027 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1028 a gimple invariant, or SSA_NAME +- CST. */
1029
1030 static bool
1031 valid_value_p (tree expr)
1032 {
1033 if (TREE_CODE (expr) == SSA_NAME)
1034 return true;
1035
1036 if (TREE_CODE (expr) == PLUS_EXPR
1037 || TREE_CODE (expr) == MINUS_EXPR)
1038 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1039 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1040
1041 return is_gimple_min_invariant (expr);
1042 }
1043
1044 /* Return
1045 1 if VAL < VAL2
1046 0 if !(VAL < VAL2)
1047 -2 if those are incomparable. */
1048 static inline int
1049 operand_less_p (tree val, tree val2)
1050 {
1051 /* LT is folded faster than GE and others. Inline the common case. */
1052 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1053 {
1054 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1055 return INT_CST_LT_UNSIGNED (val, val2);
1056 else
1057 {
1058 if (INT_CST_LT (val, val2))
1059 return 1;
1060 }
1061 }
1062 else
1063 {
1064 tree tcmp;
1065
1066 fold_defer_overflow_warnings ();
1067
1068 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1069
1070 fold_undefer_and_ignore_overflow_warnings ();
1071
1072 if (!tcmp
1073 || TREE_CODE (tcmp) != INTEGER_CST)
1074 return -2;
1075
1076 if (!integer_zerop (tcmp))
1077 return 1;
1078 }
1079
1080 /* val >= val2, not considering overflow infinity. */
1081 if (is_negative_overflow_infinity (val))
1082 return is_negative_overflow_infinity (val2) ? 0 : 1;
1083 else if (is_positive_overflow_infinity (val2))
1084 return is_positive_overflow_infinity (val) ? 0 : 1;
1085
1086 return 0;
1087 }
1088
1089 /* Compare two values VAL1 and VAL2. Return
1090
1091 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1092 -1 if VAL1 < VAL2,
1093 0 if VAL1 == VAL2,
1094 +1 if VAL1 > VAL2, and
1095 +2 if VAL1 != VAL2
1096
1097 This is similar to tree_int_cst_compare but supports pointer values
1098 and values that cannot be compared at compile time.
1099
1100 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1101 true if the return value is only valid if we assume that signed
1102 overflow is undefined. */
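/* A couple of illustrative outcomes (the SSA names are hypothetical):
   with signed overflow assumed undefined, comparing n_1 + 4 against
   n_1 + 7 yields -1 and typically sets *STRICT_OVERFLOW_P, because only
   the constants need to be compared; comparing n_1 against an unrelated
   name m_2 yields -2, since nothing is known about their relative
   order.  */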
1103
1104 static int
1105 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1106 {
1107 if (val1 == val2)
1108 return 0;
1109
1110 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1111 both integers. */
1112 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1113 == POINTER_TYPE_P (TREE_TYPE (val2)));
1114 /* Convert the two values into the same type. This is needed because
1115 sizetype causes sign extension even for unsigned types. */
1116 val2 = fold_convert (TREE_TYPE (val1), val2);
1117 STRIP_USELESS_TYPE_CONVERSION (val2);
1118
1119 if ((TREE_CODE (val1) == SSA_NAME
1120 || TREE_CODE (val1) == PLUS_EXPR
1121 || TREE_CODE (val1) == MINUS_EXPR)
1122 && (TREE_CODE (val2) == SSA_NAME
1123 || TREE_CODE (val2) == PLUS_EXPR
1124 || TREE_CODE (val2) == MINUS_EXPR))
1125 {
1126 tree n1, c1, n2, c2;
1127 enum tree_code code1, code2;
1128
1129 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1130 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1131 same name, return -2. */
1132 if (TREE_CODE (val1) == SSA_NAME)
1133 {
1134 code1 = SSA_NAME;
1135 n1 = val1;
1136 c1 = NULL_TREE;
1137 }
1138 else
1139 {
1140 code1 = TREE_CODE (val1);
1141 n1 = TREE_OPERAND (val1, 0);
1142 c1 = TREE_OPERAND (val1, 1);
1143 if (tree_int_cst_sgn (c1) == -1)
1144 {
1145 if (is_negative_overflow_infinity (c1))
1146 return -2;
1147 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1148 if (!c1)
1149 return -2;
1150 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1151 }
1152 }
1153
1154 if (TREE_CODE (val2) == SSA_NAME)
1155 {
1156 code2 = SSA_NAME;
1157 n2 = val2;
1158 c2 = NULL_TREE;
1159 }
1160 else
1161 {
1162 code2 = TREE_CODE (val2);
1163 n2 = TREE_OPERAND (val2, 0);
1164 c2 = TREE_OPERAND (val2, 1);
1165 if (tree_int_cst_sgn (c2) == -1)
1166 {
1167 if (is_negative_overflow_infinity (c2))
1168 return -2;
1169 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1170 if (!c2)
1171 return -2;
1172 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1173 }
1174 }
1175
1176 /* Both values must use the same name. */
1177 if (n1 != n2)
1178 return -2;
1179
1180 if (code1 == SSA_NAME
1181 && code2 == SSA_NAME)
1182 /* NAME == NAME */
1183 return 0;
1184
1185 /* If overflow is defined we cannot simplify more. */
1186 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1187 return -2;
1188
1189 if (strict_overflow_p != NULL
1190 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1191 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1192 *strict_overflow_p = true;
1193
1194 if (code1 == SSA_NAME)
1195 {
1196 if (code2 == PLUS_EXPR)
1197 /* NAME < NAME + CST */
1198 return -1;
1199 else if (code2 == MINUS_EXPR)
1200 /* NAME > NAME - CST */
1201 return 1;
1202 }
1203 else if (code1 == PLUS_EXPR)
1204 {
1205 if (code2 == SSA_NAME)
1206 /* NAME + CST > NAME */
1207 return 1;
1208 else if (code2 == PLUS_EXPR)
1209 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1210 return compare_values_warnv (c1, c2, strict_overflow_p);
1211 else if (code2 == MINUS_EXPR)
1212 /* NAME + CST1 > NAME - CST2 */
1213 return 1;
1214 }
1215 else if (code1 == MINUS_EXPR)
1216 {
1217 if (code2 == SSA_NAME)
1218 /* NAME - CST < NAME */
1219 return -1;
1220 else if (code2 == PLUS_EXPR)
1221 /* NAME - CST1 < NAME + CST2 */
1222 return -1;
1223 else if (code2 == MINUS_EXPR)
1224 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1225 C1 and C2 are swapped in the call to compare_values. */
1226 return compare_values_warnv (c2, c1, strict_overflow_p);
1227 }
1228
1229 gcc_unreachable ();
1230 }
1231
1232 /* We cannot compare non-constants. */
1233 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1234 return -2;
1235
1236 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1237 {
1238 /* We cannot compare overflowed values, except for overflow
1239 infinities. */
1240 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1241 {
1242 if (strict_overflow_p != NULL)
1243 *strict_overflow_p = true;
1244 if (is_negative_overflow_infinity (val1))
1245 return is_negative_overflow_infinity (val2) ? 0 : -1;
1246 else if (is_negative_overflow_infinity (val2))
1247 return 1;
1248 else if (is_positive_overflow_infinity (val1))
1249 return is_positive_overflow_infinity (val2) ? 0 : 1;
1250 else if (is_positive_overflow_infinity (val2))
1251 return -1;
1252 return -2;
1253 }
1254
1255 return tree_int_cst_compare (val1, val2);
1256 }
1257 else
1258 {
1259 tree t;
1260
1261 /* First see if VAL1 and VAL2 are not the same. */
1262 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1263 return 0;
1264
1265 /* If VAL1 is a lower address than VAL2, return -1. */
1266 if (operand_less_p (val1, val2) == 1)
1267 return -1;
1268
1269 /* If VAL1 is a higher address than VAL2, return +1. */
1270 if (operand_less_p (val2, val1) == 1)
1271 return 1;
1272
1273 /* If VAL1 is different than VAL2, return +2.
1274 For integer constants we either have already returned -1 or 1
1275 or they are equivalent. We still might succeed in proving
1276 something about non-trivial operands. */
1277 if (TREE_CODE (val1) != INTEGER_CST
1278 || TREE_CODE (val2) != INTEGER_CST)
1279 {
1280 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1281 if (t && integer_onep (t))
1282 return 2;
1283 }
1284
1285 return -2;
1286 }
1287 }
1288
1289 /* Compare values like compare_values_warnv, but treat comparisons of
1290 nonconstants which rely on undefined overflow as incomparable. */
1291
1292 static int
1293 compare_values (tree val1, tree val2)
1294 {
1295 bool sop;
1296 int ret;
1297
1298 sop = false;
1299 ret = compare_values_warnv (val1, val2, &sop);
1300 if (sop
1301 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1302 ret = -2;
1303 return ret;
1304 }
1305
1306
1307 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1308 0 if VAL is not inside VR,
1309 -2 if we cannot tell either way.
1310
1311 FIXME, the current semantics of this function are a bit quirky
1312 when taken in the context of VRP. In here we do not care
1313 about VR's type. If VR is the anti-range ~[3, 5] the call
1314 value_inside_range (4, VR) will return 1.
1315
1316 This is counter-intuitive in a strict sense, but the callers
1317 currently expect this. They are calling the function
1318 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1319 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1320 themselves.
1321
1322 This also applies to value_ranges_intersect_p and
1323 range_includes_zero_p. The semantics of VR_RANGE and
1324 VR_ANTI_RANGE should be encoded here, but that also means
1325 adapting the users of these functions to the new semantics.
1326
1327 Benchmark compile/20001226-1.c compilation time after changing this
1328 function. */
1329
1330 static inline int
1331 value_inside_range (tree val, value_range_t * vr)
1332 {
1333 int cmp1, cmp2;
1334
1335 cmp1 = operand_less_p (val, vr->min);
1336 if (cmp1 == -2)
1337 return -2;
1338 if (cmp1 == 1)
1339 return 0;
1340
1341 cmp2 = operand_less_p (vr->max, val);
1342 if (cmp2 == -2)
1343 return -2;
1344
1345 return !cmp2;
1346 }
1347
1348
1349 /* Return true if value ranges VR0 and VR1 have a non-empty
1350 intersection.
1351
1352 Benchmark compile/20001226-1.c compilation time after changing this
1353 function.
1354 */
1355
1356 static inline bool
1357 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1358 {
1359 /* The value ranges do not intersect if the maximum of the first range is
1360 less than the minimum of the second range or vice versa.
1361 When those relations are unknown, we can't do any better. */
1362 if (operand_less_p (vr0->max, vr1->min) != 0)
1363 return false;
1364 if (operand_less_p (vr1->max, vr0->min) != 0)
1365 return false;
1366 return true;
1367 }
1368
1369
1370 /* Return true if VR includes the value zero, false otherwise. FIXME,
1371 currently this will return false for an anti-range like ~[-4, 3].
1372 This will be wrong when the semantics of value_inside_range are
1373 modified (currently the users of this function expect these
1374 semantics). */
1375
1376 static inline bool
1377 range_includes_zero_p (value_range_t *vr)
1378 {
1379 tree zero;
1380
1381 gcc_assert (vr->type != VR_UNDEFINED
1382 && vr->type != VR_VARYING
1383 && !symbolic_range_p (vr));
1384
1385 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1386 return (value_inside_range (zero, vr) == 1);
1387 }
1388
1389 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1390 false otherwise or if no value range information is available. */
1391
1392 bool
1393 ssa_name_nonnegative_p (const_tree t)
1394 {
1395 value_range_t *vr = get_value_range (t);
1396
1397 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
1398 && TYPE_UNSIGNED (TREE_TYPE (t)))
1399 return true;
1400
1401 if (!vr)
1402 return false;
1403
1404 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1405 which would return a useful value should be encoded as a VR_RANGE. */
1406 if (vr->type == VR_RANGE)
1407 {
1408 int result = compare_values (vr->min, integer_zero_node);
1409
1410 return (result == 0 || result == 1);
1411 }
1412 return false;
1413 }
1414
1415 /* If OP has a value range with a single constant value return that,
1416 otherwise return NULL_TREE. This returns OP itself if OP is a
1417 constant. */
1418
1419 static tree
1420 op_with_constant_singleton_value_range (tree op)
1421 {
1422 value_range_t *vr;
1423
1424 if (is_gimple_min_invariant (op))
1425 return op;
1426
1427 if (TREE_CODE (op) != SSA_NAME)
1428 return NULL_TREE;
1429
1430 vr = get_value_range (op);
1431 if (vr->type == VR_RANGE
1432 && operand_equal_p (vr->min, vr->max, 0)
1433 && is_gimple_min_invariant (vr->min))
1434 return vr->min;
1435
1436 return NULL_TREE;
1437 }
1438
1439
1440 /* Extract value range information from an ASSERT_EXPR EXPR and store
1441 it in *VR_P. */
1442
1443 static void
1444 extract_range_from_assert (value_range_t *vr_p, tree expr)
1445 {
1446 tree var, cond, limit, min, max, type;
1447 value_range_t *var_vr, *limit_vr;
1448 enum tree_code cond_code;
1449
1450 var = ASSERT_EXPR_VAR (expr);
1451 cond = ASSERT_EXPR_COND (expr);
1452
1453 gcc_assert (COMPARISON_CLASS_P (cond));
1454
1455 /* Find VAR in the ASSERT_EXPR conditional. */
1456 if (var == TREE_OPERAND (cond, 0)
1457 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1458 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1459 {
1460 /* If the predicate is of the form VAR COMP LIMIT, then we just
1461 take LIMIT from the RHS and use the same comparison code. */
1462 cond_code = TREE_CODE (cond);
1463 limit = TREE_OPERAND (cond, 1);
1464 cond = TREE_OPERAND (cond, 0);
1465 }
1466 else
1467 {
1468 /* If the predicate is of the form LIMIT COMP VAR, then we need
1469 to flip around the comparison code to create the proper range
1470 for VAR. */
1471 cond_code = swap_tree_comparison (TREE_CODE (cond));
1472 limit = TREE_OPERAND (cond, 0);
1473 cond = TREE_OPERAND (cond, 1);
1474 }
1475
1476 limit = avoid_overflow_infinity (limit);
1477
1478 type = TREE_TYPE (limit);
1479 gcc_assert (limit != var);
1480
1481 /* For pointer arithmetic, we only keep track of pointer equality
1482 and inequality. */
1483 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1484 {
1485 set_value_range_to_varying (vr_p);
1486 return;
1487 }
1488
1489 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1490 try to use LIMIT's range to avoid creating symbolic ranges
1491 unnecessarily. */
1492 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1493
1494 /* LIMIT's range is only interesting if it has any useful information. */
1495 if (limit_vr
1496 && (limit_vr->type == VR_UNDEFINED
1497 || limit_vr->type == VR_VARYING
1498 || symbolic_range_p (limit_vr)))
1499 limit_vr = NULL;
1500
1501 /* Initially, the new range has the same set of equivalences of
1502 VAR's range. This will be revised before returning the final
1503 value. Since assertions may be chained via mutually exclusive
1504 predicates, we will need to trim the set of equivalences before
1505 we are done. */
1506 gcc_assert (vr_p->equiv == NULL);
1507 add_equivalence (&vr_p->equiv, var);
1508
1509 /* Extract a new range based on the asserted comparison for VAR and
1510 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1511 will only use it for equality comparisons (EQ_EXPR). For any
1512 other kind of assertion, we cannot derive a range from LIMIT's
1513 anti-range that can be used to describe the new range. For
1514 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1515 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1516 no single range for x_2 that could describe LE_EXPR, so we might
1517 as well build the range [b_4, +INF] for it.
1518 One special case we handle is extracting a range from a
1519 range test encoded as (unsigned)var + CST <= limit. */
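/* For instance (illustrative, assuming a 32-bit unsigned VAR): the test
   5 <= x && x <= 10 is commonly encoded as
   (unsigned) x + 4294967291 <= 5, i.e. x - 5 <= 5. Negating the
   constant gives MIN = 5, and MAX = LIMIT + MIN = 10, so the LE_EXPR
   case below recovers the range [5, 10].  */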
1520 if (TREE_CODE (cond) == NOP_EXPR
1521 || TREE_CODE (cond) == PLUS_EXPR)
1522 {
1523 if (TREE_CODE (cond) == PLUS_EXPR)
1524 {
1525 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1526 TREE_OPERAND (cond, 1));
1527 max = int_const_binop (PLUS_EXPR, limit, min, 0);
1528 cond = TREE_OPERAND (cond, 0);
1529 }
1530 else
1531 {
1532 min = build_int_cst (TREE_TYPE (var), 0);
1533 max = limit;
1534 }
1535
1536 /* Make sure to not set TREE_OVERFLOW on the final type
1537 conversion. We are willingly interpreting large positive
1538 unsigned values as negative signed values here. */
1539 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1540 0, false);
1541 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1542 0, false);
1543
1544 /* We can transform a max, min range to an anti-range or
1545 vice-versa. Use set_and_canonicalize_value_range which does
1546 this for us. */
1547 if (cond_code == LE_EXPR)
1548 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1549 min, max, vr_p->equiv);
1550 else if (cond_code == GT_EXPR)
1551 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1552 min, max, vr_p->equiv);
1553 else
1554 gcc_unreachable ();
1555 }
1556 else if (cond_code == EQ_EXPR)
1557 {
1558 enum value_range_type range_type;
1559
1560 if (limit_vr)
1561 {
1562 range_type = limit_vr->type;
1563 min = limit_vr->min;
1564 max = limit_vr->max;
1565 }
1566 else
1567 {
1568 range_type = VR_RANGE;
1569 min = limit;
1570 max = limit;
1571 }
1572
1573 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1574
1575 /* When asserting the equality VAR == LIMIT and LIMIT is another
1576 SSA name, the new range will also inherit the equivalence set
1577 from LIMIT. */
1578 if (TREE_CODE (limit) == SSA_NAME)
1579 add_equivalence (&vr_p->equiv, limit);
1580 }
1581 else if (cond_code == NE_EXPR)
1582 {
1583 /* As described above, when LIMIT's range is an anti-range and
1584 this assertion is an inequality (NE_EXPR), then we cannot
1585 derive anything from the anti-range. For instance, if
1586 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1587 not imply that VAR's range is [0, 0]. So, in the case of
1588 anti-ranges, we just assert the inequality using LIMIT and
1589 not its anti-range.
1590
1591 If LIMIT_VR is a range, we can only use it to build a new
1592 anti-range if LIMIT_VR is a single-valued range. For
1593 instance, if LIMIT_VR is [0, 1], the predicate
1594 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1595 Rather, it means that for value 0 VAR should be ~[0, 0]
1596 and for value 1, VAR should be ~[1, 1]. We cannot
1597 represent these ranges.
1598
1599 The only situation in which we can build a valid
1600 anti-range is when LIMIT_VR is a single-valued range
1601 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1602 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1603 if (limit_vr
1604 && limit_vr->type == VR_RANGE
1605 && compare_values (limit_vr->min, limit_vr->max) == 0)
1606 {
1607 min = limit_vr->min;
1608 max = limit_vr->max;
1609 }
1610 else
1611 {
1612 /* In any other case, we cannot use LIMIT's range to build a
1613 valid anti-range. */
1614 min = max = limit;
1615 }
1616
1617 /* If MIN and MAX cover the whole range for their type, then
1618 just use the original LIMIT. */
1619 if (INTEGRAL_TYPE_P (type)
1620 && vrp_val_is_min (min)
1621 && vrp_val_is_max (max))
1622 min = max = limit;
1623
1624 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1625 }
1626 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1627 {
1628 min = TYPE_MIN_VALUE (type);
1629
1630 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1631 max = limit;
1632 else
1633 {
1634 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1635 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1636 LT_EXPR. */
1637 max = limit_vr->max;
1638 }
1639
1640 /* If the maximum value forces us to be out of bounds, simply punt.
1641 It would be pointless to try and do anything more since this
1642 all should be optimized away above us. */
1643 if ((cond_code == LT_EXPR
1644 && compare_values (max, min) == 0)
1645 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1646 set_value_range_to_varying (vr_p);
1647 else
1648 {
1649 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1650 if (cond_code == LT_EXPR)
1651 {
1652 tree one = build_int_cst (type, 1);
1653 max = fold_build2 (MINUS_EXPR, type, max, one);
1654 if (EXPR_P (max))
1655 TREE_NO_WARNING (max) = 1;
1656 }
1657
1658 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1659 }
1660 }
1661 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1662 {
1663 max = TYPE_MAX_VALUE (type);
1664
1665 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1666 min = limit;
1667 else
1668 {
1669 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1670 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1671 GT_EXPR. */
1672 min = limit_vr->min;
1673 }
1674
1675 /* If the minimum value forces us to be out of bounds, simply punt.
1676 It would be pointless to try and do anything more since this
1677 all should be optimized away above us. */
1678 if ((cond_code == GT_EXPR
1679 && compare_values (min, max) == 0)
1680 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1681 set_value_range_to_varying (vr_p);
1682 else
1683 {
1684 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1685 if (cond_code == GT_EXPR)
1686 {
1687 tree one = build_int_cst (type, 1);
1688 min = fold_build2 (PLUS_EXPR, type, min, one);
1689 if (EXPR_P (min))
1690 TREE_NO_WARNING (min) = 1;
1691 }
1692
1693 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1694 }
1695 }
1696 else
1697 gcc_unreachable ();
1698
1699 /* If VAR already had a known range, it may happen that the new
1700 range we have computed and VAR's range are not compatible. For
1701 instance,
1702
1703 if (p_5 == NULL)
1704 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1705 x_7 = p_6->fld;
1706 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1707
1708 While the above comes from a faulty program, it will cause an ICE
1709 later because p_8 and p_6 will have incompatible ranges and at
1710 the same time will be considered equivalent. A similar situation
1711 would arise from
1712
1713 if (i_5 > 10)
1714 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1715 if (i_5 < 5)
1716 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1717
1718 Again i_6 and i_7 will have incompatible ranges. It would be
1719 pointless to try and do anything with i_7's range because
1720 anything dominated by 'if (i_5 < 5)' will be optimized away.
1721 Note, due to the way in which simulation proceeds, the statement
1722 i_7 = ASSERT_EXPR <...> would never be visited because the
1723 conditional 'if (i_5 < 5)' always evaluates to false. However,
1724 this extra check does not hurt and may protect against future
1725 changes to VRP that may get into a situation similar to the
1726 NULL pointer dereference example.
1727
1728 Note that these compatibility tests are only needed when dealing
1729 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1730 are both anti-ranges, they will always be compatible, because two
1731 anti-ranges will always have a non-empty intersection. */
1732
1733 var_vr = get_value_range (var);
1734
1735 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1736 ranges or anti-ranges. */
1737 if (vr_p->type == VR_VARYING
1738 || vr_p->type == VR_UNDEFINED
1739 || var_vr->type == VR_VARYING
1740 || var_vr->type == VR_UNDEFINED
1741 || symbolic_range_p (vr_p)
1742 || symbolic_range_p (var_vr))
1743 return;
1744
1745 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1746 {
1747 /* If the two ranges have a non-empty intersection, we can
1748 refine the resulting range. Since the assert expression
1749 creates an equivalency and at the same time it asserts a
1750 predicate, we can take the intersection of the two ranges to
1751 get better precision. */
1752 if (value_ranges_intersect_p (var_vr, vr_p))
1753 {
1754 /* Use the larger of the two minimums. */
1755 if (compare_values (vr_p->min, var_vr->min) == -1)
1756 min = var_vr->min;
1757 else
1758 min = vr_p->min;
1759
1760 /* Use the smaller of the two maximums. */
1761 if (compare_values (vr_p->max, var_vr->max) == 1)
1762 max = var_vr->max;
1763 else
1764 max = vr_p->max;
1765
1766 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1767 }
1768 else
1769 {
1770 /* The two ranges do not intersect, set the new range to
1771 VARYING, because we will not be able to do anything
1772 meaningful with it. */
1773 set_value_range_to_varying (vr_p);
1774 }
1775 }
1776 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1777 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1778 {
1779 /* A range and an anti-range will cancel each other only if
1780 their ends are the same. For instance, in the example above,
1781 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1782 so VR_P should be set to VR_VARYING. */
1783 if (compare_values (var_vr->min, vr_p->min) == 0
1784 && compare_values (var_vr->max, vr_p->max) == 0)
1785 set_value_range_to_varying (vr_p);
1786 else
1787 {
1788 tree min, max, anti_min, anti_max, real_min, real_max;
1789 int cmp;
1790
1791 /* We want to compute the logical AND of the two ranges;
1792 there are three cases to consider.
1793
1794
1795 1. The VR_ANTI_RANGE range is completely within the
1796 VR_RANGE and the endpoints of the ranges are
1797 different. In that case the resulting range
1798 should be whichever range is more precise.
1799 Typically that will be the VR_RANGE.
1800
1801 2. The VR_ANTI_RANGE is completely disjoint from
1802 the VR_RANGE. In this case the resulting range
1803 should be the VR_RANGE.
1804
1805 3. There is some overlap between the VR_ANTI_RANGE
1806 and the VR_RANGE.
1807
1808 3a. If the high limit of the VR_ANTI_RANGE resides
1809 within the VR_RANGE, then the result is a new
1810 VR_RANGE starting at the high limit of the
1811 VR_ANTI_RANGE + 1 and extending to the
1812 high limit of the original VR_RANGE.
1813
1814 3b. If the low limit of the VR_ANTI_RANGE resides
1815 within the VR_RANGE, then the result is a new
1816 VR_RANGE starting at the low limit of the original
1817 VR_RANGE and extending to the low limit of the
1818 VR_ANTI_RANGE - 1. */
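/* Worked example (illustrative values): with the real range [0, 10]
   and the anti-range ~[3, 12], the anti-range overlaps only the high
   part of the range, so case 3b applies and the result is the new
   range [0, 2]. With ~[20, 30] instead, case 2 applies and [0, 10]
   is kept unchanged.  */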
1819 if (vr_p->type == VR_ANTI_RANGE)
1820 {
1821 anti_min = vr_p->min;
1822 anti_max = vr_p->max;
1823 real_min = var_vr->min;
1824 real_max = var_vr->max;
1825 }
1826 else
1827 {
1828 anti_min = var_vr->min;
1829 anti_max = var_vr->max;
1830 real_min = vr_p->min;
1831 real_max = vr_p->max;
1832 }
1833
1834
1835 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1836 not including any endpoints. */
1837 if (compare_values (anti_max, real_max) == -1
1838 && compare_values (anti_min, real_min) == 1)
1839 {
1840 /* If the range covers the whole valid range of
1841 the type, keep the anti-range. */
1842 if (!vrp_val_is_min (real_min)
1843 || !vrp_val_is_max (real_max))
1844 set_value_range (vr_p, VR_RANGE, real_min,
1845 real_max, vr_p->equiv);
1846 }
1847 /* Case 2, VR_ANTI_RANGE completely disjoint from
1848 VR_RANGE. */
1849 else if (compare_values (anti_min, real_max) == 1
1850 || compare_values (anti_max, real_min) == -1)
1851 {
1852 set_value_range (vr_p, VR_RANGE, real_min,
1853 real_max, vr_p->equiv);
1854 }
1855 /* Case 3a, the anti-range extends into the low
1856 part of the real range, creating a new lower
1857 bound for the real range. */
1858 else if (((cmp = compare_values (anti_max, real_min)) == 1
1859 || cmp == 0)
1860 && compare_values (anti_max, real_max) == -1)
1861 {
1862 gcc_assert (!is_positive_overflow_infinity (anti_max));
1863 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1864 && vrp_val_is_max (anti_max))
1865 {
1866 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1867 {
1868 set_value_range_to_varying (vr_p);
1869 return;
1870 }
1871 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1872 }
1873 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1874 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1875 anti_max,
1876 build_int_cst (TREE_TYPE (var_vr->min), 1));
1877 else
1878 min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1879 anti_max, size_int (1));
1880 max = real_max;
1881 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1882 }
1883 /* Case 3b, the anti-range extends into the high
1884 part of the real range, creating a new upper
1885 bound for the real range. */
1886 else if (compare_values (anti_min, real_min) == 1
1887 && ((cmp = compare_values (anti_min, real_max)) == -1
1888 || cmp == 0))
1889 {
1890 gcc_assert (!is_negative_overflow_infinity (anti_min));
1891 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1892 && vrp_val_is_min (anti_min))
1893 {
1894 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1895 {
1896 set_value_range_to_varying (vr_p);
1897 return;
1898 }
1899 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1900 }
1901 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1902 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1903 anti_min,
1904 build_int_cst (TREE_TYPE (var_vr->min), 1));
1905 else
1906 max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1907 anti_min,
1908 size_int (-1));
1909 min = real_min;
1910 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1911 }
1912 }
1913 }
1914 }
1915
1916
1917 /* Extract range information from SSA name VAR and store it in VR. If
1918 VAR has an interesting range, use it. Otherwise, create the
1919 range [VAR, VAR] and return it. This is useful in situations where
1920 we may have conditionals testing values of VARYING names. For
1921 instance,
1922
1923 x_3 = y_5;
1924 if (x_3 > y_5)
1925 ...
1926
1927 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1928 always false. */
1929
1930 static void
1931 extract_range_from_ssa_name (value_range_t *vr, tree var)
1932 {
1933 value_range_t *var_vr = get_value_range (var);
1934
1935 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1936 copy_value_range (vr, var_vr);
1937 else
1938 set_value_range (vr, VR_RANGE, var, var, NULL);
1939
1940 add_equivalence (&vr->equiv, var);
1941 }
1942
1943
1944 /* Wrapper around int_const_binop. If the operation overflows and we
1945 are not using wrapping arithmetic, then adjust the result to be
1946 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1947 NULL_TREE if we need to use an overflow infinity representation but
1948 the type does not support it. */
1949
1950 static tree
1951 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1952 {
1953 tree res;
1954
1955 res = int_const_binop (code, val1, val2, 0);
1956
1957 /* If we are using unsigned arithmetic, operate symbolically
1958 on -INF and +INF as int_const_binop only handles signed overflow. */
1959 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1960 {
1961 int checkz = compare_values (res, val1);
1962 bool overflow = false;
1963
1964 /* Ensure that res = val1 [+*] val2 >= val1
1965 or that res = val1 - val2 <= val1. */
1966 if ((code == PLUS_EXPR
1967 && !(checkz == 1 || checkz == 0))
1968 || (code == MINUS_EXPR
1969 && !(checkz == 0 || checkz == -1)))
1970 {
1971 overflow = true;
1972 }
1973 /* Checking for multiplication overflow is done by dividing the
1974 output of the multiplication by the first input of the
1975 multiplication. If the result of that division operation is
1976 not equal to the second input of the multiplication, then the
1977 multiplication overflowed. */
1978 else if (code == MULT_EXPR && !integer_zerop (val1))
1979 {
1980 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1981 res,
1982 val1, 0);
1983 int check = compare_values (tmp, val2);
1984
1985 if (check != 0)
1986 overflow = true;
1987 }
1988
1989 if (overflow)
1990 {
1991 res = copy_node (res);
1992 TREE_OVERFLOW (res) = 1;
1993 }
1994
1995 }
1996 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1997 /* If the signed operation wraps then int_const_binop has done
1998 everything we want. */
1999 ;
2000 else if ((TREE_OVERFLOW (res)
2001 && !TREE_OVERFLOW (val1)
2002 && !TREE_OVERFLOW (val2))
2003 || is_overflow_infinity (val1)
2004 || is_overflow_infinity (val2))
2005 {
2006 /* If the operation overflowed but neither VAL1 nor VAL2 has
2007 overflowed, return -INF or +INF depending on the operation
2008 and the combination of signs of the operands. */
2009 int sgn1 = tree_int_cst_sgn (val1);
2010 int sgn2 = tree_int_cst_sgn (val2);
2011
2012 if (needs_overflow_infinity (TREE_TYPE (res))
2013 && !supports_overflow_infinity (TREE_TYPE (res)))
2014 return NULL_TREE;
2015
2016 /* We have to punt on adding infinities of different signs,
2017 since we can't tell what the sign of the result should be.
2018 Likewise for subtracting infinities of the same sign. */
2019 if (((code == PLUS_EXPR && sgn1 != sgn2)
2020 || (code == MINUS_EXPR && sgn1 == sgn2))
2021 && is_overflow_infinity (val1)
2022 && is_overflow_infinity (val2))
2023 return NULL_TREE;
2024
2025 /* Don't try to handle division or shifting of infinities. */
2026 if ((code == TRUNC_DIV_EXPR
2027 || code == FLOOR_DIV_EXPR
2028 || code == CEIL_DIV_EXPR
2029 || code == EXACT_DIV_EXPR
2030 || code == ROUND_DIV_EXPR
2031 || code == RSHIFT_EXPR)
2032 && (is_overflow_infinity (val1)
2033 || is_overflow_infinity (val2)))
2034 return NULL_TREE;
2035
2036 /* Notice that we only need to handle the restricted set of
2037 operations handled by extract_range_from_binary_expr.
2038 Among them, only multiplication, addition and subtraction
2039 can yield overflow without overflown operands because we
2040 are working with integral types only... except in the
2041 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2042 for division too. */
2043
2044 /* For multiplication, the sign of the overflow is given
2045 by the comparison of the signs of the operands. */
2046 if ((code == MULT_EXPR && sgn1 == sgn2)
2047 /* For addition, the operands must be of the same sign
2048 to yield an overflow. Its sign is therefore that
2049 of one of the operands, for example the first. For
2050 infinite operands X + -INF is negative, not positive. */
2051 || (code == PLUS_EXPR
2052 && (sgn1 >= 0
2053 ? !is_negative_overflow_infinity (val2)
2054 : is_positive_overflow_infinity (val2)))
2055 /* For subtraction, non-infinite operands must be of
2056 different signs to yield an overflow. Its sign is
2057 therefore that of the first operand or the opposite of
2058 that of the second operand. A first operand of 0 counts
2059 as positive here, for the corner case 0 - (-INF), which
2060 overflows, but must yield +INF. For infinite operands 0
2061 - INF is negative, not positive. */
2062 || (code == MINUS_EXPR
2063 && (sgn1 >= 0
2064 ? !is_positive_overflow_infinity (val2)
2065 : is_negative_overflow_infinity (val2)))
2066 /* We only get in here with positive shift count, so the
2067 overflow direction is the same as the sign of val1.
2068 Actually rshift does not overflow at all, but we only
2069 handle the case of shifting overflowed -INF and +INF. */
2070 || (code == RSHIFT_EXPR
2071 && sgn1 >= 0)
2072 /* For division, the only case is -INF / -1 = +INF. */
2073 || code == TRUNC_DIV_EXPR
2074 || code == FLOOR_DIV_EXPR
2075 || code == CEIL_DIV_EXPR
2076 || code == EXACT_DIV_EXPR
2077 || code == ROUND_DIV_EXPR)
2078 return (needs_overflow_infinity (TREE_TYPE (res))
2079 ? positive_overflow_infinity (TREE_TYPE (res))
2080 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2081 else
2082 return (needs_overflow_infinity (TREE_TYPE (res))
2083 ? negative_overflow_infinity (TREE_TYPE (res))
2084 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2085 }
2086
2087 return res;
2088 }
2089
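/* Illustrative sketch, not part of the pass and kept out of the build
   with #if 0 (the helper names are made up): the unsigned overflow
   checks above reduce to the usual idioms "a + b < a" for addition and
   "(a * b) / a != b" for multiplication.  */
#if 0
static bool
unsigned_add_overflows (unsigned HOST_WIDE_INT a, unsigned HOST_WIDE_INT b)
{
  return a + b < a;		/* Wrapped sum is smaller than an operand.  */
}

static bool
unsigned_mult_overflows (unsigned HOST_WIDE_INT a, unsigned HOST_WIDE_INT b)
{
  return a != 0 && (a * b) / a != b;	/* Mirrors the division check above.  */
}
#endif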
2090
2091 /* For range VR compute two double_int bitmasks. If a bit is unset
2092 in *MAY_BE_NONZERO, that bit is 0 for every number in the range;
2093 otherwise it may be 0 or 1. If a bit is set in *MUST_BE_NONZERO,
2094 that bit is 1 for every number in the range; otherwise it may be
2095 0 or 1. */
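/* For example (illustration only): for the range [4, 7] the values are
   100b..111b, so *MUST_BE_NONZERO is 4 (bit 2 is set in every value)
   and *MAY_BE_NONZERO is 7 (the low two bits vary).  */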
2096
2097 static bool
2098 zero_nonzero_bits_from_vr (value_range_t *vr, double_int *may_be_nonzero,
2099 double_int *must_be_nonzero)
2100 {
2101 if (range_int_cst_p (vr))
2102 {
2103 if (range_int_cst_singleton_p (vr))
2104 {
2105 *may_be_nonzero = tree_to_double_int (vr->min);
2106 *must_be_nonzero = *may_be_nonzero;
2107 return true;
2108 }
2109 if (tree_int_cst_sgn (vr->min) >= 0)
2110 {
2111 double_int dmin = tree_to_double_int (vr->min);
2112 double_int dmax = tree_to_double_int (vr->max);
2113 double_int xor_mask = double_int_xor (dmin, dmax);
2114 *may_be_nonzero = double_int_ior (dmin, dmax);
2115 *must_be_nonzero = double_int_and (dmin, dmax);
2116 if (xor_mask.high != 0)
2117 {
2118 unsigned HOST_WIDE_INT mask
2119 = ((unsigned HOST_WIDE_INT) 1
2120 << floor_log2 (xor_mask.high)) - 1;
2121 may_be_nonzero->low = ALL_ONES;
2122 may_be_nonzero->high |= mask;
2123 must_be_nonzero->low = 0;
2124 must_be_nonzero->high &= ~mask;
2125 }
2126 else if (xor_mask.low != 0)
2127 {
2128 unsigned HOST_WIDE_INT mask
2129 = ((unsigned HOST_WIDE_INT) 1
2130 << floor_log2 (xor_mask.low)) - 1;
2131 may_be_nonzero->low |= mask;
2132 must_be_nonzero->low &= ~mask;
2133 }
2134 return true;
2135 }
2136 }
2137 may_be_nonzero->low = ALL_ONES;
2138 may_be_nonzero->high = ALL_ONES;
2139 must_be_nonzero->low = 0;
2140 must_be_nonzero->high = 0;
2141 return false;
2142 }
2143
2144
2145 /* Extract range information from a binary expression EXPR based on
2146 the ranges of each of its operands and the expression code. */
2147
2148 static void
2149 extract_range_from_binary_expr (value_range_t *vr,
2150 enum tree_code code,
2151 tree expr_type, tree op0, tree op1)
2152 {
2153 enum value_range_type type;
2154 tree min, max;
2155 int cmp;
2156 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2157 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2158
2159 /* Not all binary expressions can be applied to ranges in a
2160 meaningful way. Handle only arithmetic operations. */
2161 if (code != PLUS_EXPR
2162 && code != MINUS_EXPR
2163 && code != POINTER_PLUS_EXPR
2164 && code != MULT_EXPR
2165 && code != TRUNC_DIV_EXPR
2166 && code != FLOOR_DIV_EXPR
2167 && code != CEIL_DIV_EXPR
2168 && code != EXACT_DIV_EXPR
2169 && code != ROUND_DIV_EXPR
2170 && code != TRUNC_MOD_EXPR
2171 && code != RSHIFT_EXPR
2172 && code != MIN_EXPR
2173 && code != MAX_EXPR
2174 && code != BIT_AND_EXPR
2175 && code != BIT_IOR_EXPR
2176 && code != TRUTH_AND_EXPR
2177 && code != TRUTH_OR_EXPR)
2178 {
2179 /* We can still do constant propagation here. */
2180 tree const_op0 = op_with_constant_singleton_value_range (op0);
2181 tree const_op1 = op_with_constant_singleton_value_range (op1);
2182 if (const_op0 || const_op1)
2183 {
2184 tree tem = fold_binary (code, expr_type,
2185 const_op0 ? const_op0 : op0,
2186 const_op1 ? const_op1 : op1);
2187 if (tem
2188 && is_gimple_min_invariant (tem)
2189 && !is_overflow_infinity (tem))
2190 {
2191 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2192 return;
2193 }
2194 }
2195 set_value_range_to_varying (vr);
2196 return;
2197 }
2198
2199 /* Get value ranges for each operand. For constant operands, create
2200 a new value range with the operand to simplify processing. */
2201 if (TREE_CODE (op0) == SSA_NAME)
2202 vr0 = *(get_value_range (op0));
2203 else if (is_gimple_min_invariant (op0))
2204 set_value_range_to_value (&vr0, op0, NULL);
2205 else
2206 set_value_range_to_varying (&vr0);
2207
2208 if (TREE_CODE (op1) == SSA_NAME)
2209 vr1 = *(get_value_range (op1));
2210 else if (is_gimple_min_invariant (op1))
2211 set_value_range_to_value (&vr1, op1, NULL);
2212 else
2213 set_value_range_to_varying (&vr1);
2214
2215 /* If either range is UNDEFINED, so is the result. */
2216 if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
2217 {
2218 set_value_range_to_undefined (vr);
2219 return;
2220 }
2221
2222 /* The type of the resulting value range defaults to VR0.TYPE. */
2223 type = vr0.type;
2224
2225 /* Refuse to operate on VARYING ranges, ranges of different kinds
2226 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2227 because we may be able to derive a useful range even if one of
2228 the operands is VR_VARYING or a symbolic range. Similarly for
2229 divisions. TODO: we may be able to derive anti-ranges in
2230 some cases. */
2231 if (code != BIT_AND_EXPR
2232 && code != TRUTH_AND_EXPR
2233 && code != TRUTH_OR_EXPR
2234 && code != TRUNC_DIV_EXPR
2235 && code != FLOOR_DIV_EXPR
2236 && code != CEIL_DIV_EXPR
2237 && code != EXACT_DIV_EXPR
2238 && code != ROUND_DIV_EXPR
2239 && code != TRUNC_MOD_EXPR
2240 && (vr0.type == VR_VARYING
2241 || vr1.type == VR_VARYING
2242 || vr0.type != vr1.type
2243 || symbolic_range_p (&vr0)
2244 || symbolic_range_p (&vr1)))
2245 {
2246 set_value_range_to_varying (vr);
2247 return;
2248 }
2249
2250 /* Now evaluate the expression to determine the new range. */
2251 if (POINTER_TYPE_P (expr_type)
2252 || POINTER_TYPE_P (TREE_TYPE (op0))
2253 || POINTER_TYPE_P (TREE_TYPE (op1)))
2254 {
2255 if (code == MIN_EXPR || code == MAX_EXPR)
2256 {
2257 /* For MIN/MAX expressions with pointers, we only care about
2258 nullness: if both are non-null, then the result is non-null;
2259 if both are null, then the result is null. Otherwise the
2260 result is varying. */
2261 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2262 set_value_range_to_nonnull (vr, expr_type);
2263 else if (range_is_null (&vr0) && range_is_null (&vr1))
2264 set_value_range_to_null (vr, expr_type);
2265 else
2266 set_value_range_to_varying (vr);
2267
2268 return;
2269 }
2270 if (code == POINTER_PLUS_EXPR)
2271 {
2272 /* For pointer types, we are really only interested in asserting
2273 whether the expression evaluates to non-NULL. */
2274 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2275 set_value_range_to_nonnull (vr, expr_type);
2276 else if (range_is_null (&vr0) && range_is_null (&vr1))
2277 set_value_range_to_null (vr, expr_type);
2278 else
2279 set_value_range_to_varying (vr);
2280 }
2281 else if (code == BIT_AND_EXPR)
2282 {
2283 /* For pointer types, we are really only interested in asserting
2284 whether the expression evaluates to non-NULL. */
2285 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2286 set_value_range_to_nonnull (vr, expr_type);
2287 else if (range_is_null (&vr0) || range_is_null (&vr1))
2288 set_value_range_to_null (vr, expr_type);
2289 else
2290 set_value_range_to_varying (vr);
2291 }
2292 else
2293 gcc_unreachable ();
2294
2295 return;
2296 }
2297
2298 /* For integer ranges, apply the operation to each end of the
2299 range and see what we end up with. */
2300 if (code == TRUTH_AND_EXPR
2301 || code == TRUTH_OR_EXPR)
2302 {
2303 /* If one of the operands is zero, we know that the whole
2304 expression evaluates to zero. */
2305 if (code == TRUTH_AND_EXPR
2306 && ((vr0.type == VR_RANGE
2307 && integer_zerop (vr0.min)
2308 && integer_zerop (vr0.max))
2309 || (vr1.type == VR_RANGE
2310 && integer_zerop (vr1.min)
2311 && integer_zerop (vr1.max))))
2312 {
2313 type = VR_RANGE;
2314 min = max = build_int_cst (expr_type, 0);
2315 }
2316 /* If one of the operands is one, we know that the whole
2317 expression evaluates to one. */
2318 else if (code == TRUTH_OR_EXPR
2319 && ((vr0.type == VR_RANGE
2320 && integer_onep (vr0.min)
2321 && integer_onep (vr0.max))
2322 || (vr1.type == VR_RANGE
2323 && integer_onep (vr1.min)
2324 && integer_onep (vr1.max))))
2325 {
2326 type = VR_RANGE;
2327 min = max = build_int_cst (expr_type, 1);
2328 }
2329 else if (vr0.type != VR_VARYING
2330 && vr1.type != VR_VARYING
2331 && vr0.type == vr1.type
2332 && !symbolic_range_p (&vr0)
2333 && !overflow_infinity_range_p (&vr0)
2334 && !symbolic_range_p (&vr1)
2335 && !overflow_infinity_range_p (&vr1))
2336 {
2337 /* Boolean expressions cannot be folded with int_const_binop. */
2338 min = fold_binary (code, expr_type, vr0.min, vr1.min);
2339 max = fold_binary (code, expr_type, vr0.max, vr1.max);
2340 }
2341 else
2342 {
2343 /* The result of a TRUTH_*_EXPR is always true or false. */
2344 set_value_range_to_truthvalue (vr, expr_type);
2345 return;
2346 }
2347 }
2348 else if (code == PLUS_EXPR
2349 || code == MIN_EXPR
2350 || code == MAX_EXPR)
2351 {
2352 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2353 VR_VARYING. It would take more effort to compute a precise
2354 range for such a case. For example, if we have op0 == 1 and
2355 op1 == -1 with their ranges both being ~[0,0], we would have
2356 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2357 Note that we are guaranteed to have vr0.type == vr1.type at
2358 this point. */
2359 if (code == PLUS_EXPR && vr0.type == VR_ANTI_RANGE)
2360 {
2361 set_value_range_to_varying (vr);
2362 return;
2363 }
2364
2365 /* For operations that make the resulting range directly
2366 proportional to the original ranges, apply the operation to
2367 the same end of each range. */
2368 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2369 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2370
2371 /* If both additions overflowed, the range kind is still correct.
2372 This happens regularly with subtracting something in unsigned
2373 arithmetic.
2374 ??? See PR30318 for all the cases we do not handle. */
2375 if (code == PLUS_EXPR
2376 && (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2377 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2378 {
2379 min = build_int_cst_wide (TREE_TYPE (min),
2380 TREE_INT_CST_LOW (min),
2381 TREE_INT_CST_HIGH (min));
2382 max = build_int_cst_wide (TREE_TYPE (max),
2383 TREE_INT_CST_LOW (max),
2384 TREE_INT_CST_HIGH (max));
2385 }
2386 }
2387 else if (code == MULT_EXPR
2388 || code == TRUNC_DIV_EXPR
2389 || code == FLOOR_DIV_EXPR
2390 || code == CEIL_DIV_EXPR
2391 || code == EXACT_DIV_EXPR
2392 || code == ROUND_DIV_EXPR
2393 || code == RSHIFT_EXPR)
2394 {
2395 tree val[4];
2396 size_t i;
2397 bool sop;
2398
2399 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2400 drop to VR_VARYING. It would take more effort to compute a
2401 precise range for such a case. For example, if we have
2402 op0 == 65536 and op1 == 65536 with their ranges both being
2403 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2404 we cannot claim that the product is in ~[0,0]. Note that we
2405 are guaranteed to have vr0.type == vr1.type at this
2406 point. */
2407 if (code == MULT_EXPR
2408 && vr0.type == VR_ANTI_RANGE
2409 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
2410 {
2411 set_value_range_to_varying (vr);
2412 return;
2413 }
2414
2415 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2416 then drop to VR_VARYING. Outside of this range we get undefined
2417 behavior from the shift operation. We cannot even trust
2418 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2419 shifts, and the operation at the tree level may be widened. */
2420 if (code == RSHIFT_EXPR)
2421 {
2422 if (vr1.type == VR_ANTI_RANGE
2423 || !vrp_expr_computes_nonnegative (op1, &sop)
2424 || (operand_less_p
2425 (build_int_cst (TREE_TYPE (vr1.max),
2426 TYPE_PRECISION (expr_type) - 1),
2427 vr1.max) != 0))
2428 {
2429 set_value_range_to_varying (vr);
2430 return;
2431 }
2432 }
2433
2434 else if ((code == TRUNC_DIV_EXPR
2435 || code == FLOOR_DIV_EXPR
2436 || code == CEIL_DIV_EXPR
2437 || code == EXACT_DIV_EXPR
2438 || code == ROUND_DIV_EXPR)
2439 && (vr0.type != VR_RANGE || symbolic_range_p (&vr0)))
2440 {
2441 /* For division, if op1 has a VR_RANGE but op0 does not, something
2442 can be deduced just from that range. Say [min, max] / [4, max]
2443 gives the range [min / 4, max / 4]. */
2444 if (vr1.type == VR_RANGE
2445 && !symbolic_range_p (&vr1)
2446 && !range_includes_zero_p (&vr1))
2447 {
2448 vr0.type = type = VR_RANGE;
2449 vr0.min = vrp_val_min (TREE_TYPE (op0));
2450 vr0.max = vrp_val_max (TREE_TYPE (op0));
2451 }
2452 else
2453 {
2454 set_value_range_to_varying (vr);
2455 return;
2456 }
2457 }
2458
2459 /* For divisions, if the function can throw non-call exceptions,
2460 we must not eliminate a division by zero. */
2461 if ((code == TRUNC_DIV_EXPR
2462 || code == FLOOR_DIV_EXPR
2463 || code == CEIL_DIV_EXPR
2464 || code == EXACT_DIV_EXPR
2465 || code == ROUND_DIV_EXPR)
2466 && cfun->can_throw_non_call_exceptions
2467 && (vr1.type != VR_RANGE
2468 || symbolic_range_p (&vr1)
2469 || range_includes_zero_p (&vr1)))
2470 {
2471 set_value_range_to_varying (vr);
2472 return;
2473 }
2474
2475 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2476 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2477 include 0. */
2478 if ((code == TRUNC_DIV_EXPR
2479 || code == FLOOR_DIV_EXPR
2480 || code == CEIL_DIV_EXPR
2481 || code == EXACT_DIV_EXPR
2482 || code == ROUND_DIV_EXPR)
2483 && vr0.type == VR_RANGE
2484 && (vr1.type != VR_RANGE
2485 || symbolic_range_p (&vr1)
2486 || range_includes_zero_p (&vr1)))
2487 {
2488 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2489 int cmp;
2490
2491 sop = false;
2492 min = NULL_TREE;
2493 max = NULL_TREE;
2494 if (vrp_expr_computes_nonnegative (op1, &sop) && !sop)
2495 {
2496 /* For unsigned division or when divisor is known
2497 to be non-negative, the range has to cover
2498 all numbers from 0 to max for positive max
2499 and all numbers from min to 0 for negative min. */
2500 cmp = compare_values (vr0.max, zero);
2501 if (cmp == -1)
2502 max = zero;
2503 else if (cmp == 0 || cmp == 1)
2504 max = vr0.max;
2505 else
2506 type = VR_VARYING;
2507 cmp = compare_values (vr0.min, zero);
2508 if (cmp == 1)
2509 min = zero;
2510 else if (cmp == 0 || cmp == -1)
2511 min = vr0.min;
2512 else
2513 type = VR_VARYING;
2514 }
2515 else
2516 {
2517 /* Otherwise the range is -max .. max or min .. -min
2518 depending on which bound is bigger in absolute value,
2519 as the division can change the sign. */
2520 abs_extent_range (vr, vr0.min, vr0.max);
2521 return;
2522 }
2523 if (type == VR_VARYING)
2524 {
2525 set_value_range_to_varying (vr);
2526 return;
2527 }
2528 }
2529
2530 /* Multiplications and divisions are a bit tricky to handle,
2531 depending on the mix of signs we have in the two ranges, we
2532 need to operate on different values to get the minimum and
2533 maximum values for the new range. One approach is to figure
2534 out all the variations of range combinations and do the
2535 operations.
2536
2537 However, this involves several calls to compare_values and it
2538 is pretty convoluted. It's simpler to do the 4 operations
2539 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
2540 and then figure the smallest and largest values to form
2541 the new range. */
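/* Worked example (illustration only): for [-3, 2] * [5, 10] the four
   cross products are -15, -30, 10 and 20, so the resulting range is
   [-30, 20].  */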
2542 else
2543 {
2544 gcc_assert ((vr0.type == VR_RANGE
2545 || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE))
2546 && vr0.type == vr1.type);
2547
2548 /* Compute the 4 cross operations. */
2549 sop = false;
2550 val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
2551 if (val[0] == NULL_TREE)
2552 sop = true;
2553
2554 if (vr1.max == vr1.min)
2555 val[1] = NULL_TREE;
2556 else
2557 {
2558 val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
2559 if (val[1] == NULL_TREE)
2560 sop = true;
2561 }
2562
2563 if (vr0.max == vr0.min)
2564 val[2] = NULL_TREE;
2565 else
2566 {
2567 val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
2568 if (val[2] == NULL_TREE)
2569 sop = true;
2570 }
2571
2572 if (vr0.min == vr0.max || vr1.min == vr1.max)
2573 val[3] = NULL_TREE;
2574 else
2575 {
2576 val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
2577 if (val[3] == NULL_TREE)
2578 sop = true;
2579 }
2580
2581 if (sop)
2582 {
2583 set_value_range_to_varying (vr);
2584 return;
2585 }
2586
2587 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2588 of VAL[i]. */
2589 min = val[0];
2590 max = val[0];
2591 for (i = 1; i < 4; i++)
2592 {
2593 if (!is_gimple_min_invariant (min)
2594 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2595 || !is_gimple_min_invariant (max)
2596 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2597 break;
2598
2599 if (val[i])
2600 {
2601 if (!is_gimple_min_invariant (val[i])
2602 || (TREE_OVERFLOW (val[i])
2603 && !is_overflow_infinity (val[i])))
2604 {
2605 /* If we found an overflowed value, set MIN and MAX
2606 to it so that we set the resulting range to
2607 VARYING. */
2608 min = max = val[i];
2609 break;
2610 }
2611
2612 if (compare_values (val[i], min) == -1)
2613 min = val[i];
2614
2615 if (compare_values (val[i], max) == 1)
2616 max = val[i];
2617 }
2618 }
2619 }
2620 }
2621 else if (code == TRUNC_MOD_EXPR)
2622 {
2623 bool sop = false;
2624 if (vr1.type != VR_RANGE
2625 || symbolic_range_p (&vr1)
2626 || range_includes_zero_p (&vr1)
2627 || vrp_val_is_min (vr1.min))
2628 {
2629 set_value_range_to_varying (vr);
2630 return;
2631 }
2632 type = VR_RANGE;
2633 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2634 max = fold_unary_to_constant (ABS_EXPR, TREE_TYPE (vr1.min), vr1.min);
2635 if (tree_int_cst_lt (max, vr1.max))
2636 max = vr1.max;
2637 max = int_const_binop (MINUS_EXPR, max, integer_one_node, 0);
2638 /* If the dividend is non-negative the modulus will be
2639 non-negative as well. */
2640 if (TYPE_UNSIGNED (TREE_TYPE (max))
2641 || (vrp_expr_computes_nonnegative (op0, &sop) && !sop))
2642 min = build_int_cst (TREE_TYPE (max), 0);
2643 else
2644 min = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (max), max);
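/* For example (illustration only): with vr1 = [-7, -2] the bound is
   max (7, 2) - 1 = 6, so the result is [-6, 6], or [0, 6] when the
   dividend is known to be non-negative.  */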
2645 }
2646 else if (code == MINUS_EXPR)
2647 {
2648 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2649 VR_VARYING. It would take more effort to compute a precise
2650 range for such a case. For example, if we have op0 == 1 and
2651 op1 == 1 with their ranges both being ~[0,0], we would have
2652 op0 - op1 == 0, so we cannot claim that the difference is in
2653 ~[0,0]. Note that we are guaranteed to have
2654 vr0.type == vr1.type at this point. */
2655 if (vr0.type == VR_ANTI_RANGE)
2656 {
2657 set_value_range_to_varying (vr);
2658 return;
2659 }
2660
2661 /* For MINUS_EXPR, apply the operation to the opposite ends of
2662 each range. */
2663 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2664 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2665 }
2666 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
2667 {
2668 bool vr0_int_cst_singleton_p, vr1_int_cst_singleton_p;
2669 bool int_cst_range0, int_cst_range1;
2670 double_int may_be_nonzero0, may_be_nonzero1;
2671 double_int must_be_nonzero0, must_be_nonzero1;
2672
2673 vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0);
2674 vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1);
2675 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
2676 &must_be_nonzero0);
2677 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
2678 &must_be_nonzero1);
2679
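/* For example (illustration only): [4, 7] & [5, 5] gives
   must0 = 4, may0 = 7 and must1 = may1 = 5, hence min = 4 & 5 = 4
   and max = 7 & 5 = 5, i.e. the range [4, 5].  */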
2680 type = VR_RANGE;
2681 if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p)
2682 min = max = int_const_binop (code, vr0.max, vr1.max, 0);
2683 else if (!int_cst_range0 && !int_cst_range1)
2684 {
2685 set_value_range_to_varying (vr);
2686 return;
2687 }
2688 else if (code == BIT_AND_EXPR)
2689 {
2690 min = double_int_to_tree (expr_type,
2691 double_int_and (must_be_nonzero0,
2692 must_be_nonzero1));
2693 max = double_int_to_tree (expr_type,
2694 double_int_and (may_be_nonzero0,
2695 may_be_nonzero1));
2696 if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
2697 min = NULL_TREE;
2698 if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
2699 max = NULL_TREE;
2700 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2701 {
2702 if (min == NULL_TREE)
2703 min = build_int_cst (expr_type, 0);
2704 if (max == NULL_TREE || tree_int_cst_lt (vr0.max, max))
2705 max = vr0.max;
2706 }
2707 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2708 {
2709 if (min == NULL_TREE)
2710 min = build_int_cst (expr_type, 0);
2711 if (max == NULL_TREE || tree_int_cst_lt (vr1.max, max))
2712 max = vr1.max;
2713 }
2714 }
2715 else if (!int_cst_range0
2716 || !int_cst_range1
2717 || tree_int_cst_sgn (vr0.min) < 0
2718 || tree_int_cst_sgn (vr1.min) < 0)
2719 {
2720 set_value_range_to_varying (vr);
2721 return;
2722 }
2723 else
2724 {
2725 min = double_int_to_tree (expr_type,
2726 double_int_ior (must_be_nonzero0,
2727 must_be_nonzero1));
2728 max = double_int_to_tree (expr_type,
2729 double_int_ior (may_be_nonzero0,
2730 may_be_nonzero1));
2731 if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
2732 min = vr0.min;
2733 else
2734 min = vrp_int_const_binop (MAX_EXPR, min, vr0.min);
2735 if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
2736 max = NULL_TREE;
2737 min = vrp_int_const_binop (MAX_EXPR, min, vr1.min);
2738 }
2739 }
2740 else
2741 gcc_unreachable ();
2742
2743 /* If either MIN or MAX overflowed, then set the resulting range to
2744 VARYING. But we do accept an overflow infinity
2745 representation. */
2746 if (min == NULL_TREE
2747 || !is_gimple_min_invariant (min)
2748 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2749 || max == NULL_TREE
2750 || !is_gimple_min_invariant (max)
2751 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2752 {
2753 set_value_range_to_varying (vr);
2754 return;
2755 }
2756
2757 /* We punt if:
2758 1) [-INF, +INF]
2759 2) [-INF, +-INF(OVF)]
2760 3) [+-INF(OVF), +INF]
2761 4) [+-INF(OVF), +-INF(OVF)]
2762 We learn nothing when we have INF and INF(OVF) on both sides.
2763 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2764 overflow. */
2765 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2766 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2767 {
2768 set_value_range_to_varying (vr);
2769 return;
2770 }
2771
2772 cmp = compare_values (min, max);
2773 if (cmp == -2 || cmp == 1)
2774 {
2775 /* If the new range has its limits swapped around (MIN > MAX),
2776 then the operation caused one of them to wrap around, mark
2777 the new range VARYING. */
2778 set_value_range_to_varying (vr);
2779 }
2780 else
2781 set_value_range (vr, type, min, max, NULL);
2782 }
2783
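/* Illustrative sketch, not part of the pass and kept out of the build
   with #if 0 (the helper name is made up): the PLUS/MINUS handling
   above is plain interval arithmetic, ignoring overflow and symbolic
   bounds.  PLUS combines like ends, MINUS combines opposite ends.  */
#if 0
static void
interval_plus_minus (long min0, long max0, long min1, long max1,
		     bool is_minus, long *lo, long *hi)
{
  if (is_minus)
    {
      *lo = min0 - max1;	/* Opposite ends, as for MINUS_EXPR.  */
      *hi = max0 - min1;
    }
  else
    {
      *lo = min0 + min1;	/* Same ends, as for PLUS_EXPR.  */
      *hi = max0 + max1;
    }
}
#endif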
2784
2785 /* Extract range information from a unary expression EXPR based on
2786 the range of its operand and the expression code. */
2787
2788 static void
2789 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
2790 tree type, tree op0)
2791 {
2792 tree min, max;
2793 int cmp;
2794 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2795
2796 /* Refuse to operate on certain unary expressions for which we
2797 cannot easily determine a resulting range. */
2798 if (code == FIX_TRUNC_EXPR
2799 || code == FLOAT_EXPR
2800 || code == BIT_NOT_EXPR
2801 || code == CONJ_EXPR)
2802 {
2803 /* We can still do constant propagation here. */
2804 if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE)
2805 {
2806 tree tem = fold_unary (code, type, op0);
2807 if (tem
2808 && is_gimple_min_invariant (tem)
2809 && !is_overflow_infinity (tem))
2810 {
2811 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2812 return;
2813 }
2814 }
2815 set_value_range_to_varying (vr);
2816 return;
2817 }
2818
2819 /* Get value ranges for the operand. For constant operands, create
2820 a new value range with the operand to simplify processing. */
2821 if (TREE_CODE (op0) == SSA_NAME)
2822 vr0 = *(get_value_range (op0));
2823 else if (is_gimple_min_invariant (op0))
2824 set_value_range_to_value (&vr0, op0, NULL);
2825 else
2826 set_value_range_to_varying (&vr0);
2827
2828 /* If VR0 is UNDEFINED, so is the result. */
2829 if (vr0.type == VR_UNDEFINED)
2830 {
2831 set_value_range_to_undefined (vr);
2832 return;
2833 }
2834
2835 /* Refuse to operate on symbolic ranges, or if the operand is
2836 neither a pointer nor an integral type. */
2837 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0))
2838 && !POINTER_TYPE_P (TREE_TYPE (op0)))
2839 || (vr0.type != VR_VARYING
2840 && symbolic_range_p (&vr0)))
2841 {
2842 set_value_range_to_varying (vr);
2843 return;
2844 }
2845
2846 /* If the expression involves pointers, we are only interested in
2847 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2848 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0)))
2849 {
2850 bool sop;
2851
2852 sop = false;
2853 if (range_is_nonnull (&vr0)
2854 || (tree_unary_nonzero_warnv_p (code, type, op0, &sop)
2855 && !sop))
2856 set_value_range_to_nonnull (vr, type);
2857 else if (range_is_null (&vr0))
2858 set_value_range_to_null (vr, type);
2859 else
2860 set_value_range_to_varying (vr);
2861
2862 return;
2863 }
2864
2865 /* Handle unary expressions on integer ranges. */
2866 if (CONVERT_EXPR_CODE_P (code)
2867 && INTEGRAL_TYPE_P (type)
2868 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2869 {
2870 tree inner_type = TREE_TYPE (op0);
2871 tree outer_type = type;
2872
2873 /* If VR0 is varying and we increase the type precision, assume
2874 a full range for the following transformation. */
2875 if (vr0.type == VR_VARYING
2876 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2877 {
2878 vr0.type = VR_RANGE;
2879 vr0.min = TYPE_MIN_VALUE (inner_type);
2880 vr0.max = TYPE_MAX_VALUE (inner_type);
2881 }
2882
2883 /* If VR0 is a constant range or anti-range and the conversion is
2884 not truncating we can convert the min and max values and
2885 canonicalize the resulting range. Otherwise we can do the
2886 conversion if the size of the range is less than what the
2887 precision of the target type can represent and the range is
2888 not an anti-range. */
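/* For example (illustration only): widening [0, 10] from short to int
   just converts the bounds; narrowing an int range [0, 300] to
   unsigned char is rejected because the span 300 does not fit in
   8 bits, so the result is VARYING.  */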
2889 if ((vr0.type == VR_RANGE
2890 || vr0.type == VR_ANTI_RANGE)
2891 && TREE_CODE (vr0.min) == INTEGER_CST
2892 && TREE_CODE (vr0.max) == INTEGER_CST
2893 && (!is_overflow_infinity (vr0.min)
2894 || (vr0.type == VR_RANGE
2895 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2896 && needs_overflow_infinity (outer_type)
2897 && supports_overflow_infinity (outer_type)))
2898 && (!is_overflow_infinity (vr0.max)
2899 || (vr0.type == VR_RANGE
2900 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2901 && needs_overflow_infinity (outer_type)
2902 && supports_overflow_infinity (outer_type)))
2903 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2904 || (vr0.type == VR_RANGE
2905 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2906 int_const_binop (MINUS_EXPR, vr0.max, vr0.min, 0),
2907 size_int (TYPE_PRECISION (outer_type)), 0)))))
2908 {
2909 tree new_min, new_max;
2910 new_min = force_fit_type_double (outer_type,
2911 tree_to_double_int (vr0.min),
2912 0, false);
2913 new_max = force_fit_type_double (outer_type,
2914 tree_to_double_int (vr0.max),
2915 0, false);
2916 if (is_overflow_infinity (vr0.min))
2917 new_min = negative_overflow_infinity (outer_type);
2918 if (is_overflow_infinity (vr0.max))
2919 new_max = positive_overflow_infinity (outer_type);
2920 set_and_canonicalize_value_range (vr, vr0.type,
2921 new_min, new_max, NULL);
2922 return;
2923 }
2924
2925 set_value_range_to_varying (vr);
2926 return;
2927 }
2928
2929 /* Conversion of a VR_VARYING value to a wider type can result
2930 in a usable range. So wait until after we've handled conversions
2931 before dropping the result to VR_VARYING if we had a source
2932 operand that is VR_VARYING. */
2933 if (vr0.type == VR_VARYING)
2934 {
2935 set_value_range_to_varying (vr);
2936 return;
2937 }
2938
2939 /* Apply the operation to each end of the range and see what we end
2940 up with. */
2941 if (code == NEGATE_EXPR
2942 && !TYPE_UNSIGNED (type))
2943 {
2944 /* NEGATE_EXPR flips the range around. We need to treat
2945 TYPE_MIN_VALUE specially. */
2946 if (is_positive_overflow_infinity (vr0.max))
2947 min = negative_overflow_infinity (type);
2948 else if (is_negative_overflow_infinity (vr0.max))
2949 min = positive_overflow_infinity (type);
2950 else if (!vrp_val_is_min (vr0.max))
2951 min = fold_unary_to_constant (code, type, vr0.max);
2952 else if (needs_overflow_infinity (type))
2953 {
2954 if (supports_overflow_infinity (type)
2955 && !is_overflow_infinity (vr0.min)
2956 && !vrp_val_is_min (vr0.min))
2957 min = positive_overflow_infinity (type);
2958 else
2959 {
2960 set_value_range_to_varying (vr);
2961 return;
2962 }
2963 }
2964 else
2965 min = TYPE_MIN_VALUE (type);
2966
2967 if (is_positive_overflow_infinity (vr0.min))
2968 max = negative_overflow_infinity (type);
2969 else if (is_negative_overflow_infinity (vr0.min))
2970 max = positive_overflow_infinity (type);
2971 else if (!vrp_val_is_min (vr0.min))
2972 max = fold_unary_to_constant (code, type, vr0.min);
2973 else if (needs_overflow_infinity (type))
2974 {
2975 if (supports_overflow_infinity (type))
2976 max = positive_overflow_infinity (type);
2977 else
2978 {
2979 set_value_range_to_varying (vr);
2980 return;
2981 }
2982 }
2983 else
2984 max = TYPE_MIN_VALUE (type);
2985 }
2986 else if (code == NEGATE_EXPR
2987 && TYPE_UNSIGNED (type))
2988 {
2989 if (!range_includes_zero_p (&vr0))
2990 {
2991 max = fold_unary_to_constant (code, type, vr0.min);
2992 min = fold_unary_to_constant (code, type, vr0.max);
2993 }
2994 else
2995 {
2996 if (range_is_null (&vr0))
2997 set_value_range_to_null (vr, type);
2998 else
2999 set_value_range_to_varying (vr);
3000 return;
3001 }
3002 }
3003 else if (code == ABS_EXPR
3004 && !TYPE_UNSIGNED (type))
3005 {
3006 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3007 useful range. */
3008 if (!TYPE_OVERFLOW_UNDEFINED (type)
3009 && ((vr0.type == VR_RANGE
3010 && vrp_val_is_min (vr0.min))
3011 || (vr0.type == VR_ANTI_RANGE
3012 && !vrp_val_is_min (vr0.min)
3013 && !range_includes_zero_p (&vr0))))
3014 {
3015 set_value_range_to_varying (vr);
3016 return;
3017 }
3018
3019 /* ABS_EXPR may flip the range around, if the original range
3020 included negative values. */
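/* For example (illustration only): ABS of [-3, 5] gives [0, 5],
   ABS of [-5, -3] gives [3, 5], and ABS of [3, 5] is unchanged.  */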
3021 if (is_overflow_infinity (vr0.min))
3022 min = positive_overflow_infinity (type);
3023 else if (!vrp_val_is_min (vr0.min))
3024 min = fold_unary_to_constant (code, type, vr0.min);
3025 else if (!needs_overflow_infinity (type))
3026 min = TYPE_MAX_VALUE (type);
3027 else if (supports_overflow_infinity (type))
3028 min = positive_overflow_infinity (type);
3029 else
3030 {
3031 set_value_range_to_varying (vr);
3032 return;
3033 }
3034
3035 if (is_overflow_infinity (vr0.max))
3036 max = positive_overflow_infinity (type);
3037 else if (!vrp_val_is_min (vr0.max))
3038 max = fold_unary_to_constant (code, type, vr0.max);
3039 else if (!needs_overflow_infinity (type))
3040 max = TYPE_MAX_VALUE (type);
3041 else if (supports_overflow_infinity (type)
3042 /* We shouldn't generate [+INF, +INF] as set_value_range
3043 doesn't like this and ICEs. */
3044 && !is_positive_overflow_infinity (min))
3045 max = positive_overflow_infinity (type);
3046 else
3047 {
3048 set_value_range_to_varying (vr);
3049 return;
3050 }
3051
3052 cmp = compare_values (min, max);
3053
3054 /* If a VR_ANTI_RANGE contains zero, then we have
3055 ~[-INF, min(MIN, MAX)]. */
3056 if (vr0.type == VR_ANTI_RANGE)
3057 {
3058 if (range_includes_zero_p (&vr0))
3059 {
3060 /* Take the lower of the two values. */
3061 if (cmp != 1)
3062 max = min;
3063
3064 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3065 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3066 flag_wrapv is set and the original anti-range doesn't include
3067 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3068 if (TYPE_OVERFLOW_WRAPS (type))
3069 {
3070 tree type_min_value = TYPE_MIN_VALUE (type);
3071
3072 min = (vr0.min != type_min_value
3073 ? int_const_binop (PLUS_EXPR, type_min_value,
3074 integer_one_node, 0)
3075 : type_min_value);
3076 }
3077 else
3078 {
3079 if (overflow_infinity_range_p (&vr0))
3080 min = negative_overflow_infinity (type);
3081 else
3082 min = TYPE_MIN_VALUE (type);
3083 }
3084 }
3085 else
3086 {
3087 /* All else has failed, so create the range [0, INF], even for
3088 flag_wrapv since TYPE_MIN_VALUE is in the original
3089 anti-range. */
3090 vr0.type = VR_RANGE;
3091 min = build_int_cst (type, 0);
3092 if (needs_overflow_infinity (type))
3093 {
3094 if (supports_overflow_infinity (type))
3095 max = positive_overflow_infinity (type);
3096 else
3097 {
3098 set_value_range_to_varying (vr);
3099 return;
3100 }
3101 }
3102 else
3103 max = TYPE_MAX_VALUE (type);
3104 }
3105 }
3106
3107 /* If the range contains zero then we know that the minimum value in the
3108 range will be zero. */
3109 else if (range_includes_zero_p (&vr0))
3110 {
3111 if (cmp == 1)
3112 max = min;
3113 min = build_int_cst (type, 0);
3114 }
3115 else
3116 {
3117 /* If the range was reversed, swap MIN and MAX. */
3118 if (cmp == 1)
3119 {
3120 tree t = min;
3121 min = max;
3122 max = t;
3123 }
3124 }
3125 }
3126 else
3127 {
3128 /* Otherwise, operate on each end of the range. */
3129 min = fold_unary_to_constant (code, type, vr0.min);
3130 max = fold_unary_to_constant (code, type, vr0.max);
3131
3132 if (needs_overflow_infinity (type))
3133 {
3134 gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
3135
3136 /* If both sides have overflowed, we don't know
3137 anything. */
3138 if ((is_overflow_infinity (vr0.min)
3139 || TREE_OVERFLOW (min))
3140 && (is_overflow_infinity (vr0.max)
3141 || TREE_OVERFLOW (max)))
3142 {
3143 set_value_range_to_varying (vr);
3144 return;
3145 }
3146
3147 if (is_overflow_infinity (vr0.min))
3148 min = vr0.min;
3149 else if (TREE_OVERFLOW (min))
3150 {
3151 if (supports_overflow_infinity (type))
3152 min = (tree_int_cst_sgn (min) >= 0
3153 ? positive_overflow_infinity (TREE_TYPE (min))
3154 : negative_overflow_infinity (TREE_TYPE (min)));
3155 else
3156 {
3157 set_value_range_to_varying (vr);
3158 return;
3159 }
3160 }
3161
3162 if (is_overflow_infinity (vr0.max))
3163 max = vr0.max;
3164 else if (TREE_OVERFLOW (max))
3165 {
3166 if (supports_overflow_infinity (type))
3167 max = (tree_int_cst_sgn (max) >= 0
3168 ? positive_overflow_infinity (TREE_TYPE (max))
3169 : negative_overflow_infinity (TREE_TYPE (max)));
3170 else
3171 {
3172 set_value_range_to_varying (vr);
3173 return;
3174 }
3175 }
3176 }
3177 }
3178
3179 cmp = compare_values (min, max);
3180 if (cmp == -2 || cmp == 1)
3181 {
3182 /* If the new range has its limits swapped around (MIN > MAX),
3183 then the operation caused one of them to wrap around, mark
3184 the new range VARYING. */
3185 set_value_range_to_varying (vr);
3186 }
3187 else
3188 set_value_range (vr, vr0.type, min, max, NULL);
3189 }
3190
3191
3192 /* Extract range information from a conditional expression EXPR based on
3193 the ranges of each of its operands and the expression code. */
3194
3195 static void
3196 extract_range_from_cond_expr (value_range_t *vr, tree expr)
3197 {
3198 tree op0, op1;
3199 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3200 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3201
3202 /* Get value ranges for each operand. For constant operands, create
3203 a new value range with the operand to simplify processing. */
3204 op0 = COND_EXPR_THEN (expr);
3205 if (TREE_CODE (op0) == SSA_NAME)
3206 vr0 = *(get_value_range (op0));
3207 else if (is_gimple_min_invariant (op0))
3208 set_value_range_to_value (&vr0, op0, NULL);
3209 else
3210 set_value_range_to_varying (&vr0);
3211
3212 op1 = COND_EXPR_ELSE (expr);
3213 if (TREE_CODE (op1) == SSA_NAME)
3214 vr1 = *(get_value_range (op1));
3215 else if (is_gimple_min_invariant (op1))
3216 set_value_range_to_value (&vr1, op1, NULL);
3217 else
3218 set_value_range_to_varying (&vr1);
3219
3220 /* The resulting value range is the union of the operand ranges. */
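/* For example (illustration only): arms with ranges [1, 5] and
   [10, 20] meet to the single conservative range [1, 20].  */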
3221 vrp_meet (&vr0, &vr1);
3222 copy_value_range (vr, &vr0);
3223 }
3224
3225
3226 /* Extract range information from a comparison expression EXPR based
3227 on the range of its operand and the expression code. */
3228
3229 static void
3230 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3231 tree type, tree op0, tree op1)
3232 {
3233 bool sop = false;
3234 tree val;
3235
3236 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3237 NULL);
3238
3239 /* A disadvantage of using a special infinity as an overflow
3240 representation is that we lose the ability to record overflow
3241 when we don't have an infinity. So we have to ignore a result
3242 which relies on overflow. */
3243
3244 if (val && !is_overflow_infinity (val) && !sop)
3245 {
3246 /* Since this expression was found on the RHS of an assignment,
3247 its type may be different from _Bool. Convert VAL to EXPR's
3248 type. */
3249 val = fold_convert (type, val);
3250 if (is_gimple_min_invariant (val))
3251 set_value_range_to_value (vr, val, vr->equiv);
3252 else
3253 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3254 }
3255 else
3256 /* The result of a comparison is always true or false. */
3257 set_value_range_to_truthvalue (vr, type);
3258 }
3259
3260 /* Try to derive a nonnegative or nonzero range out of STMT relying
3261 primarily on generic routines in fold in conjunction with range data.
3262 Store the result in *VR. */
3263
3264 static void
3265 extract_range_basic (value_range_t *vr, gimple stmt)
3266 {
3267 bool sop = false;
3268 tree type = gimple_expr_type (stmt);
3269
3270 if (INTEGRAL_TYPE_P (type)
3271 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3272 set_value_range_to_nonnegative (vr, type,
3273 sop || stmt_overflow_infinity (stmt));
3274 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3275 && !sop)
3276 set_value_range_to_nonnull (vr, type);
3277 else
3278 set_value_range_to_varying (vr);
3279 }
3280
3281
3282 /* Try to compute a useful range out of assignment STMT and store it
3283 in *VR. */
3284
3285 static void
3286 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3287 {
3288 enum tree_code code = gimple_assign_rhs_code (stmt);
3289
3290 if (code == ASSERT_EXPR)
3291 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3292 else if (code == SSA_NAME)
3293 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3294 else if (TREE_CODE_CLASS (code) == tcc_binary
3295 || code == TRUTH_AND_EXPR
3296 || code == TRUTH_OR_EXPR
3297 || code == TRUTH_XOR_EXPR)
3298 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3299 gimple_expr_type (stmt),
3300 gimple_assign_rhs1 (stmt),
3301 gimple_assign_rhs2 (stmt));
3302 else if (TREE_CODE_CLASS (code) == tcc_unary)
3303 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3304 gimple_expr_type (stmt),
3305 gimple_assign_rhs1 (stmt));
3306 else if (code == COND_EXPR)
3307 extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt));
3308 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3309 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3310 gimple_expr_type (stmt),
3311 gimple_assign_rhs1 (stmt),
3312 gimple_assign_rhs2 (stmt));
3313 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3314 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3315 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3316 else
3317 set_value_range_to_varying (vr);
3318
3319 if (vr->type == VR_VARYING)
3320 extract_range_basic (vr, stmt);
3321 }
3322
3323 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3324 would be profitable to adjust VR using scalar evolution information
3325 for VAR. If so, update VR with the new limits. */
3326
3327 static void
3328 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3329 gimple stmt, tree var)
3330 {
3331 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3332 enum ev_direction dir;
3333
3334 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3335 better opportunities than a regular range, but I'm not sure. */
3336 if (vr->type == VR_ANTI_RANGE)
3337 return;
3338
3339 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3340
3341 /* Like in PR19590, scev can return a constant function. */
3342 if (is_gimple_min_invariant (chrec))
3343 {
3344 set_value_range_to_value (vr, chrec, vr->equiv);
3345 return;
3346 }
3347
3348 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3349 return;
3350
3351 init = initial_condition_in_loop_num (chrec, loop->num);
3352 tem = op_with_constant_singleton_value_range (init);
3353 if (tem)
3354 init = tem;
3355 step = evolution_part_in_loop_num (chrec, loop->num);
3356 tem = op_with_constant_singleton_value_range (step);
3357 if (tem)
3358 step = tem;
3359
3360 /* If STEP is symbolic, we can't know whether INIT will be the
3361 minimum or maximum value in the range. Also, unless INIT is
3362 a simple expression, compare_values and possibly other functions
3363 in tree-vrp won't be able to handle it. */
3364 if (step == NULL_TREE
3365 || !is_gimple_min_invariant (step)
3366 || !valid_value_p (init))
3367 return;
3368
3369 dir = scev_direction (chrec);
3370 if (/* Do not adjust ranges if we do not know whether the iv increases
3371 or decreases, ... */
3372 dir == EV_DIR_UNKNOWN
3373 /* ... or if it may wrap. */
3374 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3375 true))
3376 return;
3377
3378 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3379 negative_overflow_infinity and positive_overflow_infinity,
3380 because we have concluded that the loop probably does not
3381 wrap. */
3382
3383 type = TREE_TYPE (var);
3384 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3385 tmin = lower_bound_in_type (type, type);
3386 else
3387 tmin = TYPE_MIN_VALUE (type);
3388 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3389 tmax = upper_bound_in_type (type, type);
3390 else
3391 tmax = TYPE_MAX_VALUE (type);
3392
3393 /* Try to use estimated number of iterations for the loop to constrain the
3394 final value in the evolution.
3395 We are interested in the number of executions of the latch, while
3396 nb_iterations_upper_bound includes the last execution of the exit test. */
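/* For example (illustration only): if INIT is 0, STEP is 4 and the
   latch is known to execute at most 9 times, the final value is at
   most 0 + 4 * 9 = 36, so the evolution is constrained to [0, 36].  */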
3397 if (TREE_CODE (step) == INTEGER_CST
3398 && loop->any_upper_bound
3399 && !double_int_zero_p (loop->nb_iterations_upper_bound)
3400 && is_gimple_val (init)
3401 && (TREE_CODE (init) != SSA_NAME
3402 || get_value_range (init)->type == VR_RANGE))
3403 {
3404 value_range_t maxvr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3405 double_int dtmp;
3406 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3407 int overflow = 0;
3408
3409 dtmp = double_int_mul_with_sign (tree_to_double_int (step),
3410 double_int_sub (
3411 loop->nb_iterations_upper_bound,
3412 double_int_one),
3413 unsigned_p, &overflow);
3414 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3415 /* If the multiplication overflowed we can't do a meaningful
3416 adjustment. */
3417 if (!overflow && double_int_equal_p (dtmp, tree_to_double_int (tem)))
3418 {
3419 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3420 TREE_TYPE (init), init, tem);
3421 /* Likewise if the addition did. */
3422 if (maxvr.type == VR_RANGE)
3423 {
3424 tmin = maxvr.min;
3425 tmax = maxvr.max;
3426 }
3427 }
3428 }
3429
3430 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3431 {
3432 min = tmin;
3433 max = tmax;
3434
3435 /* For VARYING or UNDEFINED ranges, just about anything we get
3436 from scalar evolutions should be better. */
3437
3438 if (dir == EV_DIR_DECREASES)
3439 max = init;
3440 else
3441 min = init;
3442
3443 /* If we would create an invalid range, then just assume we
3444 know absolutely nothing. This may be over-conservative,
3445 but it's clearly safe, and should happen only in unreachable
3446 parts of code, or for invalid programs. */
3447 if (compare_values (min, max) == 1)
3448 return;
3449
3450 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3451 }
3452 else if (vr->type == VR_RANGE)
3453 {
3454 min = vr->min;
3455 max = vr->max;
3456
3457 if (dir == EV_DIR_DECREASES)
3458 {
3459 /* INIT is the maximum value. If INIT is lower than VR->MAX
3460 but no smaller than VR->MIN, set VR->MAX to INIT. */
3461 if (compare_values (init, max) == -1)
3462 max = init;
3463
3464 /* According to the loop information, the variable does not
3465 overflow. If we think it does, probably because of an
3466 overflow due to arithmetic on a different INF value,
3467 reset now. */
3468 if (is_negative_overflow_infinity (min)
3469 || compare_values (min, tmin) == -1)
3470 min = tmin;
3471
3472 }
3473 else
3474 {
3475 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3476 if (compare_values (init, min) == 1)
3477 min = init;
3478
3479 if (is_positive_overflow_infinity (max)
3480 || compare_values (tmax, max) == -1)
3481 max = tmax;
3482 }
3483
3484 /* If we just created an invalid range with the minimum
3485 greater than the maximum, we fail conservatively.
3486 This should happen only in unreachable
3487 parts of code, or for invalid programs. */
3488 if (compare_values (min, max) == 1)
3489 return;
3490
3491 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3492 }
3493 }
3494
3495 /* Return true if VAR may overflow at STMT. This checks any available
3496 loop information to see if we can determine that VAR does not
3497 overflow. */
3498
3499 static bool
3500 vrp_var_may_overflow (tree var, gimple stmt)
3501 {
3502 struct loop *l;
3503 tree chrec, init, step;
3504
3505 if (current_loops == NULL)
3506 return true;
3507
3508 l = loop_containing_stmt (stmt);
3509 if (l == NULL
3510 || !loop_outer (l))
3511 return true;
3512
3513 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3514 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3515 return true;
3516
3517 init = initial_condition_in_loop_num (chrec, l->num);
3518 step = evolution_part_in_loop_num (chrec, l->num);
3519
3520 if (step == NULL_TREE
3521 || !is_gimple_min_invariant (step)
3522 || !valid_value_p (init))
3523 return true;
3524
3525 /* If we get here, we know something useful about VAR based on the
3526 loop information. If it wraps, it may overflow. */
3527
3528 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3529 true))
3530 return true;
3531
3532 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3533 {
3534 print_generic_expr (dump_file, var, 0);
3535 fprintf (dump_file, ": loop information indicates does not overflow\n");
3536 }
3537
3538 return false;
3539 }
3540
3541
3542 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3543
3544 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3545 all the values in the ranges.
3546
3547 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3548
3549 - Return NULL_TREE if it is not always possible to determine the
3550 value of the comparison.
3551
3552 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3553 overflow infinity was used in the test. */
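/* For example (illustration only): [1, 5] < [10, 20] is always true,
   [1, 15] < [10, 20] cannot be decided and yields NULL_TREE, and
   ~[0, 0] == [0, 0] is always false.  */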
3554
3555
3556 static tree
3557 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3558 bool *strict_overflow_p)
3559 {
3560 /* VARYING or UNDEFINED ranges cannot be compared. */
3561 if (vr0->type == VR_VARYING
3562 || vr0->type == VR_UNDEFINED
3563 || vr1->type == VR_VARYING
3564 || vr1->type == VR_UNDEFINED)
3565 return NULL_TREE;
3566
3567 /* Anti-ranges need to be handled separately. */
3568 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3569 {
3570 /* If both are anti-ranges, then we cannot compute any
3571 comparison. */
3572 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3573 return NULL_TREE;
3574
3575 /* These comparisons are never statically computable. */
3576 if (comp == GT_EXPR
3577 || comp == GE_EXPR
3578 || comp == LT_EXPR
3579 || comp == LE_EXPR)
3580 return NULL_TREE;
3581
3582 /* Equality can be computed only between a range and an
3583 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3584 if (vr0->type == VR_RANGE)
3585 {
3586 /* To simplify processing, make VR0 the anti-range. */
3587 value_range_t *tmp = vr0;
3588 vr0 = vr1;
3589 vr1 = tmp;
3590 }
3591
3592 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3593
3594 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3595 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3596 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3597
3598 return NULL_TREE;
3599 }
3600
3601 if (!usable_range_p (vr0, strict_overflow_p)
3602 || !usable_range_p (vr1, strict_overflow_p))
3603 return NULL_TREE;
3604
3605 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3606 operands around and change the comparison code. */
3607 if (comp == GT_EXPR || comp == GE_EXPR)
3608 {
3609 value_range_t *tmp;
3610 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3611 tmp = vr0;
3612 vr0 = vr1;
3613 vr1 = tmp;
3614 }
3615
3616 if (comp == EQ_EXPR)
3617 {
3618 /* Equality may only be computed if both ranges represent
3619 exactly one value. */
3620 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3621 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3622 {
3623 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3624 strict_overflow_p);
3625 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3626 strict_overflow_p);
3627 if (cmp_min == 0 && cmp_max == 0)
3628 return boolean_true_node;
3629 else if (cmp_min != -2 && cmp_max != -2)
3630 return boolean_false_node;
3631 }
3632 /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, then V0 != V1. */
3633 else if (compare_values_warnv (vr0->min, vr1->max,
3634 strict_overflow_p) == 1
3635 || compare_values_warnv (vr1->min, vr0->max,
3636 strict_overflow_p) == 1)
3637 return boolean_false_node;
3638
3639 return NULL_TREE;
3640 }
3641 else if (comp == NE_EXPR)
3642 {
3643 int cmp1, cmp2;
3644
3645 /* If VR0 is completely to the left or completely to the right
3646 of VR1, they are always different. Notice that we need to
3647 make sure that both comparisons yield similar results to
3648 avoid comparing values that cannot be compared at
3649 compile-time. */
3650 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3651 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3652 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3653 return boolean_true_node;
3654
3655 /* If VR0 and VR1 represent a single value and are identical,
3656 return false. */
3657 else if (compare_values_warnv (vr0->min, vr0->max,
3658 strict_overflow_p) == 0
3659 && compare_values_warnv (vr1->min, vr1->max,
3660 strict_overflow_p) == 0
3661 && compare_values_warnv (vr0->min, vr1->min,
3662 strict_overflow_p) == 0
3663 && compare_values_warnv (vr0->max, vr1->max,
3664 strict_overflow_p) == 0)
3665 return boolean_false_node;
3666
3667 /* Otherwise, they may or may not be different. */
3668 else
3669 return NULL_TREE;
3670 }
3671 else if (comp == LT_EXPR || comp == LE_EXPR)
3672 {
3673 int tst;
3674
3675 /* If VR0 is to the left of VR1, return true. */
3676 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3677 if ((comp == LT_EXPR && tst == -1)
3678 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3679 {
3680 if (overflow_infinity_range_p (vr0)
3681 || overflow_infinity_range_p (vr1))
3682 *strict_overflow_p = true;
3683 return boolean_true_node;
3684 }
3685
3686 /* If VR0 is to the right of VR1, return false. */
3687 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3688 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3689 || (comp == LE_EXPR && tst == 1))
3690 {
3691 if (overflow_infinity_range_p (vr0)
3692 || overflow_infinity_range_p (vr1))
3693 *strict_overflow_p = true;
3694 return boolean_false_node;
3695 }
3696
3697 /* Otherwise, we don't know. */
3698 return NULL_TREE;
3699 }
3700
3701 gcc_unreachable ();
3702 }
3703
3704
3705 /* Given a value range VR, a value VAL and a comparison code COMP, return
3706 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3707 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3708 always returns false. Return NULL_TREE if it is not always
3709 possible to determine the value of the comparison. Also set
3710 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3711 infinity was used in the test. */
3712
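/* For instance, with VR = [0, 9] and VAL = 10, the test VR <= VAL
   compares VR->max against VAL; since 9 < 10 the result is
   boolean_true_node.  For an anti-range such as ~[0, 0], only equality
   tests against a value inside the excluded range can be decided.
   (Illustrative values only.)  */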
3713 static tree
3714 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3715 bool *strict_overflow_p)
3716 {
3717 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3718 return NULL_TREE;
3719
3720 /* Anti-ranges need to be handled separately. */
3721 if (vr->type == VR_ANTI_RANGE)
3722 {
3723 /* For anti-ranges, the only predicates that we can compute at
3724 compile time are equality and inequality. */
3725 if (comp == GT_EXPR
3726 || comp == GE_EXPR
3727 || comp == LT_EXPR
3728 || comp == LE_EXPR)
3729 return NULL_TREE;
3730
3731 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3732 if (value_inside_range (val, vr) == 1)
3733 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3734
3735 return NULL_TREE;
3736 }
3737
3738 if (!usable_range_p (vr, strict_overflow_p))
3739 return NULL_TREE;
3740
3741 if (comp == EQ_EXPR)
3742 {
3743 /* EQ_EXPR may only be computed if VR represents exactly
3744 one value. */
3745 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3746 {
3747 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3748 if (cmp == 0)
3749 return boolean_true_node;
3750 else if (cmp == -1 || cmp == 1 || cmp == 2)
3751 return boolean_false_node;
3752 }
3753 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3754 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3755 return boolean_false_node;
3756
3757 return NULL_TREE;
3758 }
3759 else if (comp == NE_EXPR)
3760 {
3761 /* If VAL is not inside VR, then they are always different. */
3762 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3763 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3764 return boolean_true_node;
3765
3766 /* If VR represents exactly one value equal to VAL, then return
3767 false. */
3768 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3769 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3770 return boolean_false_node;
3771
3772 /* Otherwise, they may or may not be different. */
3773 return NULL_TREE;
3774 }
3775 else if (comp == LT_EXPR || comp == LE_EXPR)
3776 {
3777 int tst;
3778
3779 /* If VR is to the left of VAL, return true. */
3780 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3781 if ((comp == LT_EXPR && tst == -1)
3782 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3783 {
3784 if (overflow_infinity_range_p (vr))
3785 *strict_overflow_p = true;
3786 return boolean_true_node;
3787 }
3788
3789 /* If VR is to the right of VAL, return false. */
3790 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3791 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3792 || (comp == LE_EXPR && tst == 1))
3793 {
3794 if (overflow_infinity_range_p (vr))
3795 *strict_overflow_p = true;
3796 return boolean_false_node;
3797 }
3798
3799 /* Otherwise, we don't know. */
3800 return NULL_TREE;
3801 }
3802 else if (comp == GT_EXPR || comp == GE_EXPR)
3803 {
3804 int tst;
3805
3806 /* If VR is to the right of VAL, return true. */
3807 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3808 if ((comp == GT_EXPR && tst == 1)
3809 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3810 {
3811 if (overflow_infinity_range_p (vr))
3812 *strict_overflow_p = true;
3813 return boolean_true_node;
3814 }
3815
3816 /* If VR is to the left of VAL, return false. */
3817 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3818 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3819 || (comp == GE_EXPR && tst == -1))
3820 {
3821 if (overflow_infinity_range_p (vr))
3822 *strict_overflow_p = true;
3823 return boolean_false_node;
3824 }
3825
3826 /* Otherwise, we don't know. */
3827 return NULL_TREE;
3828 }
3829
3830 gcc_unreachable ();
3831 }
3832
3833
3834 /* Debugging dumps. */
3835
3836 void dump_value_range (FILE *, value_range_t *);
3837 void debug_value_range (value_range_t *);
3838 void dump_all_value_ranges (FILE *);
3839 void debug_all_value_ranges (void);
3840 void dump_vr_equiv (FILE *, bitmap);
3841 void debug_vr_equiv (bitmap);
3842
3843
3844 /* Dump value range VR to FILE. */
3845
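/* A VR_RANGE with one equivalence prints roughly as
     [0, 42]  EQUIVALENCES: { x_7 } (1 elements)
   and an anti-range excluding zero prints as ~[0, 0].  (Sample output
   only.)  */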
3846 void
3847 dump_value_range (FILE *file, value_range_t *vr)
3848 {
3849 if (vr == NULL)
3850 fprintf (file, "[]");
3851 else if (vr->type == VR_UNDEFINED)
3852 fprintf (file, "UNDEFINED");
3853 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3854 {
3855 tree type = TREE_TYPE (vr->min);
3856
3857 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3858
3859 if (is_negative_overflow_infinity (vr->min))
3860 fprintf (file, "-INF(OVF)");
3861 else if (INTEGRAL_TYPE_P (type)
3862 && !TYPE_UNSIGNED (type)
3863 && vrp_val_is_min (vr->min))
3864 fprintf (file, "-INF");
3865 else
3866 print_generic_expr (file, vr->min, 0);
3867
3868 fprintf (file, ", ");
3869
3870 if (is_positive_overflow_infinity (vr->max))
3871 fprintf (file, "+INF(OVF)");
3872 else if (INTEGRAL_TYPE_P (type)
3873 && vrp_val_is_max (vr->max))
3874 fprintf (file, "+INF");
3875 else
3876 print_generic_expr (file, vr->max, 0);
3877
3878 fprintf (file, "]");
3879
3880 if (vr->equiv)
3881 {
3882 bitmap_iterator bi;
3883 unsigned i, c = 0;
3884
3885 fprintf (file, " EQUIVALENCES: { ");
3886
3887 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3888 {
3889 print_generic_expr (file, ssa_name (i), 0);
3890 fprintf (file, " ");
3891 c++;
3892 }
3893
3894 fprintf (file, "} (%u elements)", c);
3895 }
3896 }
3897 else if (vr->type == VR_VARYING)
3898 fprintf (file, "VARYING");
3899 else
3900 fprintf (file, "INVALID RANGE");
3901 }
3902
3903
3904 /* Dump value range VR to stderr. */
3905
3906 DEBUG_FUNCTION void
3907 debug_value_range (value_range_t *vr)
3908 {
3909 dump_value_range (stderr, vr);
3910 fprintf (stderr, "\n");
3911 }
3912
3913
3914 /* Dump value ranges of all SSA_NAMEs to FILE. */
3915
3916 void
3917 dump_all_value_ranges (FILE *file)
3918 {
3919 size_t i;
3920
3921 for (i = 0; i < num_ssa_names; i++)
3922 {
3923 if (vr_value[i])
3924 {
3925 print_generic_expr (file, ssa_name (i), 0);
3926 fprintf (file, ": ");
3927 dump_value_range (file, vr_value[i]);
3928 fprintf (file, "\n");
3929 }
3930 }
3931
3932 fprintf (file, "\n");
3933 }
3934
3935
3936 /* Dump all value ranges to stderr. */
3937
3938 DEBUG_FUNCTION void
3939 debug_all_value_ranges (void)
3940 {
3941 dump_all_value_ranges (stderr);
3942 }
3943
3944
3945 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3946 create a new SSA name N and return the assertion assignment
3947 'V = ASSERT_EXPR <V, V OP W>'. */
3948
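/* For instance, for COND 'x_3 > 10' and V x_3, this creates the new
   name x_4 and the statement 'x_4 = ASSERT_EXPR <x_3, x_3 > 10>', and
   records the mapping from x_4 to x_3 so the SSA web can be fixed up
   after all assertions have been inserted.  (Illustrative names only.)  */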
3949 static gimple
3950 build_assert_expr_for (tree cond, tree v)
3951 {
3952 tree n;
3953 gimple assertion;
3954
3955 gcc_assert (TREE_CODE (v) == SSA_NAME);
3956 n = duplicate_ssa_name (v, NULL);
3957
3958 if (COMPARISON_CLASS_P (cond))
3959 {
3960 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3961 assertion = gimple_build_assign (n, a);
3962 }
3963 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
3964 {
3965 /* Given !V, build the assignment N = false. */
3966 tree op0 = TREE_OPERAND (cond, 0);
3967 gcc_assert (op0 == v);
3968 assertion = gimple_build_assign (n, boolean_false_node);
3969 }
3970 else if (TREE_CODE (cond) == SSA_NAME)
3971 {
3972 /* Given V, build the assignment N = true. */
3973 gcc_assert (v == cond);
3974 assertion = gimple_build_assign (n, boolean_true_node);
3975 }
3976 else
3977 gcc_unreachable ();
3978
3979 SSA_NAME_DEF_STMT (n) = assertion;
3980
3981 /* The new ASSERT_EXPR creates a new SSA name that replaces the
3982 operand of the ASSERT_EXPR. Register the new name and the old one
3983 in the replacement table so that we can fix the SSA web after
3984 adding all the ASSERT_EXPRs. */
3985 register_new_name_mapping (n, v);
3986
3987 return assertion;
3988 }
3989
3990
3991 /* Return true if STMT is a conditional whose predicate involves
3992 floating point values. */
3993
3994 static inline bool
3995 fp_predicate (gimple stmt)
3996 {
3997 GIMPLE_CHECK (stmt, GIMPLE_COND);
3998
3999 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4000 }
4001
4002
4003 /* If the range of values taken by OP can be inferred after STMT executes,
4004 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4005 describe the inferred range. Return true if a range could be
4006 inferred. */
4007
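/* For instance, for a statement '*p_1 = 0' with
   -fdelete-null-pointer-checks in effect, the dereference of p_1 lets
   us infer 'p_1 != 0', so *COMP_CODE_P is set to NE_EXPR and *VAL_P to
   the zero pointer constant.  (Illustrative statement only.)  */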
4008 static bool
4009 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4010 {
4011 *val_p = NULL_TREE;
4012 *comp_code_p = ERROR_MARK;
4013
4014 /* Do not attempt to infer anything in names that flow through
4015 abnormal edges. */
4016 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4017 return false;
4018
4019 /* Similarly, don't infer anything from statements that may throw
4020 exceptions. */
4021 if (stmt_could_throw_p (stmt))
4022 return false;
4023
4024 /* If STMT is the last statement of a basic block with no
4025 successors, there is no point inferring anything about any of its
4026 operands. We would not be able to find a proper insertion point
4027 for the assertion, anyway. */
4028 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4029 return false;
4030
4031 /* We can only assume that a pointer dereference will yield
4032 non-NULL if -fdelete-null-pointer-checks is enabled. */
4033 if (flag_delete_null_pointer_checks
4034 && POINTER_TYPE_P (TREE_TYPE (op))
4035 && gimple_code (stmt) != GIMPLE_ASM)
4036 {
4037 unsigned num_uses, num_loads, num_stores;
4038
4039 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4040 if (num_loads + num_stores > 0)
4041 {
4042 *val_p = build_int_cst (TREE_TYPE (op), 0);
4043 *comp_code_p = NE_EXPR;
4044 return true;
4045 }
4046 }
4047
4048 return false;
4049 }
4050
4051
4052 void dump_asserts_for (FILE *, tree);
4053 void debug_asserts_for (tree);
4054 void dump_all_asserts (FILE *);
4055 void debug_all_asserts (void);
4056
4057 /* Dump all the registered assertions for NAME to FILE. */
4058
4059 void
4060 dump_asserts_for (FILE *file, tree name)
4061 {
4062 assert_locus_t loc;
4063
4064 fprintf (file, "Assertions to be inserted for ");
4065 print_generic_expr (file, name, 0);
4066 fprintf (file, "\n");
4067
4068 loc = asserts_for[SSA_NAME_VERSION (name)];
4069 while (loc)
4070 {
4071 fprintf (file, "\t");
4072 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4073 fprintf (file, "\n\tBB #%d", loc->bb->index);
4074 if (loc->e)
4075 {
4076 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4077 loc->e->dest->index);
4078 dump_edge_info (file, loc->e, 0);
4079 }
4080 fprintf (file, "\n\tPREDICATE: ");
4081 print_generic_expr (file, name, 0);
4082 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4083 print_generic_expr (file, loc->val, 0);
4084 fprintf (file, "\n\n");
4085 loc = loc->next;
4086 }
4087
4088 fprintf (file, "\n");
4089 }
4090
4091
4092 /* Dump all the registered assertions for NAME to stderr. */
4093
4094 DEBUG_FUNCTION void
4095 debug_asserts_for (tree name)
4096 {
4097 dump_asserts_for (stderr, name);
4098 }
4099
4100
4101 /* Dump all the registered assertions for all the names to FILE. */
4102
4103 void
4104 dump_all_asserts (FILE *file)
4105 {
4106 unsigned i;
4107 bitmap_iterator bi;
4108
4109 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4110 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4111 dump_asserts_for (file, ssa_name (i));
4112 fprintf (file, "\n");
4113 }
4114
4115
4116 /* Dump all the registered assertions for all the names to stderr. */
4117
4118 DEBUG_FUNCTION void
4119 debug_all_asserts (void)
4120 {
4121 dump_all_asserts (stderr);
4122 }
4123
4124
4125 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4126 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4127 E->DEST, then register this location as a possible insertion point
4128 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4129
4130 BB, E and SI provide the exact insertion point for the new
4131 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4132 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4133 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4134 must not be NULL. */
4135
4136 static void
4137 register_new_assert_for (tree name, tree expr,
4138 enum tree_code comp_code,
4139 tree val,
4140 basic_block bb,
4141 edge e,
4142 gimple_stmt_iterator si)
4143 {
4144 assert_locus_t n, loc, last_loc;
4145 basic_block dest_bb;
4146
4147 gcc_checking_assert (bb == NULL || e == NULL);
4148
4149 if (e == NULL)
4150 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4151 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4152
4153 /* Never build an assert comparing against an integer constant with
4154 TREE_OVERFLOW set. This confuses our undefined overflow warning
4155 machinery. */
4156 if (TREE_CODE (val) == INTEGER_CST
4157 && TREE_OVERFLOW (val))
4158 val = build_int_cst_wide (TREE_TYPE (val),
4159 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4160
4161 /* The new assertion A will be inserted at BB or E. We need to
4162 determine if the new location is dominated by a previously
4163 registered location for A. If we are doing an edge insertion,
4164 assume that A will be inserted at E->DEST. Note that this is not
4165 necessarily true.
4166
4167 If E is a critical edge, it will be split. But even if E is
4168 split, the new block will dominate the same set of blocks that
4169 E->DEST dominates.
4170
4171 The reverse, however, is not true: blocks dominated by E->DEST
4172 will not be dominated by the new block created to split E. So,
4173 if the insertion location is on a critical edge, we will not use
4174 the new location to move another assertion previously registered
4175 at a block dominated by E->DEST. */
4176 dest_bb = (bb) ? bb : e->dest;
4177
4178 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4179 VAL at a block dominating DEST_BB, then we don't need to insert a new
4180 one. Similarly, if the same assertion already exists at a block
4181 dominated by DEST_BB and the new location is not on a critical
4182 edge, then update the existing location for the assertion (i.e.,
4183 move the assertion up in the dominance tree).
4184
4185 Note, this is implemented as a simple linked list because there
4186 should not be more than a handful of assertions registered per
4187 name. If this becomes a performance problem, a table hashed by
4188 COMP_CODE and VAL could be implemented. */
4189 loc = asserts_for[SSA_NAME_VERSION (name)];
4190 last_loc = loc;
4191 while (loc)
4192 {
4193 if (loc->comp_code == comp_code
4194 && (loc->val == val
4195 || operand_equal_p (loc->val, val, 0))
4196 && (loc->expr == expr
4197 || operand_equal_p (loc->expr, expr, 0)))
4198 {
4199 /* If the assertion NAME COMP_CODE VAL has already been
4200 registered at a basic block that dominates DEST_BB, then
4201 we don't need to insert the same assertion again. Note
4202 that we don't check strict dominance here to avoid
4203 replicating the same assertion inside the same basic
4204 block more than once (e.g., when a pointer is
4205 dereferenced several times inside a block).
4206
4207 An exception to this rule are edge insertions. If the
4208 new assertion is to be inserted on edge E, then it will
4209 dominate all the other insertions that we may want to
4210 insert in DEST_BB. So, if we are doing an edge
4211 insertion, don't do this dominance check. */
4212 if (e == NULL
4213 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4214 return;
4215
4216 /* Otherwise, if E is not a critical edge and DEST_BB
4217 dominates the existing location for the assertion, move
4218 the assertion up in the dominance tree by updating its
4219 location information. */
4220 if ((e == NULL || !EDGE_CRITICAL_P (e))
4221 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4222 {
4223 loc->bb = dest_bb;
4224 loc->e = e;
4225 loc->si = si;
4226 return;
4227 }
4228 }
4229
4230 /* Update the last node of the list and move to the next one. */
4231 last_loc = loc;
4232 loc = loc->next;
4233 }
4234
4235 /* If we didn't find an assertion already registered for
4236 NAME COMP_CODE VAL, add a new one at the end of the list of
4237 assertions associated with NAME. */
4238 n = XNEW (struct assert_locus_d);
4239 n->bb = dest_bb;
4240 n->e = e;
4241 n->si = si;
4242 n->comp_code = comp_code;
4243 n->val = val;
4244 n->expr = expr;
4245 n->next = NULL;
4246
4247 if (last_loc)
4248 last_loc->next = n;
4249 else
4250 asserts_for[SSA_NAME_VERSION (name)] = n;
4251
4252 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4253 }
4254
4255 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4256 Extract a suitable test code and value and store them into *CODE_P and
4257 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4258
4259 If no extraction was possible, return FALSE, otherwise return TRUE.
4260
4261 If INVERT is true, then we invert the result stored into *CODE_P. */
4262
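/* For instance, for NAME x_2 and the condition '10 < x_2', the
   comparison is flipped to 'x_2 > 10', so *CODE_P becomes GT_EXPR and
   *VAL_P becomes 10; with INVERT true (the else edge) the code is
   further inverted to LE_EXPR.  (Illustrative names only.)  */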
4263 static bool
4264 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4265 tree cond_op0, tree cond_op1,
4266 bool invert, enum tree_code *code_p,
4267 tree *val_p)
4268 {
4269 enum tree_code comp_code;
4270 tree val;
4271
4272 /* We have a comparison of the form NAME COMP VAL
4273 or VAL COMP NAME. */
4274 if (name == cond_op1)
4275 {
4276 /* If the predicate is of the form VAL COMP NAME, flip
4277 COMP around because we need to register NAME as the
4278 first operand in the predicate. */
4279 comp_code = swap_tree_comparison (cond_code);
4280 val = cond_op0;
4281 }
4282 else
4283 {
4284 /* The comparison is of the form NAME COMP VAL, so the
4285 comparison code remains unchanged. */
4286 comp_code = cond_code;
4287 val = cond_op1;
4288 }
4289
4290 /* Invert the comparison code as necessary. */
4291 if (invert)
4292 comp_code = invert_tree_comparison (comp_code, 0);
4293
4294 /* VRP does not handle float types. */
4295 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4296 return false;
4297
4298 /* Do not register always-false predicates.
4299 FIXME: this works around a limitation in fold() when dealing with
4300 enumerations. Given 'enum { N1, N2 } x;', fold will not
4301 fold 'if (x > N2)' to 'if (0)'. */
4302 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4303 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4304 {
4305 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4306 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4307
4308 if (comp_code == GT_EXPR
4309 && (!max
4310 || compare_values (val, max) == 0))
4311 return false;
4312
4313 if (comp_code == LT_EXPR
4314 && (!min
4315 || compare_values (val, min) == 0))
4316 return false;
4317 }
4318 *code_p = comp_code;
4319 *val_p = val;
4320 return true;
4321 }
4322
4323 /* Try to register an edge assertion for SSA name NAME on edge E for
4324 the condition COND contributing to the conditional jump pointed to by BSI.
4325 Invert the condition COND if INVERT is true.
4326 Return true if an assertion for NAME could be registered. */
4327
4328 static bool
4329 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4330 enum tree_code cond_code,
4331 tree cond_op0, tree cond_op1, bool invert)
4332 {
4333 tree val;
4334 enum tree_code comp_code;
4335 bool retval = false;
4336
4337 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4338 cond_op0,
4339 cond_op1,
4340 invert, &comp_code, &val))
4341 return false;
4342
4343 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4344 reachable from E. */
4345 if (live_on_edge (e, name)
4346 && !has_single_use (name))
4347 {
4348 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4349 retval = true;
4350 }
4351
4352 /* In the case of NAME <= CST and NAME being defined as
4353 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4354 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4355 This catches range and anti-range tests. */
4356 if ((comp_code == LE_EXPR
4357 || comp_code == GT_EXPR)
4358 && TREE_CODE (val) == INTEGER_CST
4359 && TYPE_UNSIGNED (TREE_TYPE (val)))
4360 {
4361 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4362 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4363
4364 /* Extract CST2 from the (optional) addition. */
4365 if (is_gimple_assign (def_stmt)
4366 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4367 {
4368 name2 = gimple_assign_rhs1 (def_stmt);
4369 cst2 = gimple_assign_rhs2 (def_stmt);
4370 if (TREE_CODE (name2) == SSA_NAME
4371 && TREE_CODE (cst2) == INTEGER_CST)
4372 def_stmt = SSA_NAME_DEF_STMT (name2);
4373 }
4374
4375 /* Extract NAME2 from the (optional) sign-changing cast. */
4376 if (gimple_assign_cast_p (def_stmt))
4377 {
4378 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4379 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4380 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4381 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4382 name3 = gimple_assign_rhs1 (def_stmt);
4383 }
4384
4385 /* If name3 is used later, create an ASSERT_EXPR for it. */
4386 if (name3 != NULL_TREE
4387 && TREE_CODE (name3) == SSA_NAME
4388 && (cst2 == NULL_TREE
4389 || TREE_CODE (cst2) == INTEGER_CST)
4390 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4391 && live_on_edge (e, name3)
4392 && !has_single_use (name3))
4393 {
4394 tree tmp;
4395
4396 /* Build an expression for the range test. */
4397 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4398 if (cst2 != NULL_TREE)
4399 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4400
4401 if (dump_file)
4402 {
4403 fprintf (dump_file, "Adding assert for ");
4404 print_generic_expr (dump_file, name3, 0);
4405 fprintf (dump_file, " from ");
4406 print_generic_expr (dump_file, tmp, 0);
4407 fprintf (dump_file, "\n");
4408 }
4409
4410 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4411
4412 retval = true;
4413 }
4414
4415 /* If name2 is used later, create an ASSERT_EXPR for it. */
4416 if (name2 != NULL_TREE
4417 && TREE_CODE (name2) == SSA_NAME
4418 && TREE_CODE (cst2) == INTEGER_CST
4419 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4420 && live_on_edge (e, name2)
4421 && !has_single_use (name2))
4422 {
4423 tree tmp;
4424
4425 /* Build an expression for the range test. */
4426 tmp = name2;
4427 if (TREE_TYPE (name) != TREE_TYPE (name2))
4428 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4429 if (cst2 != NULL_TREE)
4430 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4431
4432 if (dump_file)
4433 {
4434 fprintf (dump_file, "Adding assert for ");
4435 print_generic_expr (dump_file, name2, 0);
4436 fprintf (dump_file, " from ");
4437 print_generic_expr (dump_file, tmp, 0);
4438 fprintf (dump_file, "\n");
4439 }
4440
4441 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4442
4443 retval = true;
4444 }
4445 }
4446
4447 return retval;
4448 }
4449
4450 /* OP is an operand of a truth value expression which is known to have
4451 a particular value. Register any asserts for OP and for any
4452 operands in OP's defining statement.
4453
4454 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
4455 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
4456
4457 static bool
4458 register_edge_assert_for_1 (tree op, enum tree_code code,
4459 edge e, gimple_stmt_iterator bsi)
4460 {
4461 bool retval = false;
4462 gimple op_def;
4463 tree val;
4464 enum tree_code rhs_code;
4465
4466 /* We only care about SSA_NAMEs. */
4467 if (TREE_CODE (op) != SSA_NAME)
4468 return false;
4469
4470 /* We know that OP will have a zero or nonzero value. If OP is used
4471 more than once go ahead and register an assert for OP.
4472
4473 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4474 it will always be set for OP (because OP is used in a COND_EXPR in
4475 the subgraph). */
4476 if (!has_single_use (op))
4477 {
4478 val = build_int_cst (TREE_TYPE (op), 0);
4479 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4480 retval = true;
4481 }
4482
4483 /* Now look at how OP is set. If it's set from a comparison,
4484 a truth operation or some bit operations, then we may be able
4485 to register information about the operands of that assignment. */
4486 op_def = SSA_NAME_DEF_STMT (op);
4487 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4488 return retval;
4489
4490 rhs_code = gimple_assign_rhs_code (op_def);
4491
4492 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4493 {
4494 bool invert = (code == EQ_EXPR ? true : false);
4495 tree op0 = gimple_assign_rhs1 (op_def);
4496 tree op1 = gimple_assign_rhs2 (op_def);
4497
4498 if (TREE_CODE (op0) == SSA_NAME)
4499 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4500 invert);
4501 if (TREE_CODE (op1) == SSA_NAME)
4502 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4503 invert);
4504 }
4505 else if ((code == NE_EXPR
4506 && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
4507 || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
4508 || (code == EQ_EXPR
4509 && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
4510 || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
4511 {
4512 /* Recurse on each operand. */
4513 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4514 code, e, bsi);
4515 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4516 code, e, bsi);
4517 }
4518 else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
4519 {
4520 /* Recurse, flipping CODE. */
4521 code = invert_tree_comparison (code, false);
4522 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4523 code, e, bsi);
4524 }
4525 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4526 {
4527 /* Recurse through the copy. */
4528 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4529 code, e, bsi);
4530 }
4531 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4532 {
4533 /* Recurse through the type conversion. */
4534 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4535 code, e, bsi);
4536 }
4537
4538 return retval;
4539 }
4540
4541 /* Try to register an edge assertion for SSA name NAME on edge E for
4542 the condition COND contributing to the conditional jump pointed to by SI.
4543 Return true if an assertion for NAME could be registered. */
4544
4545 static bool
4546 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4547 enum tree_code cond_code, tree cond_op0,
4548 tree cond_op1)
4549 {
4550 tree val;
4551 enum tree_code comp_code;
4552 bool retval = false;
4553 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4554
4555 /* Do not attempt to infer anything in names that flow through
4556 abnormal edges. */
4557 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4558 return false;
4559
4560 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4561 cond_op0, cond_op1,
4562 is_else_edge,
4563 &comp_code, &val))
4564 return false;
4565
4566 /* Register ASSERT_EXPRs for name. */
4567 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4568 cond_op1, is_else_edge);
4569
4570
4571 /* If COND is effectively an equality test of an SSA_NAME against
4572 the value zero or one, then we may be able to assert values
4573 for SSA_NAMEs which flow into COND. */
4574
4575 /* In the case of NAME == 1 or NAME != 0, if the defining statement of
4576 NAME is a TRUTH_AND_EXPR, we can assert that both operands of the
4577 TRUTH_AND_EXPR have nonzero value. */
4578 if (((comp_code == EQ_EXPR && integer_onep (val))
4579 || (comp_code == NE_EXPR && integer_zerop (val))))
4580 {
4581 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4582
4583 if (is_gimple_assign (def_stmt)
4584 && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
4585 || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
4586 {
4587 tree op0 = gimple_assign_rhs1 (def_stmt);
4588 tree op1 = gimple_assign_rhs2 (def_stmt);
4589 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4590 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4591 }
4592 }
4593
4594 /* In the case of NAME == 0 or NAME != 1, if the defining statement of
4595 NAME is a TRUTH_OR_EXPR, we can assert that both operands of the
4596 TRUTH_OR_EXPR have zero value. */
4597 if (((comp_code == EQ_EXPR && integer_zerop (val))
4598 || (comp_code == NE_EXPR && integer_onep (val))))
4599 {
4600 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4601
4602 if (is_gimple_assign (def_stmt)
4603 && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
4604 /* For BIT_IOR_EXPR, both operands necessarily have zero
4605 value only if NAME == 0. */
4606 || (comp_code == EQ_EXPR
4607 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
4608 {
4609 tree op0 = gimple_assign_rhs1 (def_stmt);
4610 tree op1 = gimple_assign_rhs2 (def_stmt);
4611 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4612 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4613 }
4614 }
4615
4616 return retval;
4617 }
4618
4619
4620 /* Determine whether the outgoing edges of BB should receive an
4621 ASSERT_EXPR for each of the operands of BB's LAST statement.
4622 The last statement of BB must be a COND_EXPR.
4623
4624 If any of the sub-graphs rooted at BB have an interesting use of
4625 the predicate operands, an assert location node is added to the
4626 list of assertions for the corresponding operands. */
4627
4628 static bool
4629 find_conditional_asserts (basic_block bb, gimple last)
4630 {
4631 bool need_assert;
4632 gimple_stmt_iterator bsi;
4633 tree op;
4634 edge_iterator ei;
4635 edge e;
4636 ssa_op_iter iter;
4637
4638 need_assert = false;
4639 bsi = gsi_for_stmt (last);
4640
4641 /* Look for uses of the operands in each of the sub-graphs
4642 rooted at BB. We need to check each of the outgoing edges
4643 separately, so that we know what kind of ASSERT_EXPR to
4644 insert. */
4645 FOR_EACH_EDGE (e, ei, bb->succs)
4646 {
4647 if (e->dest == bb)
4648 continue;
4649
4650 /* Register the necessary assertions for each operand in the
4651 conditional predicate. */
4652 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4653 {
4654 need_assert |= register_edge_assert_for (op, e, bsi,
4655 gimple_cond_code (last),
4656 gimple_cond_lhs (last),
4657 gimple_cond_rhs (last));
4658 }
4659 }
4660
4661 return need_assert;
4662 }
4663
4664 /* Compare two case labels sorting first by the destination label uid
4665 and then by the case value. */
4666
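/* For instance, labels 'default:', 'case 1:' and 'case 3:' that all
   branch to the same block sort with the default first, followed by
   the cases in increasing order of their CASE_LOW value.  */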
4667 static int
4668 compare_case_labels (const void *p1, const void *p2)
4669 {
4670 const_tree const case1 = *(const_tree const*)p1;
4671 const_tree const case2 = *(const_tree const*)p2;
4672 unsigned int uid1 = DECL_UID (CASE_LABEL (case1));
4673 unsigned int uid2 = DECL_UID (CASE_LABEL (case2));
4674
4675 if (uid1 < uid2)
4676 return -1;
4677 else if (uid1 == uid2)
4678 {
4679 /* Make sure the default label is first in a group. */
4680 if (!CASE_LOW (case1))
4681 return -1;
4682 else if (!CASE_LOW (case2))
4683 return 1;
4684 else
4685 return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
4686 }
4687 else
4688 return 1;
4689 }
4690
4691 /* Determine whether the outgoing edges of BB should receive an
4692 ASSERT_EXPR for each of the operands of BB's LAST statement.
4693 The last statement of BB must be a SWITCH_EXPR.
4694
4695 If any of the sub-graphs rooted at BB have an interesting use of
4696 the predicate operands, an assert location node is added to the
4697 list of assertions for the corresponding operands. */
4698
4699 static bool
4700 find_switch_asserts (basic_block bb, gimple last)
4701 {
4702 bool need_assert;
4703 gimple_stmt_iterator bsi;
4704 tree op;
4705 edge e;
4706 tree vec2;
4707 size_t n = gimple_switch_num_labels(last);
4708 #if GCC_VERSION >= 4000
4709 unsigned int idx;
4710 #else
4711 /* Work around GCC 3.4 bug (PR 37086). */
4712 volatile unsigned int idx;
4713 #endif
4714
4715 need_assert = false;
4716 bsi = gsi_for_stmt (last);
4717 op = gimple_switch_index (last);
4718 if (TREE_CODE (op) != SSA_NAME)
4719 return false;
4720
4721 /* Build a vector of case labels sorted by destination label. */
4722 vec2 = make_tree_vec (n);
4723 for (idx = 0; idx < n; ++idx)
4724 TREE_VEC_ELT (vec2, idx) = gimple_switch_label (last, idx);
4725 qsort (&TREE_VEC_ELT (vec2, 0), n, sizeof (tree), compare_case_labels);
4726
4727 for (idx = 0; idx < n; ++idx)
4728 {
4729 tree min, max;
4730 tree cl = TREE_VEC_ELT (vec2, idx);
4731
4732 min = CASE_LOW (cl);
4733 max = CASE_HIGH (cl);
4734
4735 /* If there are multiple case labels with the same destination
4736 we need to combine them to a single value range for the edge. */
4737 if (idx + 1 < n
4738 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx + 1)))
4739 {
4740 /* Skip labels until the last of the group. */
4741 do {
4742 ++idx;
4743 } while (idx < n
4744 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx)));
4745 --idx;
4746
4747 /* Pick up the maximum of the case label range. */
4748 if (CASE_HIGH (TREE_VEC_ELT (vec2, idx)))
4749 max = CASE_HIGH (TREE_VEC_ELT (vec2, idx));
4750 else
4751 max = CASE_LOW (TREE_VEC_ELT (vec2, idx));
4752 }
4753
4754 /* Nothing to do if the range includes the default label until we
4755 can register anti-ranges. */
4756 if (min == NULL_TREE)
4757 continue;
4758
4759 /* Find the edge to register the assert expr on. */
4760 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
4761
4762 /* Register the necessary assertions for the operand in the
4763 SWITCH_EXPR. */
4764 need_assert |= register_edge_assert_for (op, e, bsi,
4765 max ? GE_EXPR : EQ_EXPR,
4766 op,
4767 fold_convert (TREE_TYPE (op),
4768 min));
4769 if (max)
4770 {
4771 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4772 op,
4773 fold_convert (TREE_TYPE (op),
4774 max));
4775 }
4776 }
4777
4778 return need_assert;
4779 }
4780
4781
4782 /* Traverse all the statements in block BB looking for statements that
4783 may generate useful assertions for the SSA names in their operand.
4784 If a statement produces a useful assertion A for name N_i, then the
4785 list of assertions already generated for N_i is scanned to
4786 determine if A is actually needed.
4787
4788 If N_i already had the assertion A at a location dominating the
4789 current location, then nothing needs to be done. Otherwise, the
4790 new location for A is recorded instead.
4791
4792 1- For every statement S in BB, all the variables used by S are
4793 added to bitmap FOUND_IN_SUBGRAPH.
4794
4795 2- If statement S uses an operand N in a way that exposes a known
4796 value range for N, then if N was not already generated by an
4797 ASSERT_EXPR, create a new assert location for N. For instance,
4798 if N is a pointer and the statement dereferences it, we can
4799 assume that N is not NULL.
4800
4801 3- COND_EXPRs are a special case of #2. We can derive range
4802 information from the predicate but need to insert different
4803 ASSERT_EXPRs for each of the sub-graphs rooted at the
4804 conditional block. If the last statement of BB is a conditional
4805 expression of the form 'X op Y', then
4806
4807 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4808
4809 b) If the conditional is the only entry point to the sub-graph
4810 corresponding to the THEN_CLAUSE, recurse into it. On
4811 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4812 an ASSERT_EXPR is added for the corresponding variable.
4813
4814 c) Repeat step (b) on the ELSE_CLAUSE.
4815
4816 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4817
4818 For instance,
4819
4820 if (a == 9)
4821 b = a;
4822 else
4823 b = c + 1;
4824
4825 In this case, an assertion on the THEN clause is useful to
4826 determine that 'a' is always 9 on that edge. However, an assertion
4827 on the ELSE clause would be unnecessary.
4828
4829 4- If BB does not end in a conditional expression, then we recurse
4830 into BB's dominator children.
4831
4832 At the end of the recursive traversal, every SSA name will have a
4833 list of locations where ASSERT_EXPRs should be added. When a new
4834 location for name N is found, it is registered by calling
4835 register_new_assert_for. That function keeps track of all the
4836 registered assertions to prevent adding unnecessary assertions.
4837 For instance, if a pointer P_4 is dereferenced more than once in a
4838 dominator tree, only the location dominating all the dereferences of
4839 P_4 will receive an ASSERT_EXPR.
4840
4841 If this function returns true, then it means that there are names
4842 for which we need to generate ASSERT_EXPRs. Those assertions are
4843 inserted by process_assert_insertions. */
4844
4845 static bool
4846 find_assert_locations_1 (basic_block bb, sbitmap live)
4847 {
4848 gimple_stmt_iterator si;
4849 gimple last;
4850 gimple phi;
4851 bool need_assert;
4852
4853 need_assert = false;
4854 last = last_stmt (bb);
4855
4856 /* If BB's last statement is a conditional statement involving integer
4857 operands, determine if we need to add ASSERT_EXPRs. */
4858 if (last
4859 && gimple_code (last) == GIMPLE_COND
4860 && !fp_predicate (last)
4861 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4862 need_assert |= find_conditional_asserts (bb, last);
4863
4864 /* If BB's last statement is a switch statement involving integer
4865 operands, determine if we need to add ASSERT_EXPRs. */
4866 if (last
4867 && gimple_code (last) == GIMPLE_SWITCH
4868 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4869 need_assert |= find_switch_asserts (bb, last);
4870
4871 /* Traverse all the statements in BB marking used names and looking
4872 for statements that may infer assertions for their used operands. */
4873 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4874 {
4875 gimple stmt;
4876 tree op;
4877 ssa_op_iter i;
4878
4879 stmt = gsi_stmt (si);
4880
4881 if (is_gimple_debug (stmt))
4882 continue;
4883
4884 /* See if we can derive an assertion for any of STMT's operands. */
4885 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4886 {
4887 tree value;
4888 enum tree_code comp_code;
4889
4890 /* Mark OP in our live bitmap. */
4891 SET_BIT (live, SSA_NAME_VERSION (op));
4892
4893 /* If OP is used in such a way that we can infer a value
4894 range for it, and we don't find a previous assertion for
4895 it, create a new assertion location node for OP. */
4896 if (infer_value_range (stmt, op, &comp_code, &value))
4897 {
4898 /* If we are able to infer a nonzero value range for OP,
4899 then walk backwards through the use-def chain to see if OP
4900 was set via a typecast.
4901
4902 If so, then we can also infer a nonzero value range
4903 for the operand of the NOP_EXPR. */
4904 if (comp_code == NE_EXPR && integer_zerop (value))
4905 {
4906 tree t = op;
4907 gimple def_stmt = SSA_NAME_DEF_STMT (t);
4908
4909 while (is_gimple_assign (def_stmt)
4910 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
4911 && TREE_CODE
4912 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4913 && POINTER_TYPE_P
4914 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4915 {
4916 t = gimple_assign_rhs1 (def_stmt);
4917 def_stmt = SSA_NAME_DEF_STMT (t);
4918
4919 /* Note we want to register the assert for the
4920 operand of the NOP_EXPR after SI, not after the
4921 conversion. */
4922 if (! has_single_use (t))
4923 {
4924 register_new_assert_for (t, t, comp_code, value,
4925 bb, NULL, si);
4926 need_assert = true;
4927 }
4928 }
4929 }
4930
4931 /* If OP is used only once, namely in this STMT, don't
4932 bother creating an ASSERT_EXPR for it. Such an
4933 ASSERT_EXPR would do nothing but increase compile time. */
4934 if (!has_single_use (op))
4935 {
4936 register_new_assert_for (op, op, comp_code, value,
4937 bb, NULL, si);
4938 need_assert = true;
4939 }
4940 }
4941 }
4942 }
4943
4944 /* Traverse all PHI nodes in BB marking used operands. */
4945 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
4946 {
4947 use_operand_p arg_p;
4948 ssa_op_iter i;
4949 phi = gsi_stmt (si);
4950
4951 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4952 {
4953 tree arg = USE_FROM_PTR (arg_p);
4954 if (TREE_CODE (arg) == SSA_NAME)
4955 SET_BIT (live, SSA_NAME_VERSION (arg));
4956 }
4957 }
4958
4959 return need_assert;
4960 }
4961
4962 /* Do an RPO walk over the function computing SSA name liveness
4963 on-the-fly and deciding on assert expressions to insert.
4964 Returns true if there are assert expressions to be inserted. */
4965
4966 static bool
4967 find_assert_locations (void)
4968 {
4969 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4970 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4971 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4972 int rpo_cnt, i;
4973 bool need_asserts;
4974
4975 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
4976 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4977 for (i = 0; i < rpo_cnt; ++i)
4978 bb_rpo[rpo[i]] = i;
4979
4980 need_asserts = false;
4981 for (i = rpo_cnt-1; i >= 0; --i)
4982 {
4983 basic_block bb = BASIC_BLOCK (rpo[i]);
4984 edge e;
4985 edge_iterator ei;
4986
4987 if (!live[rpo[i]])
4988 {
4989 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4990 sbitmap_zero (live[rpo[i]]);
4991 }
4992
4993 /* Process BB and update the live information with uses in
4994 this block. */
4995 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
4996
4997 /* Merge liveness into the predecessor blocks and free it. */
4998 if (!sbitmap_empty_p (live[rpo[i]]))
4999 {
5000 int pred_rpo = i;
5001 FOR_EACH_EDGE (e, ei, bb->preds)
5002 {
5003 int pred = e->src->index;
5004 if (e->flags & EDGE_DFS_BACK)
5005 continue;
5006
5007 if (!live[pred])
5008 {
5009 live[pred] = sbitmap_alloc (num_ssa_names);
5010 sbitmap_zero (live[pred]);
5011 }
5012 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
5013
5014 if (bb_rpo[pred] < pred_rpo)
5015 pred_rpo = bb_rpo[pred];
5016 }
5017
5018 /* Record the RPO number of the last visited block that needs
5019 live information from this block. */
5020 last_rpo[rpo[i]] = pred_rpo;
5021 }
5022 else
5023 {
5024 sbitmap_free (live[rpo[i]]);
5025 live[rpo[i]] = NULL;
5026 }
5027
5028 /* We can free all successors' live bitmaps if all their
5029 predecessors have been visited already. */
5030 FOR_EACH_EDGE (e, ei, bb->succs)
5031 if (last_rpo[e->dest->index] == i
5032 && live[e->dest->index])
5033 {
5034 sbitmap_free (live[e->dest->index]);
5035 live[e->dest->index] = NULL;
5036 }
5037 }
5038
5039 XDELETEVEC (rpo);
5040 XDELETEVEC (bb_rpo);
5041 XDELETEVEC (last_rpo);
5042 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
5043 if (live[i])
5044 sbitmap_free (live[i]);
5045 XDELETEVEC (live);
5046
5047 return need_asserts;
5048 }
5049
5050 /* Create an ASSERT_EXPR for NAME and insert it in the location
5051 indicated by LOC. Return true if we made any edge insertions. */
5052
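/* For instance, an assertion registered on the true edge of
   'if (x_3 > 10)' is emitted as 'x_4 = ASSERT_EXPR <x_3, x_3 > 10>' on
   that edge; such edge insertions are committed later by
   gsi_commit_edge_inserts in process_assert_insertions.  (Illustrative
   names only.)  */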
5053 static bool
5054 process_assert_insertions_for (tree name, assert_locus_t loc)
5055 {
5056 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5057 gimple stmt;
5058 tree cond;
5059 gimple assert_stmt;
5060 edge_iterator ei;
5061 edge e;
5062
5063 /* If we have X <=> X, do not insert an assert expr for that. */
5064 if (loc->expr == loc->val)
5065 return false;
5066
5067 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5068 assert_stmt = build_assert_expr_for (cond, name);
5069 if (loc->e)
5070 {
5071 /* We have been asked to insert the assertion on an edge. This
5072 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5073 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5074 || (gimple_code (gsi_stmt (loc->si))
5075 == GIMPLE_SWITCH));
5076
5077 gsi_insert_on_edge (loc->e, assert_stmt);
5078 return true;
5079 }
5080
5081 /* Otherwise, we can insert right after LOC->SI, provided the
5082 statement is not the last statement in the block. */
5083 stmt = gsi_stmt (loc->si);
5084 if (!stmt_ends_bb_p (stmt))
5085 {
5086 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5087 return false;
5088 }
5089
5090 /* If STMT must be the last statement in BB, we can only insert new
5091 assertions on the non-abnormal edge out of BB. Note that since
5092 STMT is not control flow, there may only be one non-abnormal edge
5093 out of BB. */
5094 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5095 if (!(e->flags & EDGE_ABNORMAL))
5096 {
5097 gsi_insert_on_edge (e, assert_stmt);
5098 return true;
5099 }
5100
5101 gcc_unreachable ();
5102 }
5103
5104
5105 /* Process all the insertions registered for every name N_i registered
5106 in NEED_ASSERT_FOR. The list of assertions to be inserted are
5107 found in ASSERTS_FOR[i]. */
5108
5109 static void
5110 process_assert_insertions (void)
5111 {
5112 unsigned i;
5113 bitmap_iterator bi;
5114 bool update_edges_p = false;
5115 int num_asserts = 0;
5116
5117 if (dump_file && (dump_flags & TDF_DETAILS))
5118 dump_all_asserts (dump_file);
5119
5120 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5121 {
5122 assert_locus_t loc = asserts_for[i];
5123 gcc_assert (loc);
5124
5125 while (loc)
5126 {
5127 assert_locus_t next = loc->next;
5128 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
5129 free (loc);
5130 loc = next;
5131 num_asserts++;
5132 }
5133 }
5134
5135 if (update_edges_p)
5136 gsi_commit_edge_inserts ();
5137
5138 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5139 num_asserts);
5140 }
5141
5142
5143 /* Traverse the flowgraph looking for conditional jumps to insert range
5144 expressions. These range expressions are meant to provide information
5145 to optimizations that need to reason in terms of value ranges. They
5146 will not be expanded into RTL. For instance, given:
5147
5148 x = ...
5149 y = ...
5150 if (x < y)
5151 y = x - 2;
5152 else
5153 x = y + 3;
5154
5155 this pass will transform the code into:
5156
5157 x = ...
5158 y = ...
5159 if (x < y)
5160 {
5161 x = ASSERT_EXPR <x, x < y>
5162 y = x - 2
5163 }
5164 else
5165 {
5166 y = ASSERT_EXPR <y, x <= y>
5167 x = y + 3
5168 }
5169
5170 The idea is that once copy and constant propagation have run, other
5171 optimizations will be able to determine what ranges of values can 'x'
5172 take in different paths of the code, simply by checking the reaching
5173 definition of 'x'. */
5174
5175 static void
5176 insert_range_assertions (void)
5177 {
5178 need_assert_for = BITMAP_ALLOC (NULL);
5179 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5180
5181 calculate_dominance_info (CDI_DOMINATORS);
5182
5183 if (find_assert_locations ())
5184 {
5185 process_assert_insertions ();
5186 update_ssa (TODO_update_ssa_no_phi);
5187 }
5188
5189 if (dump_file && (dump_flags & TDF_DETAILS))
5190 {
5191 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5192 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5193 }
5194
5195 free (asserts_for);
5196 BITMAP_FREE (need_assert_for);
5197 }
5198
5199 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible arrays
5200 and "struct" hacks. If VRP can determine that the
5201 array subscript is a constant, check whether it is outside the valid
5202 range. If the array subscript is a RANGE, warn if it does not
5203 overlap with the valid range.
5204 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
5205
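/* For instance, given 'int a[10];', an access 'a[i_2]' where VRP knows
   i_2 is in [10, 20] is reported as above the array bounds, while the
   address '&a[10]' is accepted when IGNORE_OFF_BY_ONE is true.
   (Illustrative declaration only.)  */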
5206 static void
5207 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5208 {
5209 value_range_t* vr = NULL;
5210 tree low_sub, up_sub;
5211 tree low_bound, up_bound, up_bound_p1;
5212 tree base;
5213
5214 if (TREE_NO_WARNING (ref))
5215 return;
5216
5217 low_sub = up_sub = TREE_OPERAND (ref, 1);
5218 up_bound = array_ref_up_bound (ref);
5219
5220 /* Cannot check flexible arrays. */
5221 if (!up_bound
5222 || TREE_CODE (up_bound) != INTEGER_CST)
5223 return;
5224
5225 /* Accesses to trailing arrays via pointers may access storage
5226 beyond the type's array bounds. */
5227 base = get_base_address (ref);
5228 if (base && TREE_CODE (base) == MEM_REF)
5229 {
5230 tree cref, next = NULL_TREE;
5231
5232 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5233 return;
5234
5235 cref = TREE_OPERAND (ref, 0);
5236 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5237 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5238 next && TREE_CODE (next) != FIELD_DECL;
5239 next = DECL_CHAIN (next))
5240 ;
5241
5242 /* If this is the last field in a struct type or a field in a
5243 union type do not warn. */
5244 if (!next)
5245 return;
5246 }
5247
5248 low_bound = array_ref_low_bound (ref);
5249 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node, 0);
5250
5251 if (TREE_CODE (low_sub) == SSA_NAME)
5252 {
5253 vr = get_value_range (low_sub);
5254 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5255 {
5256 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5257 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5258 }
5259 }
5260
5261 if (vr && vr->type == VR_ANTI_RANGE)
5262 {
5263 if (TREE_CODE (up_sub) == INTEGER_CST
5264 && tree_int_cst_lt (up_bound, up_sub)
5265 && TREE_CODE (low_sub) == INTEGER_CST
5266 && tree_int_cst_lt (low_sub, low_bound))
5267 {
5268 warning_at (location, OPT_Warray_bounds,
5269 "array subscript is outside array bounds");
5270 TREE_NO_WARNING (ref) = 1;
5271 }
5272 }
5273 else if (TREE_CODE (up_sub) == INTEGER_CST
5274 && (ignore_off_by_one
5275 ? (tree_int_cst_lt (up_bound, up_sub)
5276 && !tree_int_cst_equal (up_bound_p1, up_sub))
5277 : (tree_int_cst_lt (up_bound, up_sub)
5278 || tree_int_cst_equal (up_bound_p1, up_sub))))
5279 {
5280 warning_at (location, OPT_Warray_bounds,
5281 "array subscript is above array bounds");
5282 TREE_NO_WARNING (ref) = 1;
5283 }
5284 else if (TREE_CODE (low_sub) == INTEGER_CST
5285 && tree_int_cst_lt (low_sub, low_bound))
5286 {
5287 warning_at (location, OPT_Warray_bounds,
5288 "array subscript is below array bounds");
5289 TREE_NO_WARNING (ref) = 1;
5290 }
5291 }
5292
5293 /* Search whether the expression T, located at LOCATION, computes
5294 the address of an ARRAY_REF, and call check_array_ref on it. */
5295
5296 static void
5297 search_for_addr_array (tree t, location_t location)
5298 {
5299 while (TREE_CODE (t) == SSA_NAME)
5300 {
5301 gimple g = SSA_NAME_DEF_STMT (t);
5302
5303 if (gimple_code (g) != GIMPLE_ASSIGN)
5304 return;
5305
5306 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5307 != GIMPLE_SINGLE_RHS)
5308 return;
5309
5310 t = gimple_assign_rhs1 (g);
5311 }
5312
5313
5314 /* We are only interested in addresses of ARRAY_REFs. */
5315 if (TREE_CODE (t) != ADDR_EXPR)
5316 return;
5317
5318 /* Check each ARRAY_REF in the reference chain. */
5319 do
5320 {
5321 if (TREE_CODE (t) == ARRAY_REF)
5322 check_array_ref (location, t, true /*ignore_off_by_one*/);
5323
5324 t = TREE_OPERAND (t, 0);
5325 }
5326 while (handled_component_p (t));
5327
5328 if (TREE_CODE (t) == MEM_REF
5329 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5330 && !TREE_NO_WARNING (t))
5331 {
5332 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5333 tree low_bound, up_bound, el_sz;
5334 double_int idx;
5335 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5336 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5337 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5338 return;
5339
5340 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5341 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5342 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5343 if (!low_bound
5344 || TREE_CODE (low_bound) != INTEGER_CST
5345 || !up_bound
5346 || TREE_CODE (up_bound) != INTEGER_CST
5347 || !el_sz
5348 || TREE_CODE (el_sz) != INTEGER_CST)
5349 return;
5350
5351 idx = mem_ref_offset (t);
5352 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5353 if (double_int_scmp (idx, double_int_zero) < 0)
5354 {
5355 warning_at (location, OPT_Warray_bounds,
5356 "array subscript is below array bounds");
5357 TREE_NO_WARNING (t) = 1;
5358 }
5359 else if (double_int_scmp (idx,
5360 double_int_add
5361 (double_int_add
5362 (tree_to_double_int (up_bound),
5363 double_int_neg
5364 (tree_to_double_int (low_bound))),
5365 double_int_one)) > 0)
5366 {
5367 warning_at (location, OPT_Warray_bounds,
5368 "array subscript is above array bounds");
5369 TREE_NO_WARNING (t) = 1;
5370 }
5371 }
5372 }
5373
5374 /* walk_tree() callback that checks if *TP is
5375 an ARRAY_REF inside an ADDR_EXPR (in which an array
5376 subscript one outside the valid range is allowed). Call
5377 check_array_ref for each ARRAY_REF found. The location is
5378 passed in DATA. */
5379
5380 static tree
5381 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5382 {
5383 tree t = *tp;
5384 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5385 location_t location;
5386
5387 if (EXPR_HAS_LOCATION (t))
5388 location = EXPR_LOCATION (t);
5389 else
5390 {
5391 location_t *locp = (location_t *) wi->info;
5392 location = *locp;
5393 }
5394
5395 *walk_subtree = TRUE;
5396
5397 if (TREE_CODE (t) == ARRAY_REF)
5398 check_array_ref (location, t, false /*ignore_off_by_one*/);
5399
5400 if (TREE_CODE (t) == MEM_REF
5401 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5402 search_for_addr_array (TREE_OPERAND (t, 0), location);
5403
5404 if (TREE_CODE (t) == ADDR_EXPR)
5405 *walk_subtree = FALSE;
5406
5407 return NULL_TREE;
5408 }
5409
5410 /* Walk over all statements of all reachable BBs and call check_array_bounds
5411 on them. */
5412
5413 static void
5414 check_all_array_refs (void)
5415 {
5416 basic_block bb;
5417 gimple_stmt_iterator si;
5418
5419 FOR_EACH_BB (bb)
5420 {
5421 edge_iterator ei;
5422 edge e;
5423 bool executable = false;
5424
5425 /* Skip blocks that were found to be unreachable. */
5426 FOR_EACH_EDGE (e, ei, bb->preds)
5427 executable |= !!(e->flags & EDGE_EXECUTABLE);
5428 if (!executable)
5429 continue;
5430
5431 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5432 {
5433 gimple stmt = gsi_stmt (si);
5434 struct walk_stmt_info wi;
5435 if (!gimple_has_location (stmt))
5436 continue;
5437
5438 if (is_gimple_call (stmt))
5439 {
5440 size_t i;
5441 size_t n = gimple_call_num_args (stmt);
5442 for (i = 0; i < n; i++)
5443 {
5444 tree arg = gimple_call_arg (stmt, i);
5445 search_for_addr_array (arg, gimple_location (stmt));
5446 }
5447 }
5448 else
5449 {
5450 memset (&wi, 0, sizeof (wi));
5451 wi.info = CONST_CAST (void *, (const void *)
5452 gimple_location_ptr (stmt));
5453
5454 walk_gimple_op (gsi_stmt (si),
5455 check_array_bounds,
5456 &wi);
5457 }
5458 }
5459 }
5460 }
5461
5462 /* Convert range assertion expressions into the implied copies and
5463 copy propagate away the copies. Doing the trivial copy propagation
5464 here avoids the need to run the full copy propagation pass after
5465 VRP.
5466
5467 FIXME, this will eventually lead to copy propagation removing the
5468 names that had useful range information attached to them. For
5469 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5470 then N_i will have the range [3, +INF].
5471
5472 However, by converting the assertion into the implied copy
5473 operation N_i = N_j, we will then copy-propagate N_j into the uses
5474 of N_i and lose the range information. We may want to hold on to
5475 ASSERT_EXPRs a little while longer as the ranges could be used in
5476 things like jump threading.
5477
5478 The problem with keeping ASSERT_EXPRs around is that passes after
5479 VRP need to handle them appropriately.
5480
5481 Another approach would be to make the range information a first
5482 class property of the SSA_NAME so that it can be queried from
5483 any pass. This is made somewhat more complex by the need for
5484 multiple ranges to be associated with one SSA_NAME. */
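/* As a small illustrative sketch (not taken from this file), given

     x_2 = ASSERT_EXPR <x_1, x_1 > 3>;
     y_3 = x_2 + 1;

   the loop below replaces every immediate use of x_2 with x_1, giving

     y_3 = x_1 + 1;

   and then deletes the assertion, which is exactly the trivial copy
   propagation described above.  */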
5485
5486 static void
5487 remove_range_assertions (void)
5488 {
5489 basic_block bb;
5490 gimple_stmt_iterator si;
5491
5492 /* Note that the GSI iterator bump happens at the bottom of the
5493 loop and no bump is necessary if we're removing the statement
5494 referenced by the current GSI. */
5495 FOR_EACH_BB (bb)
5496 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5497 {
5498 gimple stmt = gsi_stmt (si);
5499 gimple use_stmt;
5500
5501 if (is_gimple_assign (stmt)
5502 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5503 {
5504 tree rhs = gimple_assign_rhs1 (stmt);
5505 tree var;
5506 tree cond = fold (ASSERT_EXPR_COND (rhs));
5507 use_operand_p use_p;
5508 imm_use_iterator iter;
5509
5510 gcc_assert (cond != boolean_false_node);
5511
5512 /* Propagate the RHS into every use of the LHS. */
5513 var = ASSERT_EXPR_VAR (rhs);
5514 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5515 gimple_assign_lhs (stmt))
5516 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5517 {
5518 SET_USE (use_p, var);
5519 gcc_assert (TREE_CODE (var) == SSA_NAME);
5520 }
5521
5522 /* And finally, remove the copy, it is not needed. */
5523 gsi_remove (&si, true);
5524 release_defs (stmt);
5525 }
5526 else
5527 gsi_next (&si);
5528 }
5529 }
5530
5531
5532 /* Return true if STMT is interesting for VRP. */
5533
5534 static bool
5535 stmt_interesting_for_vrp (gimple stmt)
5536 {
5537 if (gimple_code (stmt) == GIMPLE_PHI
5538 && is_gimple_reg (gimple_phi_result (stmt))
5539 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5540 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5541 return true;
5542 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5543 {
5544 tree lhs = gimple_get_lhs (stmt);
5545
5546 /* In general, assignments with virtual operands are not useful
5547 for deriving ranges, with the obvious exception of calls to
5548 builtin functions. */
5549 if (lhs && TREE_CODE (lhs) == SSA_NAME
5550 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5551 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5552 && ((is_gimple_call (stmt)
5553 && gimple_call_fndecl (stmt) != NULL_TREE
5554 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
5555 || !gimple_vuse (stmt)))
5556 return true;
5557 }
5558 else if (gimple_code (stmt) == GIMPLE_COND
5559 || gimple_code (stmt) == GIMPLE_SWITCH)
5560 return true;
5561
5562 return false;
5563 }
5564
5565
5566 /* Initialize local data structures for VRP. */
5567
5568 static void
5569 vrp_initialize (void)
5570 {
5571 basic_block bb;
5572
5573 vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
5574 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5575
5576 FOR_EACH_BB (bb)
5577 {
5578 gimple_stmt_iterator si;
5579
5580 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5581 {
5582 gimple phi = gsi_stmt (si);
5583 if (!stmt_interesting_for_vrp (phi))
5584 {
5585 tree lhs = PHI_RESULT (phi);
5586 set_value_range_to_varying (get_value_range (lhs));
5587 prop_set_simulate_again (phi, false);
5588 }
5589 else
5590 prop_set_simulate_again (phi, true);
5591 }
5592
5593 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5594 {
5595 gimple stmt = gsi_stmt (si);
5596
5597 /* If the statement is a control statement, we must simulate
5598 it at least once. Failing to do so means that its outgoing
5599 edges will never get added. */
5600 if (stmt_ends_bb_p (stmt))
5601 prop_set_simulate_again (stmt, true);
5602 else if (!stmt_interesting_for_vrp (stmt))
5603 {
5604 ssa_op_iter i;
5605 tree def;
5606 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5607 set_value_range_to_varying (get_value_range (def));
5608 prop_set_simulate_again (stmt, false);
5609 }
5610 else
5611 prop_set_simulate_again (stmt, true);
5612 }
5613 }
5614 }
5615
5616
5617 /* Visit assignment STMT. If it produces an interesting range, record
5618 the SSA name in *OUTPUT_P. */
5619
5620 static enum ssa_prop_result
5621 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5622 {
5623 tree def, lhs;
5624 ssa_op_iter iter;
5625 enum gimple_code code = gimple_code (stmt);
5626 lhs = gimple_get_lhs (stmt);
5627
5628 /* We only keep track of ranges in integral and pointer types. */
5629 if (TREE_CODE (lhs) == SSA_NAME
5630 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5631 /* It is valid to have NULL MIN/MAX values on a type. See
5632 build_range_type. */
5633 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5634 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5635 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5636 {
5637 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5638
5639 if (code == GIMPLE_CALL)
5640 extract_range_basic (&new_vr, stmt);
5641 else
5642 extract_range_from_assignment (&new_vr, stmt);
5643
5644 if (update_value_range (lhs, &new_vr))
5645 {
5646 *output_p = lhs;
5647
5648 if (dump_file && (dump_flags & TDF_DETAILS))
5649 {
5650 fprintf (dump_file, "Found new range for ");
5651 print_generic_expr (dump_file, lhs, 0);
5652 fprintf (dump_file, ": ");
5653 dump_value_range (dump_file, &new_vr);
5654 fprintf (dump_file, "\n\n");
5655 }
5656
5657 if (new_vr.type == VR_VARYING)
5658 return SSA_PROP_VARYING;
5659
5660 return SSA_PROP_INTERESTING;
5661 }
5662
5663 return SSA_PROP_NOT_INTERESTING;
5664 }
5665
5666 /* Every other statement produces no useful ranges. */
5667 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5668 set_value_range_to_varying (get_value_range (def));
5669
5670 return SSA_PROP_VARYING;
5671 }
5672
5673 /* Helper that gets the value range of the SSA_NAME with version I,
5674 or, if that value range is varying or undefined, a symbolic range
5675 containing only the SSA_NAME itself. */
5676
5677 static inline value_range_t
5678 get_vr_for_comparison (int i)
5679 {
5680 value_range_t vr = *(vr_value[i]);
5681
5682 /* If name N_i does not have a valid range, use N_i as its own
5683 range. This allows us to compare against names that may
5684 have N_i in their ranges. */
5685 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5686 {
5687 vr.type = VR_RANGE;
5688 vr.min = ssa_name (i);
5689 vr.max = ssa_name (i);
5690 }
5691
5692 return vr;
5693 }
5694
5695 /* Compare all the value ranges for names equivalent to VAR with VAL
5696 using comparison code COMP. Return the same value returned by
5697 compare_range_with_value, including the setting of
5698 *STRICT_OVERFLOW_P. */
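/* For example (an illustrative sketch), if VAR is x_1 with range [0, 5]
   and equivalence set { x_7 } where x_7 has range [1, 4], then for
   COMP == LT_EXPR and VAL == 10 every member compares to true and
   boolean_true_node is returned; if the members disagreed, NULL_TREE
   would be returned instead.  */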
5699
5700 static tree
5701 compare_name_with_value (enum tree_code comp, tree var, tree val,
5702 bool *strict_overflow_p)
5703 {
5704 bitmap_iterator bi;
5705 unsigned i;
5706 bitmap e;
5707 tree retval, t;
5708 int used_strict_overflow;
5709 bool sop;
5710 value_range_t equiv_vr;
5711
5712 /* Get the set of equivalences for VAR. */
5713 e = get_value_range (var)->equiv;
5714
5715 /* Start at -1. Set it to 0 if we do a comparison without relying
5716 on overflow, or 1 if all comparisons rely on overflow. */
5717 used_strict_overflow = -1;
5718
5719 /* Compare vars' value range with val. */
5720 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5721 sop = false;
5722 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5723 if (retval)
5724 used_strict_overflow = sop ? 1 : 0;
5725
5726 /* If the equiv set is empty we have done all work we need to do. */
5727 if (e == NULL)
5728 {
5729 if (retval
5730 && used_strict_overflow > 0)
5731 *strict_overflow_p = true;
5732 return retval;
5733 }
5734
5735 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5736 {
5737 equiv_vr = get_vr_for_comparison (i);
5738 sop = false;
5739 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5740 if (t)
5741 {
5742 /* If we get different answers from different members
5743 of the equivalence set this check must be in a dead
5744 code region. Folding it to a trap representation
5745 would be correct here. For now just return don't-know. */
5746 if (retval != NULL
5747 && t != retval)
5748 {
5749 retval = NULL_TREE;
5750 break;
5751 }
5752 retval = t;
5753
5754 if (!sop)
5755 used_strict_overflow = 0;
5756 else if (used_strict_overflow < 0)
5757 used_strict_overflow = 1;
5758 }
5759 }
5760
5761 if (retval
5762 && used_strict_overflow > 0)
5763 *strict_overflow_p = true;
5764
5765 return retval;
5766 }
5767
5768
5769 /* Given a comparison code COMP and names N1 and N2, compare all the
5770 ranges equivalent to N1 against all the ranges equivalent to N2
5771 to determine the value of N1 COMP N2. Return the same value
5772 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5773 whether we relied on an overflow infinity in the comparison. */
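/* A sketch of the shortcut used below: if the equivalence sets of N1
   and N2 intersect, e.g. both contain x_5, then N1 and N2 are known to
   be equal, so EQ_EXPR, GE_EXPR and LE_EXPR fold to true and the other
   comparisons fold to false without inspecting any ranges at all.  */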
5774
5775
5776 static tree
5777 compare_names (enum tree_code comp, tree n1, tree n2,
5778 bool *strict_overflow_p)
5779 {
5780 tree t, retval;
5781 bitmap e1, e2;
5782 bitmap_iterator bi1, bi2;
5783 unsigned i1, i2;
5784 int used_strict_overflow;
5785 static bitmap_obstack *s_obstack = NULL;
5786 static bitmap s_e1 = NULL, s_e2 = NULL;
5787
5788 /* Compare the ranges of every name equivalent to N1 against the
5789 ranges of every name equivalent to N2. */
5790 e1 = get_value_range (n1)->equiv;
5791 e2 = get_value_range (n2)->equiv;
5792
5793 /* Use the fake bitmaps if e1 or e2 are not available. */
5794 if (s_obstack == NULL)
5795 {
5796 s_obstack = XNEW (bitmap_obstack);
5797 bitmap_obstack_initialize (s_obstack);
5798 s_e1 = BITMAP_ALLOC (s_obstack);
5799 s_e2 = BITMAP_ALLOC (s_obstack);
5800 }
5801 if (e1 == NULL)
5802 e1 = s_e1;
5803 if (e2 == NULL)
5804 e2 = s_e2;
5805
5806 /* Add N1 and N2 to their own set of equivalences to avoid
5807 duplicating the body of the loop just to check N1 and N2
5808 ranges. */
5809 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5810 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5811
5812 /* If the equivalence sets have a common intersection, then the two
5813 names can be compared without checking their ranges. */
5814 if (bitmap_intersect_p (e1, e2))
5815 {
5816 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5817 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5818
5819 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5820 ? boolean_true_node
5821 : boolean_false_node;
5822 }
5823
5824 /* Start at -1. Set it to 0 if we do a comparison without relying
5825 on overflow, or 1 if all comparisons rely on overflow. */
5826 used_strict_overflow = -1;
5827
5828 /* Otherwise, compare all the equivalent ranges. N1 and N2 were
5829 already added to their own equivalence sets above, so the loops
5830 below also cover the ranges of N1 and N2 themselves. */
5831 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5832 {
5833 value_range_t vr1 = get_vr_for_comparison (i1);
5834
5835 t = retval = NULL_TREE;
5836 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5837 {
5838 bool sop = false;
5839
5840 value_range_t vr2 = get_vr_for_comparison (i2);
5841
5842 t = compare_ranges (comp, &vr1, &vr2, &sop);
5843 if (t)
5844 {
5845 /* If we get different answers from different members
5846 of the equivalence set this check must be in a dead
5847 code region. Folding it to a trap representation
5848 would be correct here. For now just return don't-know. */
5849 if (retval != NULL
5850 && t != retval)
5851 {
5852 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5853 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5854 return NULL_TREE;
5855 }
5856 retval = t;
5857
5858 if (!sop)
5859 used_strict_overflow = 0;
5860 else if (used_strict_overflow < 0)
5861 used_strict_overflow = 1;
5862 }
5863 }
5864
5865 if (retval)
5866 {
5867 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5868 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5869 if (used_strict_overflow > 0)
5870 *strict_overflow_p = true;
5871 return retval;
5872 }
5873 }
5874
5875 /* None of the equivalent ranges are useful in computing this
5876 comparison. */
5877 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5878 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5879 return NULL_TREE;
5880 }
5881
5882 /* Helper function for vrp_evaluate_conditional_warnv. */
5883
5884 static tree
5885 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
5886 tree op0, tree op1,
5887 bool * strict_overflow_p)
5888 {
5889 value_range_t *vr0, *vr1;
5890
5891 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
5892 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
5893
5894 if (vr0 && vr1)
5895 return compare_ranges (code, vr0, vr1, strict_overflow_p);
5896 else if (vr0 && vr1 == NULL)
5897 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
5898 else if (vr0 == NULL && vr1)
5899 return (compare_range_with_value
5900 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
5901 return NULL;
5902 }
5903
5904 /* Helper function for vrp_evaluate_conditional_warnv. */
5905
5906 static tree
5907 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
5908 tree op1, bool use_equiv_p,
5909 bool *strict_overflow_p, bool *only_ranges)
5910 {
5911 tree ret;
5912 if (only_ranges)
5913 *only_ranges = true;
5914
5915 /* We only deal with integral and pointer types. */
5916 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
5917 && !POINTER_TYPE_P (TREE_TYPE (op0)))
5918 return NULL_TREE;
5919
5920 if (use_equiv_p)
5921 {
5922 if (only_ranges
5923 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
5924 (code, op0, op1, strict_overflow_p)))
5925 return ret;
5926 *only_ranges = false;
5927 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
5928 return compare_names (code, op0, op1, strict_overflow_p);
5929 else if (TREE_CODE (op0) == SSA_NAME)
5930 return compare_name_with_value (code, op0, op1, strict_overflow_p);
5931 else if (TREE_CODE (op1) == SSA_NAME)
5932 return (compare_name_with_value
5933 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
5934 }
5935 else
5936 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
5937 strict_overflow_p);
5938 return NULL_TREE;
5939 }
5940
5941 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
5942 information. Return NULL if the conditional cannot be evaluated.
5943 The ranges of all the names equivalent with the operands in COND
5944 will be used when trying to compute the value. If the result is
5945 based on undefined signed overflow, issue a warning if
5946 appropriate. */
5947
5948 static tree
5949 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
5950 {
5951 bool sop;
5952 tree ret;
5953 bool only_ranges;
5954
5955 /* Some passes and foldings leak constants with overflow flag set
5956 into the IL. Avoid doing wrong things with these and bail out. */
5957 if ((TREE_CODE (op0) == INTEGER_CST
5958 && TREE_OVERFLOW (op0))
5959 || (TREE_CODE (op1) == INTEGER_CST
5960 && TREE_OVERFLOW (op1)))
5961 return NULL_TREE;
5962
5963 sop = false;
5964 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
5965 &only_ranges);
5966
5967 if (ret && sop)
5968 {
5969 enum warn_strict_overflow_code wc;
5970 const char* warnmsg;
5971
5972 if (is_gimple_min_invariant (ret))
5973 {
5974 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
5975 warnmsg = G_("assuming signed overflow does not occur when "
5976 "simplifying conditional to constant");
5977 }
5978 else
5979 {
5980 wc = WARN_STRICT_OVERFLOW_COMPARISON;
5981 warnmsg = G_("assuming signed overflow does not occur when "
5982 "simplifying conditional");
5983 }
5984
5985 if (issue_strict_overflow_warning (wc))
5986 {
5987 location_t location;
5988
5989 if (!gimple_has_location (stmt))
5990 location = input_location;
5991 else
5992 location = gimple_location (stmt);
5993 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
5994 }
5995 }
5996
5997 if (warn_type_limits
5998 && ret && only_ranges
5999 && TREE_CODE_CLASS (code) == tcc_comparison
6000 && TREE_CODE (op0) == SSA_NAME)
6001 {
6002 /* If the comparison is being folded and the operand on the LHS
6003 is being compared against a constant value that is outside of
6004 the natural range of OP0's type, then the predicate will
6005 always fold regardless of the value of OP0. If -Wtype-limits
6006 was specified, emit a warning. */
6007 tree type = TREE_TYPE (op0);
6008 value_range_t *vr0 = get_value_range (op0);
6009
6010 if (vr0->type != VR_VARYING
6011 && INTEGRAL_TYPE_P (type)
6012 && vrp_val_is_min (vr0->min)
6013 && vrp_val_is_max (vr0->max)
6014 && is_gimple_min_invariant (op1))
6015 {
6016 location_t location;
6017
6018 if (!gimple_has_location (stmt))
6019 location = input_location;
6020 else
6021 location = gimple_location (stmt);
6022
6023 warning_at (location, OPT_Wtype_limits,
6024 integer_zerop (ret)
6025 ? G_("comparison always false "
6026 "due to limited range of data type")
6027 : G_("comparison always true "
6028 "due to limited range of data type"));
6029 }
6030 }
6031
6032 return ret;
6033 }
6034
6035
6036 /* Visit conditional statement STMT. If we can determine which edge
6037 will be taken out of STMT's basic block, record it in
6038 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6039 SSA_PROP_VARYING. */
6040
6041 static enum ssa_prop_result
6042 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6043 {
6044 tree val;
6045 bool sop;
6046
6047 *taken_edge_p = NULL;
6048
6049 if (dump_file && (dump_flags & TDF_DETAILS))
6050 {
6051 tree use;
6052 ssa_op_iter i;
6053
6054 fprintf (dump_file, "\nVisiting conditional with predicate: ");
6055 print_gimple_stmt (dump_file, stmt, 0, 0);
6056 fprintf (dump_file, "\nWith known ranges\n");
6057
6058 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6059 {
6060 fprintf (dump_file, "\t");
6061 print_generic_expr (dump_file, use, 0);
6062 fprintf (dump_file, ": ");
6063 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6064 }
6065
6066 fprintf (dump_file, "\n");
6067 }
6068
6069 /* Compute the value of the predicate COND by checking the known
6070 ranges of each of its operands.
6071
6072 Note that we cannot evaluate all the equivalent ranges here
6073 because those ranges may not yet be final and with the current
6074 propagation strategy, we cannot determine when the value ranges
6075 of the names in the equivalence set have changed.
6076
6077 For instance, given the following code fragment
6078
6079 i_5 = PHI <8, i_13>
6080 ...
6081 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6082 if (i_14 == 1)
6083 ...
6084
6085 Assume that on the first visit to i_14, i_5 has the temporary
6086 range [8, 8] because the second argument to the PHI function is
6087 not yet executable. We derive the range ~[0, 0] for i_14 and the
6088 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6089 the first time, since i_14 is equivalent to the range [8, 8], we
6090 determine that the predicate is always false.
6091
6092 On the next round of propagation, i_13 is determined to be
6093 VARYING, which causes i_5 to drop down to VARYING. So, another
6094 visit to i_14 is scheduled. In this second visit, we compute the
6095 exact same range and equivalence set for i_14, namely ~[0, 0] and
6096 { i_5 }. But we did not have the previous range for i_5
6097 registered, so vrp_visit_assignment thinks that the range for
6098 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6099 is not visited again, which stops propagation from visiting
6100 statements in the THEN clause of that if().
6101
6102 To properly fix this we would need to keep the previous range
6103 value for the names in the equivalence set. This way we would've
6104 discovered that from one visit to the other i_5 changed from
6105 range [8, 8] to VR_VARYING.
6106
6107 However, fixing this apparent limitation may not be worth the
6108 additional checking. Testing on several code bases (GCC, DLV,
6109 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6110 4 more predicates folded in SPEC. */
6111 sop = false;
6112
6113 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6114 gimple_cond_lhs (stmt),
6115 gimple_cond_rhs (stmt),
6116 false, &sop, NULL);
6117 if (val)
6118 {
6119 if (!sop)
6120 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
6121 else
6122 {
6123 if (dump_file && (dump_flags & TDF_DETAILS))
6124 fprintf (dump_file,
6125 "\nIgnoring predicate evaluation because "
6126 "it assumes that signed overflow is undefined");
6127 val = NULL_TREE;
6128 }
6129 }
6130
6131 if (dump_file && (dump_flags & TDF_DETAILS))
6132 {
6133 fprintf (dump_file, "\nPredicate evaluates to: ");
6134 if (val == NULL_TREE)
6135 fprintf (dump_file, "DON'T KNOW\n");
6136 else
6137 print_generic_stmt (dump_file, val, 0);
6138 }
6139
6140 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6141 }
6142
6143 /* Searches the case label vector of switch statement STMT for the index
6144 *IDX of the CASE_LABEL that includes the value VAL. The search is
6145 restricted to the range [START_IDX, n - 1] where n is the number of labels.
6146
6147 If there is a CASE_LABEL for VAL, its index is placed in *IDX and true is
6148 returned.
6149
6150 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6151 its index is placed in *IDX and false is returned.
6152
6153 If VAL is larger than any CASE_LABEL, n is placed in *IDX and false is
6154 returned. */
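/* For instance (an illustrative sketch), for a switch with labels
   case 1:, case 5: and case 8 ... 12:, looking up VAL == 7 finds no
   exact match and no covering range, so *IDX is set to the index of
   "case 8 ... 12" and false is returned; looking up VAL == 9 falls
   between that label's CASE_LOW and CASE_HIGH and returns true.  */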
6155
6156 static bool
6157 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6158 {
6159 size_t n = gimple_switch_num_labels (stmt);
6160 size_t low, high;
6161
6162 /* Find case label for minimum of the value range or the next one.
6163 At each iteration we are searching in [low, high - 1]. */
6164
6165 for (low = start_idx, high = n; high != low; )
6166 {
6167 tree t;
6168 int cmp;
6169 /* Note that i != high, so we never ask for n. */
6170 size_t i = (high + low) / 2;
6171 t = gimple_switch_label (stmt, i);
6172
6173 /* Cache the result of comparing CASE_LOW and val. */
6174 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6175
6176 if (cmp == 0)
6177 {
6178 /* Ranges cannot be empty. */
6179 *idx = i;
6180 return true;
6181 }
6182 else if (cmp > 0)
6183 high = i;
6184 else
6185 {
6186 low = i + 1;
6187 if (CASE_HIGH (t) != NULL
6188 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6189 {
6190 *idx = i;
6191 return true;
6192 }
6193 }
6194 }
6195
6196 *idx = high;
6197 return false;
6198 }
6199
6200 /* Searches the case label vector of switch statement STMT for the range of
6201 CASE_LABELs that is used for values between MIN and MAX. The first index
6202 is placed in *MIN_IDX and the last index in *MAX_IDX. If the range of
6203 CASE_LABELs is empty then *MAX_IDX < *MIN_IDX.
6204 Returns true if the default label is not needed. */
6205
6206 static bool
6207 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6208 size_t *max_idx)
6209 {
6210 size_t i, j;
6211 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6212 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6213
6214 if (i == j
6215 && min_take_default
6216 && max_take_default)
6217 {
6218 /* Only the default case label reached.
6219 Return an empty range. */
6220 *min_idx = 1;
6221 *max_idx = 0;
6222 return false;
6223 }
6224 else
6225 {
6226 bool take_default = min_take_default || max_take_default;
6227 tree low, high;
6228 size_t k;
6229
6230 if (max_take_default)
6231 j--;
6232
6233 /* If the case label range is contiguous, we do not need
6234 the default case label. Verify that. */
6235 high = CASE_LOW (gimple_switch_label (stmt, i));
6236 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6237 high = CASE_HIGH (gimple_switch_label (stmt, i));
6238 for (k = i + 1; k <= j; ++k)
6239 {
6240 low = CASE_LOW (gimple_switch_label (stmt, k));
6241 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high, 0)))
6242 {
6243 take_default = true;
6244 break;
6245 }
6246 high = low;
6247 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6248 high = CASE_HIGH (gimple_switch_label (stmt, k));
6249 }
6250
6251 *min_idx = i;
6252 *max_idx = j;
6253 return !take_default;
6254 }
6255 }
6256
6257 /* Visit switch statement STMT. If we can determine which edge
6258 will be taken out of STMT's basic block, record it in
6259 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6260 SSA_PROP_VARYING. */
6261
6262 static enum ssa_prop_result
6263 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6264 {
6265 tree op, val;
6266 value_range_t *vr;
6267 size_t i = 0, j = 0;
6268 bool take_default;
6269
6270 *taken_edge_p = NULL;
6271 op = gimple_switch_index (stmt);
6272 if (TREE_CODE (op) != SSA_NAME)
6273 return SSA_PROP_VARYING;
6274
6275 vr = get_value_range (op);
6276 if (dump_file && (dump_flags & TDF_DETAILS))
6277 {
6278 fprintf (dump_file, "\nVisiting switch expression with operand ");
6279 print_generic_expr (dump_file, op, 0);
6280 fprintf (dump_file, " with known range ");
6281 dump_value_range (dump_file, vr);
6282 fprintf (dump_file, "\n");
6283 }
6284
6285 if (vr->type != VR_RANGE
6286 || symbolic_range_p (vr))
6287 return SSA_PROP_VARYING;
6288
6289 /* Find the single edge that is taken from the switch expression. */
6290 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6291
6292 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6293 label. */
6294 if (j < i)
6295 {
6296 gcc_assert (take_default);
6297 val = gimple_switch_default_label (stmt);
6298 }
6299 else
6300 {
6301 /* Check if the labels with index i to j, and maybe the default
6302 label, all reach the same destination label. */
6303
6304 val = gimple_switch_label (stmt, i);
6305 if (take_default
6306 && CASE_LABEL (gimple_switch_default_label (stmt))
6307 != CASE_LABEL (val))
6308 {
6309 if (dump_file && (dump_flags & TDF_DETAILS))
6310 fprintf (dump_file, " not a single destination for this "
6311 "range\n");
6312 return SSA_PROP_VARYING;
6313 }
6314 for (++i; i <= j; ++i)
6315 {
6316 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6317 {
6318 if (dump_file && (dump_flags & TDF_DETAILS))
6319 fprintf (dump_file, " not a single destination for this "
6320 "range\n");
6321 return SSA_PROP_VARYING;
6322 }
6323 }
6324 }
6325
6326 *taken_edge_p = find_edge (gimple_bb (stmt),
6327 label_to_block (CASE_LABEL (val)));
6328
6329 if (dump_file && (dump_flags & TDF_DETAILS))
6330 {
6331 fprintf (dump_file, " will take edge to ");
6332 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6333 }
6334
6335 return SSA_PROP_INTERESTING;
6336 }
6337
6338
6339 /* Evaluate statement STMT. If the statement produces a useful range,
6340 return SSA_PROP_INTERESTING and record the SSA name with the
6341 interesting range into *OUTPUT_P.
6342
6343 If STMT is a conditional branch and we can determine its truth
6344 value, the taken edge is recorded in *TAKEN_EDGE_P.
6345
6346 If STMT produces a varying value, return SSA_PROP_VARYING. */
6347
6348 static enum ssa_prop_result
6349 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6350 {
6351 tree def;
6352 ssa_op_iter iter;
6353
6354 if (dump_file && (dump_flags & TDF_DETAILS))
6355 {
6356 fprintf (dump_file, "\nVisiting statement:\n");
6357 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6358 fprintf (dump_file, "\n");
6359 }
6360
6361 if (!stmt_interesting_for_vrp (stmt))
6362 gcc_assert (stmt_ends_bb_p (stmt));
6363 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6364 {
6365 /* In general, assignments with virtual operands are not useful
6366 for deriving ranges, with the obvious exception of calls to
6367 builtin functions. */
6368
6369 if ((is_gimple_call (stmt)
6370 && gimple_call_fndecl (stmt) != NULL_TREE
6371 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
6372 || !gimple_vuse (stmt))
6373 return vrp_visit_assignment_or_call (stmt, output_p);
6374 }
6375 else if (gimple_code (stmt) == GIMPLE_COND)
6376 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6377 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6378 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6379
6380 /* All other statements produce nothing of interest for VRP, so mark
6381 their outputs varying and prevent further simulation. */
6382 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6383 set_value_range_to_varying (get_value_range (def));
6384
6385 return SSA_PROP_VARYING;
6386 }
6387
6388
6389 /* Meet operation for value ranges. Given two value ranges VR0 and
6390 VR1, store in VR0 a range that contains both VR0 and VR1. This
6391 may not be the smallest possible such range. */
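/* A couple of illustrative cases (a sketch, not exhaustive): the meet of
   [1, 5] and [3, 10] is their convex hull [1, 10]; the meet of [1, 5]
   and [8, 9] is likewise the hull [1, 9], deliberately over-approximating
   the union by also including 6 and 7; and the meet of ~[0, 0] with a
   range that does not contain zero stays ~[0, 0].  */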
6392
6393 static void
6394 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6395 {
6396 if (vr0->type == VR_UNDEFINED)
6397 {
6398 copy_value_range (vr0, vr1);
6399 return;
6400 }
6401
6402 if (vr1->type == VR_UNDEFINED)
6403 {
6404 /* Nothing to do. VR0 already has the resulting range. */
6405 return;
6406 }
6407
6408 if (vr0->type == VR_VARYING)
6409 {
6410 /* Nothing to do. VR0 already has the resulting range. */
6411 return;
6412 }
6413
6414 if (vr1->type == VR_VARYING)
6415 {
6416 set_value_range_to_varying (vr0);
6417 return;
6418 }
6419
6420 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6421 {
6422 int cmp;
6423 tree min, max;
6424
6425 /* Compute the convex hull of the ranges. The lower limit of
6426 the new range is the minimum of the two ranges. If they
6427 cannot be compared, then give up. */
6428 cmp = compare_values (vr0->min, vr1->min);
6429 if (cmp == 0 || cmp == 1)
6430 min = vr1->min;
6431 else if (cmp == -1)
6432 min = vr0->min;
6433 else
6434 goto give_up;
6435
6436 /* Similarly, the upper limit of the new range is the maximum
6437 of the two ranges. If they cannot be compared, then
6438 give up. */
6439 cmp = compare_values (vr0->max, vr1->max);
6440 if (cmp == 0 || cmp == -1)
6441 max = vr1->max;
6442 else if (cmp == 1)
6443 max = vr0->max;
6444 else
6445 goto give_up;
6446
6447 /* Check for useless ranges. */
6448 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6449 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6450 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6451 goto give_up;
6452
6453 /* The resulting set of equivalences is the intersection of
6454 the two sets. */
6455 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6456 bitmap_and_into (vr0->equiv, vr1->equiv);
6457 else if (vr0->equiv && !vr1->equiv)
6458 bitmap_clear (vr0->equiv);
6459
6460 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6461 }
6462 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6463 {
6464 /* Two anti-ranges meet only if their complements intersect.
6465 Only handle the case of identical ranges. */
6466 if (compare_values (vr0->min, vr1->min) == 0
6467 && compare_values (vr0->max, vr1->max) == 0
6468 && compare_values (vr0->min, vr0->max) == 0)
6469 {
6470 /* The resulting set of equivalences is the intersection of
6471 the two sets. */
6472 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6473 bitmap_and_into (vr0->equiv, vr1->equiv);
6474 else if (vr0->equiv && !vr1->equiv)
6475 bitmap_clear (vr0->equiv);
6476 }
6477 else
6478 goto give_up;
6479 }
6480 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6481 {
6482 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6483 only handle the case where the ranges have an empty intersection.
6484 The result of the meet operation is the anti-range. */
6485 if (!symbolic_range_p (vr0)
6486 && !symbolic_range_p (vr1)
6487 && !value_ranges_intersect_p (vr0, vr1))
6488 {
6489 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6490 set. We need to compute the intersection of the two
6491 equivalence sets. */
6492 if (vr1->type == VR_ANTI_RANGE)
6493 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6494
6495 /* The resulting set of equivalences is the intersection of
6496 the two sets. */
6497 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6498 bitmap_and_into (vr0->equiv, vr1->equiv);
6499 else if (vr0->equiv && !vr1->equiv)
6500 bitmap_clear (vr0->equiv);
6501 }
6502 else
6503 goto give_up;
6504 }
6505 else
6506 gcc_unreachable ();
6507
6508 return;
6509
6510 give_up:
6511 /* Failed to find an efficient meet. Before giving up and setting
6512 the result to VARYING, see if we can at least derive a useful
6513 anti-range. FIXME, all this nonsense about distinguishing
6514 anti-ranges from ranges is necessary because of the odd
6515 semantics of range_includes_zero_p and friends. */
6516 if (!symbolic_range_p (vr0)
6517 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6518 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6519 && !symbolic_range_p (vr1)
6520 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6521 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6522 {
6523 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6524
6525 /* Since this meet operation did not result from the meeting of
6526 two equivalent names, VR0 cannot have any equivalences. */
6527 if (vr0->equiv)
6528 bitmap_clear (vr0->equiv);
6529 }
6530 else
6531 set_value_range_to_varying (vr0);
6532 }
6533
6534
6535 /* Visit all arguments for PHI node PHI that flow through executable
6536 edges. If a valid value range can be derived from all the incoming
6537 value ranges, set a new range for the LHS of PHI. */
6538
6539 static enum ssa_prop_result
6540 vrp_visit_phi_node (gimple phi)
6541 {
6542 size_t i;
6543 tree lhs = PHI_RESULT (phi);
6544 value_range_t *lhs_vr = get_value_range (lhs);
6545 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6546 int edges, old_edges;
6547 struct loop *l;
6548
6549 if (dump_file && (dump_flags & TDF_DETAILS))
6550 {
6551 fprintf (dump_file, "\nVisiting PHI node: ");
6552 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6553 }
6554
6555 edges = 0;
6556 for (i = 0; i < gimple_phi_num_args (phi); i++)
6557 {
6558 edge e = gimple_phi_arg_edge (phi, i);
6559
6560 if (dump_file && (dump_flags & TDF_DETAILS))
6561 {
6562 fprintf (dump_file,
6563 "\n Argument #%d (%d -> %d %sexecutable)\n",
6564 (int) i, e->src->index, e->dest->index,
6565 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6566 }
6567
6568 if (e->flags & EDGE_EXECUTABLE)
6569 {
6570 tree arg = PHI_ARG_DEF (phi, i);
6571 value_range_t vr_arg;
6572
6573 ++edges;
6574
6575 if (TREE_CODE (arg) == SSA_NAME)
6576 {
6577 vr_arg = *(get_value_range (arg));
6578 }
6579 else
6580 {
6581 if (is_overflow_infinity (arg))
6582 {
6583 arg = copy_node (arg);
6584 TREE_OVERFLOW (arg) = 0;
6585 }
6586
6587 vr_arg.type = VR_RANGE;
6588 vr_arg.min = arg;
6589 vr_arg.max = arg;
6590 vr_arg.equiv = NULL;
6591 }
6592
6593 if (dump_file && (dump_flags & TDF_DETAILS))
6594 {
6595 fprintf (dump_file, "\t");
6596 print_generic_expr (dump_file, arg, dump_flags);
6597 fprintf (dump_file, "\n\tValue: ");
6598 dump_value_range (dump_file, &vr_arg);
6599 fprintf (dump_file, "\n");
6600 }
6601
6602 vrp_meet (&vr_result, &vr_arg);
6603
6604 if (vr_result.type == VR_VARYING)
6605 break;
6606 }
6607 }
6608
6609 if (vr_result.type == VR_VARYING)
6610 goto varying;
6611
6612 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6613 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6614
6615 /* To prevent infinite iteration of the algorithm, widen the range
6616 bounds towards infinity when a bound keeps moving compared to the
6617 previous result. We don't do this if we have seen a new executable
6618 edge; this helps us avoid an overflow infinity for conditionals
6619 which are not in a loop. */
6620 if (edges > 0
6621 && edges == old_edges)
6622 {
6623 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6624 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6625
6626 /* For non VR_RANGE or for pointers fall back to varying if
6627 the range changed. */
6628 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
6629 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6630 && (cmp_min != 0 || cmp_max != 0))
6631 goto varying;
6632
6633 /* If the new minimum is smaller or larger than the previous
6634 one, go all the way to -INF. In the first case, to avoid
6635 iterating millions of times to reach -INF, and in the
6636 other case to avoid infinite bouncing between different
6637 minimums. */
6638 if (cmp_min > 0 || cmp_min < 0)
6639 {
6640 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6641 || !vrp_var_may_overflow (lhs, phi))
6642 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6643 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6644 vr_result.min =
6645 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6646 }
6647
6648 /* Similarly, if the new maximum is smaller or larger than
6649 the previous one, go all the way to +INF. */
6650 if (cmp_max < 0 || cmp_max > 0)
6651 {
6652 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6653 || !vrp_var_may_overflow (lhs, phi))
6654 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6655 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6656 vr_result.max =
6657 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6658 }
6659
6660 /* If we dropped either bound to +-INF then, if this is a loop
6661 PHI node, SCEV may know more about its value range. */
6662 if ((cmp_min > 0 || cmp_min < 0
6663 || cmp_max < 0 || cmp_max > 0)
6664 && current_loops
6665 && (l = loop_containing_stmt (phi))
6666 && l->header == gimple_bb (phi))
6667 adjust_range_with_scev (&vr_result, l, phi, lhs);
6668
6669 /* If we will end up with a (-INF, +INF) range, set it to
6670 VARYING. Same if the previous max value was invalid for
6671 the type and we end up with vr_result.min > vr_result.max. */
6672 if ((vrp_val_is_max (vr_result.max)
6673 && vrp_val_is_min (vr_result.min))
6674 || compare_values (vr_result.min,
6675 vr_result.max) > 0)
6676 goto varying;
6677 }
6678
6679 /* If the new range is different from the previous value, keep
6680 iterating. */
6681 if (update_value_range (lhs, &vr_result))
6682 {
6683 if (dump_file && (dump_flags & TDF_DETAILS))
6684 {
6685 fprintf (dump_file, "Found new range for ");
6686 print_generic_expr (dump_file, lhs, 0);
6687 fprintf (dump_file, ": ");
6688 dump_value_range (dump_file, &vr_result);
6689 fprintf (dump_file, "\n\n");
6690 }
6691
6692 return SSA_PROP_INTERESTING;
6693 }
6694
6695 /* Nothing changed, don't add outgoing edges. */
6696 return SSA_PROP_NOT_INTERESTING;
6697
6698 /* No match found. Set the LHS to VARYING. */
6699 varying:
6700 set_value_range_to_varying (lhs_vr);
6701 return SSA_PROP_VARYING;
6702 }
6703
6704 /* Simplify boolean operations if the source operands are already
6705 known to be boolean values. */
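/* For example (an illustrative sketch), if a_2 and a_5 are each known
   from their value ranges to be either 0 or 1, then

     b_3 = a_2 != 0;      becomes   b_3 = a_2;
     c_4 = a_5 && a_2;    becomes   c_4 = a_5 & a_2;

   since the TRUTH_* forms and the bitwise forms agree on boolean
   operands.  */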
6706 static bool
6707 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6708 {
6709 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6710 tree val = NULL;
6711 tree op0, op1;
6712 value_range_t *vr;
6713 bool sop = false;
6714 bool need_conversion;
6715
6716 op0 = gimple_assign_rhs1 (stmt);
6717 if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
6718 {
6719 if (TREE_CODE (op0) != SSA_NAME)
6720 return false;
6721 vr = get_value_range (op0);
6722
6723 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6724 if (!val || !integer_onep (val))
6725 return false;
6726
6727 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6728 if (!val || !integer_onep (val))
6729 return false;
6730 }
6731
6732 if (rhs_code == TRUTH_NOT_EXPR)
6733 {
6734 rhs_code = NE_EXPR;
6735 op1 = build_int_cst (TREE_TYPE (op0), 1);
6736 }
6737 else
6738 {
6739 op1 = gimple_assign_rhs2 (stmt);
6740
6741 /* Reduce number of cases to handle. */
6742 if (is_gimple_min_invariant (op1))
6743 {
6744 /* Exclude anything that should have been already folded. */
6745 if (rhs_code != EQ_EXPR
6746 && rhs_code != NE_EXPR
6747 && rhs_code != TRUTH_XOR_EXPR)
6748 return false;
6749
6750 if (!integer_zerop (op1)
6751 && !integer_onep (op1)
6752 && !integer_all_onesp (op1))
6753 return false;
6754
6755 /* Limit the number of cases we have to consider. */
6756 if (rhs_code == EQ_EXPR)
6757 {
6758 rhs_code = NE_EXPR;
6759 op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
6760 }
6761 }
6762 else
6763 {
6764 /* Punt on A == B as there is no BIT_XNOR_EXPR. */
6765 if (rhs_code == EQ_EXPR)
6766 return false;
6767
6768 if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
6769 {
6770 vr = get_value_range (op1);
6771 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6772 if (!val || !integer_onep (val))
6773 return false;
6774
6775 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6776 if (!val || !integer_onep (val))
6777 return false;
6778 }
6779 }
6780 }
6781
6782 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6783 {
6784 location_t location;
6785
6786 if (!gimple_has_location (stmt))
6787 location = input_location;
6788 else
6789 location = gimple_location (stmt);
6790
6791 if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
6792 warning_at (location, OPT_Wstrict_overflow,
6793 _("assuming signed overflow does not occur when "
6794 "simplifying && or || to & or |"));
6795 else
6796 warning_at (location, OPT_Wstrict_overflow,
6797 _("assuming signed overflow does not occur when "
6798 "simplifying ==, != or ! to identity or ^"));
6799 }
6800
6801 need_conversion =
6802 !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
6803 TREE_TYPE (op0));
6804
6805 /* Make sure to not sign-extend -1 as a boolean value. */
6806 if (need_conversion
6807 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6808 && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
6809 return false;
6810
6811 switch (rhs_code)
6812 {
6813 case TRUTH_AND_EXPR:
6814 rhs_code = BIT_AND_EXPR;
6815 break;
6816 case TRUTH_OR_EXPR:
6817 rhs_code = BIT_IOR_EXPR;
6818 break;
6819 case TRUTH_XOR_EXPR:
6820 case NE_EXPR:
6821 if (integer_zerop (op1))
6822 {
6823 gimple_assign_set_rhs_with_ops (gsi,
6824 need_conversion ? NOP_EXPR : SSA_NAME,
6825 op0, NULL);
6826 update_stmt (gsi_stmt (*gsi));
6827 return true;
6828 }
6829
6830 rhs_code = BIT_XOR_EXPR;
6831 break;
6832 default:
6833 gcc_unreachable ();
6834 }
6835
6836 if (need_conversion)
6837 return false;
6838
6839 gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
6840 update_stmt (gsi_stmt (*gsi));
6841 return true;
6842 }
6843
6844 /* Simplify a division or modulo operator to a right shift or
6845 bitwise and if the first operand is unsigned or known to be
6846 non-negative and the second operand is an exact power of two. */
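/* For instance (an illustrative sketch), when x_1 is known to be
   non-negative,

     y_2 = x_1 / 8;   becomes   y_2 = x_1 >> 3;
     z_3 = x_1 % 8;   becomes   z_3 = x_1 & 7;

   which matches the RSHIFT_EXPR and BIT_AND_EXPR rewrites below.  */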
6847
6848 static bool
6849 simplify_div_or_mod_using_ranges (gimple stmt)
6850 {
6851 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6852 tree val = NULL;
6853 tree op0 = gimple_assign_rhs1 (stmt);
6854 tree op1 = gimple_assign_rhs2 (stmt);
6855 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6856
6857 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6858 {
6859 val = integer_one_node;
6860 }
6861 else
6862 {
6863 bool sop = false;
6864
6865 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6866
6867 if (val
6868 && sop
6869 && integer_onep (val)
6870 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6871 {
6872 location_t location;
6873
6874 if (!gimple_has_location (stmt))
6875 location = input_location;
6876 else
6877 location = gimple_location (stmt);
6878 warning_at (location, OPT_Wstrict_overflow,
6879 "assuming signed overflow does not occur when "
6880 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6881 }
6882 }
6883
6884 if (val && integer_onep (val))
6885 {
6886 tree t;
6887
6888 if (rhs_code == TRUNC_DIV_EXPR)
6889 {
6890 t = build_int_cst (NULL_TREE, tree_log2 (op1));
6891 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6892 gimple_assign_set_rhs1 (stmt, op0);
6893 gimple_assign_set_rhs2 (stmt, t);
6894 }
6895 else
6896 {
6897 t = build_int_cst (TREE_TYPE (op1), 1);
6898 t = int_const_binop (MINUS_EXPR, op1, t, 0);
6899 t = fold_convert (TREE_TYPE (op0), t);
6900
6901 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6902 gimple_assign_set_rhs1 (stmt, op0);
6903 gimple_assign_set_rhs2 (stmt, t);
6904 }
6905
6906 update_stmt (stmt);
6907 return true;
6908 }
6909
6910 return false;
6911 }
6912
6913 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6914 ABS_EXPR. If the operand is <= 0, then simplify the
6915 ABS_EXPR into a NEGATE_EXPR. */
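/* For example (an illustrative sketch), if x_1 has range [3, 20] then
   y_2 = ABS_EXPR <x_1> becomes y_2 = x_1, and if x_1 has range
   [-20, -3] it becomes y_2 = -x_1, i.e. a NEGATE_EXPR.  */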
6916
6917 static bool
6918 simplify_abs_using_ranges (gimple stmt)
6919 {
6920 tree val = NULL;
6921 tree op = gimple_assign_rhs1 (stmt);
6922 tree type = TREE_TYPE (op);
6923 value_range_t *vr = get_value_range (op);
6924
6925 if (TYPE_UNSIGNED (type))
6926 {
6927 val = integer_zero_node;
6928 }
6929 else if (vr)
6930 {
6931 bool sop = false;
6932
6933 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6934 if (!val)
6935 {
6936 sop = false;
6937 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
6938 &sop);
6939
6940 if (val)
6941 {
6942 if (integer_zerop (val))
6943 val = integer_one_node;
6944 else if (integer_onep (val))
6945 val = integer_zero_node;
6946 }
6947 }
6948
6949 if (val
6950 && (integer_onep (val) || integer_zerop (val)))
6951 {
6952 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6953 {
6954 location_t location;
6955
6956 if (!gimple_has_location (stmt))
6957 location = input_location;
6958 else
6959 location = gimple_location (stmt);
6960 warning_at (location, OPT_Wstrict_overflow,
6961 "assuming signed overflow does not occur when "
6962 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
6963 }
6964
6965 gimple_assign_set_rhs1 (stmt, op);
6966 if (integer_onep (val))
6967 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
6968 else
6969 gimple_assign_set_rhs_code (stmt, SSA_NAME);
6970 update_stmt (stmt);
6971 return true;
6972 }
6973 }
6974
6975 return false;
6976 }
6977
6978 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
6979 If all the bits that are being cleared by & are already
6980 known to be zero from VR, or all the bits that are being
6981 set by | are already known to be one from VR, the bit
6982 operation is redundant. */
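/* As an illustration (a sketch), if x_1 is known to lie in [0, 15] then
   all bits above bit 3 are already zero, so

     y_2 = x_1 & 0xf;   becomes   y_2 = x_1;

   and, dually, if every bit set in the | constant is already known to
   be one in x_1, the BIT_IOR_EXPR reduces to x_1 itself.  */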
6983
6984 static bool
6985 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6986 {
6987 tree op0 = gimple_assign_rhs1 (stmt);
6988 tree op1 = gimple_assign_rhs2 (stmt);
6989 tree op = NULL_TREE;
6990 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6991 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6992 double_int may_be_nonzero0, may_be_nonzero1;
6993 double_int must_be_nonzero0, must_be_nonzero1;
6994 double_int mask;
6995
6996 if (TREE_CODE (op0) == SSA_NAME)
6997 vr0 = *(get_value_range (op0));
6998 else if (is_gimple_min_invariant (op0))
6999 set_value_range_to_value (&vr0, op0, NULL);
7000 else
7001 return false;
7002
7003 if (TREE_CODE (op1) == SSA_NAME)
7004 vr1 = *(get_value_range (op1));
7005 else if (is_gimple_min_invariant (op1))
7006 set_value_range_to_value (&vr1, op1, NULL);
7007 else
7008 return false;
7009
7010 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
7011 return false;
7012 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
7013 return false;
7014
7015 switch (gimple_assign_rhs_code (stmt))
7016 {
7017 case BIT_AND_EXPR:
7018 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7019 if (double_int_zero_p (mask))
7020 {
7021 op = op0;
7022 break;
7023 }
7024 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7025 if (double_int_zero_p (mask))
7026 {
7027 op = op1;
7028 break;
7029 }
7030 break;
7031 case BIT_IOR_EXPR:
7032 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7033 if (double_int_zero_p (mask))
7034 {
7035 op = op1;
7036 break;
7037 }
7038 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7039 if (double_int_zero_p (mask))
7040 {
7041 op = op0;
7042 break;
7043 }
7044 break;
7045 default:
7046 gcc_unreachable ();
7047 }
7048
7049 if (op == NULL_TREE)
7050 return false;
7051
7052 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
7053 update_stmt (gsi_stmt (*gsi));
7054 return true;
7055 }
7056
7057 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
7058 a known value range VR.
7059
7060 If there is one and only one value which will satisfy the
7061 conditional, then return that value. Else return NULL. */
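/* For example (an illustrative sketch), if OP0 has range [0, 127] and
   the condition is OP0 <= 0, the only value in the range that satisfies
   it is 0, so 0 is returned and the caller can rewrite the test as
   OP0 == 0.  */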
7062
7063 static tree
7064 test_for_singularity (enum tree_code cond_code, tree op0,
7065 tree op1, value_range_t *vr)
7066 {
7067 tree min = NULL;
7068 tree max = NULL;
7069
7070 /* Extract minimum/maximum values which satisfy the
7071 conditional as it was written. */
7072 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
7073 {
7074 /* This should not be negative infinity; there is no overflow
7075 here. */
7076 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
7077
7078 max = op1;
7079 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
7080 {
7081 tree one = build_int_cst (TREE_TYPE (op0), 1);
7082 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
7083 if (EXPR_P (max))
7084 TREE_NO_WARNING (max) = 1;
7085 }
7086 }
7087 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
7088 {
7089 /* This should not be positive infinity; there is no overflow
7090 here. */
7091 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
7092
7093 min = op1;
7094 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
7095 {
7096 tree one = build_int_cst (TREE_TYPE (op0), 1);
7097 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
7098 if (EXPR_P (min))
7099 TREE_NO_WARNING (min) = 1;
7100 }
7101 }
7102
7103 /* Now refine the minimum and maximum values using any
7104 value range information we have for op0. */
7105 if (min && max)
7106 {
7107 if (compare_values (vr->min, min) == 1)
7108 min = vr->min;
7109 if (compare_values (vr->max, max) == -1)
7110 max = vr->max;
7111
7112 /* If the new min/max values have converged to a single value,
7113 then there is only one value which can satisfy the condition,
7114 return that value. */
7115 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
7116 return min;
7117 }
7118 return NULL;
7119 }
7120
7121 /* Simplify a conditional using a relational operator to an equality
7122 test if the range information indicates only one value can satisfy
7123 the original conditional. */
7124
7125 static bool
7126 simplify_cond_using_ranges (gimple stmt)
7127 {
7128 tree op0 = gimple_cond_lhs (stmt);
7129 tree op1 = gimple_cond_rhs (stmt);
7130 enum tree_code cond_code = gimple_cond_code (stmt);
7131
7132 if (cond_code != NE_EXPR
7133 && cond_code != EQ_EXPR
7134 && TREE_CODE (op0) == SSA_NAME
7135 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
7136 && is_gimple_min_invariant (op1))
7137 {
7138 value_range_t *vr = get_value_range (op0);
7139
7140 /* If we have range information for OP0, then we might be
7141 able to simplify this conditional. */
7142 if (vr->type == VR_RANGE)
7143 {
7144 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
7145
7146 if (new_tree)
7147 {
7148 if (dump_file)
7149 {
7150 fprintf (dump_file, "Simplified relational ");
7151 print_gimple_stmt (dump_file, stmt, 0, 0);
7152 fprintf (dump_file, " into ");
7153 }
7154
7155 gimple_cond_set_code (stmt, EQ_EXPR);
7156 gimple_cond_set_lhs (stmt, op0);
7157 gimple_cond_set_rhs (stmt, new_tree);
7158
7159 update_stmt (stmt);
7160
7161 if (dump_file)
7162 {
7163 print_gimple_stmt (dump_file, stmt, 0, 0);
7164 fprintf (dump_file, "\n");
7165 }
7166
7167 return true;
7168 }
7169
7170 /* Try again after inverting the condition. We only deal
7171 with integral types here, so no need to worry about
7172 issues with inverting FP comparisons. */
7173 cond_code = invert_tree_comparison (cond_code, false);
7174 new_tree = test_for_singularity (cond_code, op0, op1, vr);
7175
7176 if (new_tree)
7177 {
7178 if (dump_file)
7179 {
7180 fprintf (dump_file, "Simplified relational ");
7181 print_gimple_stmt (dump_file, stmt, 0, 0);
7182 fprintf (dump_file, " into ");
7183 }
7184
7185 gimple_cond_set_code (stmt, NE_EXPR);
7186 gimple_cond_set_lhs (stmt, op0);
7187 gimple_cond_set_rhs (stmt, new_tree);
7188
7189 update_stmt (stmt);
7190
7191 if (dump_file)
7192 {
7193 print_gimple_stmt (dump_file, stmt, 0, 0);
7194 fprintf (dump_file, "\n");
7195 }
7196
7197 return true;
7198 }
7199 }
7200 }
7201
7202 return false;
7203 }
7204
7205 /* Simplify a switch statement using the value range of the switch
7206 argument. */
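/* As an illustrative sketch, if the switch index is known to lie in
   [3, 4] and the switch has labels case 1: through case 9:, only the
   labels for 3 and 4 can be reached; the remaining case edges are
   queued for removal and the label vector is rebuilt with just the
   reachable labels (plus the default label when it is still needed).  */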
7207
7208 static bool
7209 simplify_switch_using_ranges (gimple stmt)
7210 {
7211 tree op = gimple_switch_index (stmt);
7212 value_range_t *vr;
7213 bool take_default;
7214 edge e;
7215 edge_iterator ei;
7216 size_t i = 0, j = 0, n, n2;
7217 tree vec2;
7218 switch_update su;
7219
7220 if (TREE_CODE (op) == SSA_NAME)
7221 {
7222 vr = get_value_range (op);
7223
7224 /* We can only handle integer ranges. */
7225 if (vr->type != VR_RANGE
7226 || symbolic_range_p (vr))
7227 return false;
7228
7229 /* Find case label for min/max of the value range. */
7230 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
7231 }
7232 else if (TREE_CODE (op) == INTEGER_CST)
7233 {
7234 take_default = !find_case_label_index (stmt, 1, op, &i);
7235 if (take_default)
7236 {
7237 i = 1;
7238 j = 0;
7239 }
7240 else
7241 {
7242 j = i;
7243 }
7244 }
7245 else
7246 return false;
7247
7248 n = gimple_switch_num_labels (stmt);
7249
7250 /* Bail out if every case label and the default remain reachable anyway. */
7251 if (i == 1
7252 && j == n - 1
7253 && take_default)
7254 return false;
7255
7256 /* Build a new vector of taken case labels. */
7257 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
7258 n2 = 0;
7259
7260 /* Add the default edge, if necessary. */
7261 if (take_default)
7262 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
7263
7264 for (; i <= j; ++i, ++n2)
7265 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
7266
7267 /* Mark needed edges. */
7268 for (i = 0; i < n2; ++i)
7269 {
7270 e = find_edge (gimple_bb (stmt),
7271 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
7272 e->aux = (void *)-1;
7273 }
7274
7275 /* Queue unneeded edges for later removal. */
7276 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
7277 {
7278 if (e->aux == (void *)-1)
7279 {
7280 e->aux = NULL;
7281 continue;
7282 }
7283
7284 if (dump_file && (dump_flags & TDF_DETAILS))
7285 {
7286 fprintf (dump_file, "removing unreachable case label\n");
7287 }
7288 VEC_safe_push (edge, heap, to_remove_edges, e);
7289 e->flags &= ~EDGE_EXECUTABLE;
7290 }
7291
7292 /* And queue an update for the stmt. */
7293 su.stmt = stmt;
7294 su.vec = vec2;
7295 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7296 return false;
7297 }
7298
7299 /* Simplify STMT using ranges if possible. */
7300
7301 static bool
7302 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7303 {
7304 gimple stmt = gsi_stmt (*gsi);
7305 if (is_gimple_assign (stmt))
7306 {
7307 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7308
7309 switch (rhs_code)
7310 {
7311 case EQ_EXPR:
7312 case NE_EXPR:
7313 case TRUTH_NOT_EXPR:
7314 case TRUTH_AND_EXPR:
7315 case TRUTH_OR_EXPR:
7316 case TRUTH_XOR_EXPR:
7317 /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
7318 	     or identity if the RHS is zero or one, and the LHS is known
7319 	     to be a boolean value.  Transform all TRUTH_*_EXPR into
7320 BIT_*_EXPR if both arguments are known to be boolean values. */
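	  /* For example (illustrative only), with b_2 and c_3 both known
	     to lie in [0, 1]:

	       x_4 = b_2 != 1;    becomes   x_4 = b_2 ^ 1;
	       x_5 = b_2 == 0;    becomes   x_5 = b_2 ^ 1;
	       x_6 = b_2 != 0;    becomes   x_6 = b_2;        (identity)
	       x_7 = b_2 && c_3;  becomes   x_7 = b_2 & c_3;  */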
7321 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7322 return simplify_truth_ops_using_ranges (gsi, stmt);
7323 break;
7324
7325 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7326 and BIT_AND_EXPR respectively if the first operand is greater
7327 than zero and the second operand is an exact power of two. */
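	  /* Illustrative sketch: with x_1 known to be nonnegative,

	       y_2 = x_1 / 8;    becomes   y_2 = x_1 >> 3;
	       y_3 = x_1 % 8;    becomes   y_3 = x_1 & 7;

	     For a possibly negative x_1 the truncating division rounds
	     toward zero rather than toward negative infinity, so the
	     range information is what makes the rewrite safe.  */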
7328 case TRUNC_DIV_EXPR:
7329 case TRUNC_MOD_EXPR:
7330 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))
7331 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7332 return simplify_div_or_mod_using_ranges (stmt);
7333 break;
7334
7335 /* Transform ABS (X) into X or -X as appropriate. */
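	  /* E.g. (illustrative): with x_1 in [0, 100], ABS (x_1) becomes
	     x_1; with x_1 in [-100, -1] it becomes -x_1.  */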
7336 case ABS_EXPR:
7337 if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
7338 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7339 return simplify_abs_using_ranges (stmt);
7340 break;
7341
7342 case BIT_AND_EXPR:
7343 case BIT_IOR_EXPR:
7344 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
7345 if all the bits being cleared are already cleared or
7346 all the bits being set are already set. */
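	  /* Illustrative sketch: with x_1 known to lie in [0, 15],
	     y_2 = x_1 & 0xff becomes y_2 = x_1 since the AND only clears
	     bits that are already zero; with x_3 in [16, 31],
	     y_4 = x_3 | 16 becomes y_4 = x_3 since bit 4 is already set
	     for every value in that range.  */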
7347 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7348 return simplify_bit_ops_using_ranges (gsi, stmt);
7349 break;
7350
7351 default:
7352 break;
7353 }
7354 }
7355 else if (gimple_code (stmt) == GIMPLE_COND)
7356 return simplify_cond_using_ranges (stmt);
7357 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7358 return simplify_switch_using_ranges (stmt);
7359
7360 return false;
7361 }
7362
7363 /* If the statement pointed to by SI has a predicate whose value can be
7364 computed using the value range information computed by VRP, compute
7365 its value and return true. Otherwise, return false. */
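/* For instance (illustrative only): if VRP has computed the range
   [1, 10] for x_2, then

     if (x_2 > 0)

   folds to true, and the assignment

     y_3 = x_2 == 0;

   folds to y_3 = 0.  */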
7366
7367 static bool
7368 fold_predicate_in (gimple_stmt_iterator *si)
7369 {
7370 bool assignment_p = false;
7371 tree val;
7372 gimple stmt = gsi_stmt (*si);
7373
7374 if (is_gimple_assign (stmt)
7375 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7376 {
7377 assignment_p = true;
7378 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7379 gimple_assign_rhs1 (stmt),
7380 gimple_assign_rhs2 (stmt),
7381 stmt);
7382 }
7383 else if (gimple_code (stmt) == GIMPLE_COND)
7384 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7385 gimple_cond_lhs (stmt),
7386 gimple_cond_rhs (stmt),
7387 stmt);
7388 else
7389 return false;
7390
7391 if (val)
7392 {
7393 if (assignment_p)
7394 val = fold_convert (gimple_expr_type (stmt), val);
7395
7396 if (dump_file)
7397 {
7398 fprintf (dump_file, "Folding predicate ");
7399 print_gimple_expr (dump_file, stmt, 0, 0);
7400 fprintf (dump_file, " to ");
7401 print_generic_expr (dump_file, val, 0);
7402 fprintf (dump_file, "\n");
7403 }
7404
7405 if (is_gimple_assign (stmt))
7406 gimple_assign_set_rhs_from_tree (si, val);
7407 else
7408 {
7409 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7410 if (integer_zerop (val))
7411 gimple_cond_make_false (stmt);
7412 else if (integer_onep (val))
7413 gimple_cond_make_true (stmt);
7414 else
7415 gcc_unreachable ();
7416 }
7417
7418 return true;
7419 }
7420
7421 return false;
7422 }
7423
7424 /* Callback for substitute_and_fold folding the stmt at *SI. */
7425
7426 static bool
7427 vrp_fold_stmt (gimple_stmt_iterator *si)
7428 {
7429 if (fold_predicate_in (si))
7430 return true;
7431
7432 return simplify_stmt_using_ranges (si);
7433 }
7434
7435 /* Stack of dest,src equivalency pairs that need to be restored after
7436 each attempt to thread a block's incoming edge to an outgoing edge.
7437
7438 A NULL entry is used to mark the end of pairs which need to be
7439 restored. */
7440 static VEC(tree,heap) *stack;
7441
7442 /* A trivial wrapper so that we can present the generic jump threading
7443 code with a simple API for simplifying statements. STMT is the
7444    statement we want to simplify; WITHIN_STMT provides the location
7445 for any overflow warnings. */
7446
7447 static tree
7448 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7449 {
7450 /* We only use VRP information to simplify conditionals. This is
7451 overly conservative, but it's unclear if doing more would be
7452 worth the compile time cost. */
7453 if (gimple_code (stmt) != GIMPLE_COND)
7454 return NULL;
7455
7456 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7457 gimple_cond_lhs (stmt),
7458 gimple_cond_rhs (stmt), within_stmt);
7459 }
7460
7461 /* Blocks which have more than one predecessor and more than
7462 one successor present jump threading opportunities, i.e.,
7463 when the block is reached from a specific predecessor, we
7464 may be able to determine which of the outgoing edges will
7465 be traversed. When this optimization applies, we are able
7466 to avoid conditionals at runtime and we may expose secondary
7467 optimization opportunities.
7468
7469 This routine is effectively a driver for the generic jump
7470 threading code. It basically just presents the generic code
7471 with edges that may be suitable for jump threading.
7472
7473 Unlike DOM, we do not iterate VRP if jump threading was successful.
7474 While iterating may expose new opportunities for VRP, it is expected
7475 those opportunities would be very limited and the compile time cost
7476 to expose those opportunities would be significant.
7477
7478 As jump threading opportunities are discovered, they are registered
7479 for later realization. */
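/* As a purely illustrative example: consider a block B with two
   predecessors that ends in

     if (x_1 > 10)

   and suppose that along the edge from one of the predecessors x_1 is
   known to have the range [0, 5].  When B is entered over that edge the
   condition is necessarily false, so that incoming edge can be threaded
   straight to the outgoing false edge and the runtime test avoided.  */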
7480
7481 static void
7482 identify_jump_threads (void)
7483 {
7484 basic_block bb;
7485 gimple dummy;
7486 int i;
7487 edge e;
7488
7489 /* Ugh. When substituting values earlier in this pass we can
7490 wipe the dominance information. So rebuild the dominator
7491 information as we need it within the jump threading code. */
7492 calculate_dominance_info (CDI_DOMINATORS);
7493
7494 /* We do not allow VRP information to be used for jump threading
7495 across a back edge in the CFG. Otherwise it becomes too
7496 difficult to avoid eliminating loop exit tests. Of course
7497 EDGE_DFS_BACK is not accurate at this time so we have to
7498 recompute it. */
7499 mark_dfs_back_edges ();
7500
7501 /* Do not thread across edges we are about to remove. Just marking
7502 them as EDGE_DFS_BACK will do. */
7503 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7504 e->flags |= EDGE_DFS_BACK;
7505
7506 /* Allocate our unwinder stack to unwind any temporary equivalences
7507 that might be recorded. */
7508 stack = VEC_alloc (tree, heap, 20);
7509
7510 /* To avoid lots of silly node creation, we create a single
7511 conditional and just modify it in-place when attempting to
7512 thread jumps. */
7513 dummy = gimple_build_cond (EQ_EXPR,
7514 integer_zero_node, integer_zero_node,
7515 NULL, NULL);
7516
7517 /* Walk through all the blocks finding those which present a
7518 potential jump threading opportunity. We could set this up
7519 as a dominator walker and record data during the walk, but
7520 I doubt it's worth the effort for the classes of jump
7521 threading opportunities we are trying to identify at this
7522 point in compilation. */
7523 FOR_EACH_BB (bb)
7524 {
7525 gimple last;
7526
7527 /* If the generic jump threading code does not find this block
7528 interesting, then there is nothing to do. */
7529 if (! potentially_threadable_block (bb))
7530 continue;
7531
7532 	 /* We only care about blocks ending in a GIMPLE_COND.  While there
7533 	    may be some value in handling GIMPLE_SWITCH here, I doubt it's
7534 terribly important. */
7535 last = gsi_stmt (gsi_last_bb (bb));
7536 if (gimple_code (last) != GIMPLE_COND)
7537 continue;
7538
7539 /* We're basically looking for any kind of conditional with
7540 integral type arguments. */
7541 if (TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7542 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7543 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7544 || is_gimple_min_invariant (gimple_cond_rhs (last)))
7545 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_rhs (last))))
7546 {
7547 edge_iterator ei;
7548
7549 /* We've got a block with multiple predecessors and multiple
7550 successors which also ends in a suitable conditional. For
7551 each predecessor, see if we can thread it to a specific
7552 successor. */
7553 FOR_EACH_EDGE (e, ei, bb->preds)
7554 {
7555 /* Do not thread across back edges or abnormal edges
7556 in the CFG. */
7557 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7558 continue;
7559
7560 thread_across_edge (dummy, e, true, &stack,
7561 simplify_stmt_for_jump_threading);
7562 }
7563 }
7564 }
7565
7566 /* We do not actually update the CFG or SSA graphs at this point as
7567 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7568 handle ASSERT_EXPRs gracefully. */
7569 }
7570
7571 /* We identified all the jump threading opportunities earlier, but could
7572 not transform the CFG at that time. This routine transforms the
7573 CFG and arranges for the dominator tree to be rebuilt if necessary.
7574
7575 Note the SSA graph update will occur during the normal TODO
7576 processing by the pass manager. */
7577 static void
7578 finalize_jump_threads (void)
7579 {
7580 thread_through_all_blocks (false);
7581 VEC_free (tree, heap, stack);
7582 }
7583
7584
7585 /* Traverse all the blocks folding conditionals with known ranges. */
7586
7587 static void
7588 vrp_finalize (void)
7589 {
7590 size_t i;
7591 unsigned num = num_ssa_names;
7592
7593 if (dump_file)
7594 {
7595 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7596 dump_all_value_ranges (dump_file);
7597 fprintf (dump_file, "\n");
7598 }
7599
7600 substitute_and_fold (op_with_constant_singleton_value_range,
7601 vrp_fold_stmt, false);
7602
7603 if (warn_array_bounds)
7604 check_all_array_refs ();
7605
7606 /* We must identify jump threading opportunities before we release
7607 the datastructures built by VRP. */
7608 identify_jump_threads ();
7609
7610 /* Free allocated memory. */
7611 for (i = 0; i < num; i++)
7612 if (vr_value[i])
7613 {
7614 BITMAP_FREE (vr_value[i]->equiv);
7615 free (vr_value[i]);
7616 }
7617
7618 free (vr_value);
7619 free (vr_phi_edge_counts);
7620
7621 /* So that we can distinguish between VRP data being available
7622 and not available. */
7623 vr_value = NULL;
7624 vr_phi_edge_counts = NULL;
7625 }
7626
7627
7628 /* Main entry point to VRP (Value Range Propagation). This pass is
7629 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7630 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7631 Programming Language Design and Implementation, pp. 67-78, 1995.
7632 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7633
7634 This is essentially an SSA-CCP pass modified to deal with ranges
7635 instead of constants.
7636
7637    While propagating ranges, we may find that two or more SSA names
7638    have equivalent, though distinct, ranges.  For instance,
7639
7640 1 x_9 = p_3->a;
7641 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7642 3 if (p_4 == q_2)
7643 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7644 5 endif
7645 6 if (q_2)
7646
7647 In the code above, pointer p_5 has range [q_2, q_2], but from the
7648 code we can also determine that p_5 cannot be NULL and, if q_2 had
7649 a non-varying range, p_5's range should also be compatible with it.
7650
7651 These equivalences are created by two expressions: ASSERT_EXPR and
7652 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7653 result of another assertion, then we can use the fact that p_5 and
7654 p_4 are equivalent when evaluating p_5's range.
7655
7656 Together with value ranges, we also propagate these equivalences
7657 between names so that we can take advantage of information from
7658 multiple ranges when doing final replacement. Note that this
7659 equivalency relation is transitive but not symmetric.
7660
7661 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7662 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7663 in contexts where that assertion does not hold (e.g., in line 6).
7664
7665 TODO, the main difference between this pass and Patterson's is that
7666 we do not propagate edge probabilities. We only compute whether
7667 edges can be taken or not. That is, instead of having a spectrum
7668 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7669 DON'T KNOW. In the future, it may be worthwhile to propagate
7670 probabilities to aid branch prediction. */
7671
7672 static unsigned int
7673 execute_vrp (void)
7674 {
7675 int i;
7676 edge e;
7677 switch_update *su;
7678
7679 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7680 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7681 scev_initialize ();
7682
7683   /* Estimate the number of iterations, but do not use undefined
7684      behavior to do so.  We cannot do this lazily, as other functions
7685      might otherwise compute the estimates using undefined behavior.  */
7686 free_numbers_of_iterations_estimates ();
7687 estimate_numbers_of_iterations (false);
7688
7689 insert_range_assertions ();
7690
7691 to_remove_edges = VEC_alloc (edge, heap, 10);
7692 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7693 threadedge_initialize_values ();
7694
7695 vrp_initialize ();
7696 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7697 vrp_finalize ();
7698
7699 /* ASSERT_EXPRs must be removed before finalizing jump threads
7700 as finalizing jump threads calls the CFG cleanup code which
7701 does not properly handle ASSERT_EXPRs. */
7702 remove_range_assertions ();
7703
7704 /* If we exposed any new variables, go ahead and put them into
7705 SSA form now, before we handle jump threading. This simplifies
7706 interactions between rewriting of _DECL nodes into SSA form
7707 and rewriting SSA_NAME nodes into SSA form after block
7708 duplication and CFG manipulation. */
7709 update_ssa (TODO_update_ssa);
7710
7711 finalize_jump_threads ();
7712
7713 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
7714 CFG in a broken state and requires a cfg_cleanup run. */
7715 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7716 remove_edge (e);
7717 /* Update SWITCH_EXPR case label vector. */
7718 FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
7719 {
7720 size_t j;
7721 size_t n = TREE_VEC_LENGTH (su->vec);
7722 tree label;
7723 gimple_switch_set_num_labels (su->stmt, n);
7724 for (j = 0; j < n; j++)
7725 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7726 	      /* As we may have replaced the default label with a regular one,
7727 		 make sure to turn it back into a real default label.  This
7728 		 ensures optimal expansion.  */
7729 label = gimple_switch_default_label (su->stmt);
7730 CASE_LOW (label) = NULL_TREE;
7731 CASE_HIGH (label) = NULL_TREE;
7732 }
7733
7734 if (VEC_length (edge, to_remove_edges) > 0)
7735 free_dominance_info (CDI_DOMINATORS);
7736
7737 VEC_free (edge, heap, to_remove_edges);
7738 VEC_free (switch_update, heap, to_update_switch_stmts);
7739 threadedge_finalize_values ();
7740
7741 scev_finalize ();
7742 loop_optimizer_finalize ();
7743 return 0;
7744 }
7745
7746 static bool
7747 gate_vrp (void)
7748 {
7749 return flag_tree_vrp != 0;
7750 }
7751
7752 struct gimple_opt_pass pass_vrp =
7753 {
7754 {
7755 GIMPLE_PASS,
7756 "vrp", /* name */
7757 gate_vrp, /* gate */
7758 execute_vrp, /* execute */
7759 NULL, /* sub */
7760 NULL, /* next */
7761 0, /* static_pass_number */
7762 TV_TREE_VRP, /* tv_id */
7763 PROP_ssa, /* properties_required */
7764 0, /* properties_provided */
7765 0, /* properties_destroyed */
7766 0, /* todo_flags_start */
7767 TODO_cleanup_cfg
7768 | TODO_ggc_collect
7769 | TODO_verify_ssa
7770 | TODO_dump_func
7771 | TODO_update_ssa /* todo_flags_finish */
7772 }
7773 };