gcc/tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2020 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "flags.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "calls.h"
38 #include "cfganal.h"
39 #include "gimple-fold.h"
40 #include "tree-eh.h"
41 #include "gimple-iterator.h"
42 #include "gimple-walk.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "tree-ssa-loop-niter.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-into-ssa.h"
48 #include "tree-ssa.h"
49 #include "cfgloop.h"
50 #include "tree-scalar-evolution.h"
51 #include "tree-ssa-propagate.h"
52 #include "tree-chrec.h"
53 #include "tree-ssa-threadupdate.h"
54 #include "tree-ssa-scopedtables.h"
55 #include "tree-ssa-threadedge.h"
56 #include "omp-general.h"
57 #include "target.h"
58 #include "case-cfn-macros.h"
59 #include "alloc-pool.h"
60 #include "domwalk.h"
61 #include "tree-cfgcleanup.h"
62 #include "stringpool.h"
63 #include "attribs.h"
64 #include "vr-values.h"
65 #include "builtins.h"
66 #include "range-op.h"
67 #include "value-range-equiv.h"
68 #include "gimple-array-bounds.h"
69
70 /* Set of SSA names found live during the RPO traversal of the function
71 for still active basic-blocks. */
72 class live_names
73 {
74 public:
75 live_names ();
76 ~live_names ();
77 void set (tree, basic_block);
78 void clear (tree, basic_block);
79 void merge (basic_block dest, basic_block src);
80 bool live_on_block_p (tree, basic_block);
81 bool live_on_edge_p (tree, edge);
82 bool block_has_live_names_p (basic_block);
83 void clear_block (basic_block);
84
85 private:
86 sbitmap *live;
87 unsigned num_blocks;
88 void init_bitmap_if_needed (basic_block);
89 };
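/* A minimal usage sketch (illustrative only, not taken from the code
   below; NAME, BB and E stand for an SSA name, a basic block and an
   edge in hand):

     live_names live;
     live.set (name, bb);                // mark NAME live in BB
     if (live.live_on_edge_p (name, e))  // true if NAME is live in E->dest
       ...;
     live.merge (e->src, bb);            // OR BB's live names into E->src's
   */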
90
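/* Allocate and clear the liveness bitmap for BB on first use.  */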
91 void
92 live_names::init_bitmap_if_needed (basic_block bb)
93 {
94 unsigned i = bb->index;
95 if (!live[i])
96 {
97 live[i] = sbitmap_alloc (num_ssa_names);
98 bitmap_clear (live[i]);
99 }
100 }
101
102 bool
103 live_names::block_has_live_names_p (basic_block bb)
104 {
105 unsigned i = bb->index;
106 return live[i] && bitmap_empty_p (live[i]);
107 }
108
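/* Free the liveness bitmap for BB, if any.  */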
109 void
110 live_names::clear_block (basic_block bb)
111 {
112 unsigned i = bb->index;
113 if (live[i])
114 {
115 sbitmap_free (live[i]);
116 live[i] = NULL;
117 }
118 }
119
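/* Merge the SSA names live in SRC into those live in DEST.  */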
120 void
121 live_names::merge (basic_block dest, basic_block src)
122 {
123 init_bitmap_if_needed (dest);
124 init_bitmap_if_needed (src);
125 bitmap_ior (live[dest->index], live[dest->index], live[src->index]);
126 }
127
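/* Mark NAME as live in BB.  */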
128 void
129 live_names::set (tree name, basic_block bb)
130 {
131 init_bitmap_if_needed (bb);
132 bitmap_set_bit (live[bb->index], SSA_NAME_VERSION (name));
133 }
134
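/* Mark NAME as not live in BB.  */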
135 void
136 live_names::clear (tree name, basic_block bb)
137 {
138 unsigned i = bb->index;
139 if (live[i])
140 bitmap_clear_bit (live[i], SSA_NAME_VERSION (name));
141 }
142
143 live_names::live_names ()
144 {
145 num_blocks = last_basic_block_for_fn (cfun);
146 live = XCNEWVEC (sbitmap, num_blocks);
147 }
148
149 live_names::~live_names ()
150 {
151 for (unsigned i = 0; i < num_blocks; ++i)
152 if (live[i])
153 sbitmap_free (live[i]);
154 XDELETEVEC (live);
155 }
156
157 bool
158 live_names::live_on_block_p (tree name, basic_block bb)
159 {
160 return (live[bb->index]
161 && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
162 }
163
164 /* Return true if the SSA name NAME is live on the edge E. */
165
166 bool
167 live_names::live_on_edge_p (tree name, edge e)
168 {
169 return live_on_block_p (name, e->dest);
170 }
171
172
 173 /* VR_TYPE describes a range with minimum value *MIN and maximum
174 value *MAX. Restrict the range to the set of values that have
175 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
176 return the new range type.
177
178 SGN gives the sign of the values described by the range. */
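/* A worked example (illustrative): for an 8-bit unsigned VR_RANGE
   [1, 14] with NONZERO_BITS == 0x0c, the only values with no bits set
   outside the mask are {0, 4, 8, 12}; *MIN is rounded up to 4, *MAX is
   rounded down to 12, and VR_RANGE is returned for [4, 12].  */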
179
180 enum value_range_kind
181 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
182 wide_int *min, wide_int *max,
183 const wide_int &nonzero_bits,
184 signop sgn)
185 {
186 if (vr_type == VR_ANTI_RANGE)
187 {
188 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
189 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
190 to create an inclusive upper bound for A and an inclusive lower
191 bound for B. */
192 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
193 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
194
195 /* If the calculation of A_MAX wrapped, A is effectively empty
196 and A_MAX is the highest value that satisfies NONZERO_BITS.
197 Likewise if the calculation of B_MIN wrapped, B is effectively
198 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
199 bool a_empty = wi::ge_p (a_max, *min, sgn);
200 bool b_empty = wi::le_p (b_min, *max, sgn);
201
202 /* If both A and B are empty, there are no valid values. */
203 if (a_empty && b_empty)
204 return VR_UNDEFINED;
205
206 /* If exactly one of A or B is empty, return a VR_RANGE for the
207 other one. */
208 if (a_empty || b_empty)
209 {
210 *min = b_min;
211 *max = a_max;
212 gcc_checking_assert (wi::le_p (*min, *max, sgn));
213 return VR_RANGE;
214 }
215
216 /* Update the VR_ANTI_RANGE bounds. */
217 *min = a_max + 1;
218 *max = b_min - 1;
219 gcc_checking_assert (wi::le_p (*min, *max, sgn));
220
221 /* Now check whether the excluded range includes any values that
222 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
223 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
224 {
225 unsigned int precision = min->get_precision ();
226 *min = wi::min_value (precision, sgn);
227 *max = wi::max_value (precision, sgn);
228 vr_type = VR_RANGE;
229 }
230 }
231 if (vr_type == VR_RANGE)
232 {
233 *max = wi::round_down_for_mask (*max, nonzero_bits);
234
235 /* Check that the range contains at least one valid value. */
236 if (wi::gt_p (*min, *max, sgn))
237 return VR_UNDEFINED;
238
239 *min = wi::round_up_for_mask (*min, nonzero_bits);
240 gcc_checking_assert (wi::le_p (*min, *max, sgn));
241 }
242 return vr_type;
243 }
244
 245 /* Return true if max and min of VR are INTEGER_CST. It is not
 246 necessarily a singleton. */
247
248 bool
249 range_int_cst_p (const value_range *vr)
250 {
251 return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
252 }
253
254 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
255 otherwise. We only handle additive operations and set NEG to true if the
256 symbol is negated and INV to the invariant part, if any. */
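/* For example (illustrative SSA names): for T == "x_3 + 5" this returns
   x_3 with *NEG == false and *INV == 5; for T == "10 - y_2" it returns
   y_2 with *NEG == true and *INV == 10; for T == "a_1 * 2" no single
   symbol is found and NULL_TREE is returned.  */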
257
258 tree
259 get_single_symbol (tree t, bool *neg, tree *inv)
260 {
261 bool neg_;
262 tree inv_;
263
264 *inv = NULL_TREE;
265 *neg = false;
266
267 if (TREE_CODE (t) == PLUS_EXPR
268 || TREE_CODE (t) == POINTER_PLUS_EXPR
269 || TREE_CODE (t) == MINUS_EXPR)
270 {
271 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
272 {
273 neg_ = (TREE_CODE (t) == MINUS_EXPR);
274 inv_ = TREE_OPERAND (t, 0);
275 t = TREE_OPERAND (t, 1);
276 }
277 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
278 {
279 neg_ = false;
280 inv_ = TREE_OPERAND (t, 1);
281 t = TREE_OPERAND (t, 0);
282 }
283 else
284 return NULL_TREE;
285 }
286 else
287 {
288 neg_ = false;
289 inv_ = NULL_TREE;
290 }
291
292 if (TREE_CODE (t) == NEGATE_EXPR)
293 {
294 t = TREE_OPERAND (t, 0);
295 neg_ = !neg_;
296 }
297
298 if (TREE_CODE (t) != SSA_NAME)
299 return NULL_TREE;
300
301 if (inv_ && TREE_OVERFLOW_P (inv_))
302 inv_ = drop_tree_overflow (inv_);
303
304 *neg = neg_;
305 *inv = inv_;
306 return t;
307 }
308
309 /* The reverse operation: build a symbolic expression with TYPE
310 from symbol SYM, negated according to NEG, and invariant INV. */
311
312 static tree
313 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
314 {
315 const bool pointer_p = POINTER_TYPE_P (type);
316 tree t = sym;
317
318 if (neg)
319 t = build1 (NEGATE_EXPR, type, t);
320
321 if (integer_zerop (inv))
322 return t;
323
324 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
325 }
326
327 /* Return
328 1 if VAL < VAL2
329 0 if !(VAL < VAL2)
330 -2 if those are incomparable. */
331 int
332 operand_less_p (tree val, tree val2)
333 {
334 /* LT is folded faster than GE and others. Inline the common case. */
335 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
336 return tree_int_cst_lt (val, val2);
337 else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
338 return val == val2 ? 0 : -2;
339 else
340 {
341 int cmp = compare_values (val, val2);
342 if (cmp == -1)
343 return 1;
344 else if (cmp == 0 || cmp == 1)
345 return 0;
346 else
347 return -2;
348 }
349
350 return 0;
351 }
352
353 /* Compare two values VAL1 and VAL2. Return
354
355 -2 if VAL1 and VAL2 cannot be compared at compile-time,
356 -1 if VAL1 < VAL2,
357 0 if VAL1 == VAL2,
358 +1 if VAL1 > VAL2, and
359 +2 if VAL1 != VAL2
360
361 This is similar to tree_int_cst_compare but supports pointer values
362 and values that cannot be compared at compile time.
363
364 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
365 true if the return value is only valid if we assume that signed
366 overflow is undefined. */
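/* For instance (illustrative), comparing "n_1 + 8" with "n_1 + 3" in a
   signed type with undefined overflow yields +1 (the first value is
   larger), and *STRICT_OVERFLOW_P is set because the result relies on
   the undefined-overflow assumption.  */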
367
368 int
369 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
370 {
371 if (val1 == val2)
372 return 0;
373
374 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
375 both integers. */
376 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
377 == POINTER_TYPE_P (TREE_TYPE (val2)));
378
379 /* Convert the two values into the same type. This is needed because
380 sizetype causes sign extension even for unsigned types. */
381 if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
382 val2 = fold_convert (TREE_TYPE (val1), val2);
383
384 const bool overflow_undefined
385 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
386 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
387 tree inv1, inv2;
388 bool neg1, neg2;
389 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
390 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
391
392 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
393 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
394 if (sym1 && sym2)
395 {
396 /* Both values must use the same name with the same sign. */
397 if (sym1 != sym2 || neg1 != neg2)
398 return -2;
399
400 /* [-]NAME + CST == [-]NAME + CST. */
401 if (inv1 == inv2)
402 return 0;
403
404 /* If overflow is defined we cannot simplify more. */
405 if (!overflow_undefined)
406 return -2;
407
408 if (strict_overflow_p != NULL
409 /* Symbolic range building sets TREE_NO_WARNING to declare
410 that overflow doesn't happen. */
411 && (!inv1 || !TREE_NO_WARNING (val1))
412 && (!inv2 || !TREE_NO_WARNING (val2)))
413 *strict_overflow_p = true;
414
415 if (!inv1)
416 inv1 = build_int_cst (TREE_TYPE (val1), 0);
417 if (!inv2)
418 inv2 = build_int_cst (TREE_TYPE (val2), 0);
419
420 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
421 TYPE_SIGN (TREE_TYPE (val1)));
422 }
423
424 const bool cst1 = is_gimple_min_invariant (val1);
425 const bool cst2 = is_gimple_min_invariant (val2);
426
427 /* If one is of the form '[-]NAME + CST' and the other is constant, then
428 it might be possible to say something depending on the constants. */
429 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
430 {
431 if (!overflow_undefined)
432 return -2;
433
434 if (strict_overflow_p != NULL
435 /* Symbolic range building sets TREE_NO_WARNING to declare
436 that overflow doesn't happen. */
437 && (!sym1 || !TREE_NO_WARNING (val1))
438 && (!sym2 || !TREE_NO_WARNING (val2)))
439 *strict_overflow_p = true;
440
441 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
442 tree cst = cst1 ? val1 : val2;
443 tree inv = cst1 ? inv2 : inv1;
444
445 /* Compute the difference between the constants. If it overflows or
446 underflows, this means that we can trivially compare the NAME with
447 it and, consequently, the two values with each other. */
448 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
449 if (wi::cmp (0, wi::to_wide (inv), sgn)
450 != wi::cmp (diff, wi::to_wide (cst), sgn))
451 {
452 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
453 return cst1 ? res : -res;
454 }
455
456 return -2;
457 }
458
459 /* We cannot say anything more for non-constants. */
460 if (!cst1 || !cst2)
461 return -2;
462
463 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
464 {
465 /* We cannot compare overflowed values. */
466 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
467 return -2;
468
469 if (TREE_CODE (val1) == INTEGER_CST
470 && TREE_CODE (val2) == INTEGER_CST)
471 return tree_int_cst_compare (val1, val2);
472
473 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
474 {
475 if (known_eq (wi::to_poly_widest (val1),
476 wi::to_poly_widest (val2)))
477 return 0;
478 if (known_lt (wi::to_poly_widest (val1),
479 wi::to_poly_widest (val2)))
480 return -1;
481 if (known_gt (wi::to_poly_widest (val1),
482 wi::to_poly_widest (val2)))
483 return 1;
484 }
485
486 return -2;
487 }
488 else
489 {
490 if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
491 {
492 /* We cannot compare overflowed values. */
493 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
494 return -2;
495
496 return tree_int_cst_compare (val1, val2);
497 }
498
 499 /* First see if VAL1 and VAL2 are the same. */
500 if (operand_equal_p (val1, val2, 0))
501 return 0;
502
503 fold_defer_overflow_warnings ();
504
505 /* If VAL1 is a lower address than VAL2, return -1. */
506 tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
507 if (t && integer_onep (t))
508 {
509 fold_undefer_and_ignore_overflow_warnings ();
510 return -1;
511 }
512
513 /* If VAL1 is a higher address than VAL2, return +1. */
514 t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
515 if (t && integer_onep (t))
516 {
517 fold_undefer_and_ignore_overflow_warnings ();
518 return 1;
519 }
520
521 /* If VAL1 is different than VAL2, return +2. */
522 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
523 fold_undefer_and_ignore_overflow_warnings ();
524 if (t && integer_onep (t))
525 return 2;
526
527 return -2;
528 }
529 }
530
531 /* Compare values like compare_values_warnv. */
532
533 int
534 compare_values (tree val1, tree val2)
535 {
536 bool sop;
537 return compare_values_warnv (val1, val2, &sop);
538 }
539
540 /* If BOUND will include a symbolic bound, adjust it accordingly,
541 otherwise leave it as is.
542
543 CODE is the original operation that combined the bounds (PLUS_EXPR
544 or MINUS_EXPR).
545
546 TYPE is the type of the original operation.
547
 548 SYM_OPn is the symbolic part of OPn, if it has one.
 549
 550 NEG_OPn is TRUE if OPn was negated. */
551
552 static void
553 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
554 tree sym_op0, tree sym_op1,
555 bool neg_op0, bool neg_op1)
556 {
557 bool minus_p = (code == MINUS_EXPR);
558 /* If the result bound is constant, we're done; otherwise, build the
 559 symbolic bound. */
560 if (sym_op0 == sym_op1)
561 ;
562 else if (sym_op0)
563 bound = build_symbolic_expr (type, sym_op0,
564 neg_op0, bound);
565 else if (sym_op1)
566 {
567 /* We may not negate if that might introduce
568 undefined overflow. */
569 if (!minus_p
570 || neg_op1
571 || TYPE_OVERFLOW_WRAPS (type))
572 bound = build_symbolic_expr (type, sym_op1,
573 neg_op1 ^ minus_p, bound);
574 else
575 bound = NULL_TREE;
576 }
577 }
578
 579 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
580 int bound according to CODE. CODE is the operation combining the
581 bound (either a PLUS_EXPR or a MINUS_EXPR).
582
583 TYPE is the type of the combine operation.
584
585 WI is the wide int to store the result.
586
 587 OVF is -1 if an underflow occurred, +1 if an overflow occurred, or 0
 588 if neither occurred. */
589
590 static void
591 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
592 tree type, tree op0, tree op1)
593 {
594 bool minus_p = (code == MINUS_EXPR);
595 const signop sgn = TYPE_SIGN (type);
596 const unsigned int prec = TYPE_PRECISION (type);
597
598 /* Combine the bounds, if any. */
599 if (op0 && op1)
600 {
601 if (minus_p)
602 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
603 else
604 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
605 }
606 else if (op0)
607 wi = wi::to_wide (op0);
608 else if (op1)
609 {
610 if (minus_p)
611 wi = wi::neg (wi::to_wide (op1), &ovf);
612 else
613 wi = wi::to_wide (op1);
614 }
615 else
616 wi = wi::shwi (0, prec);
617 }
618
619 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
620 put the result in VR.
621
622 TYPE is the type of the range.
623
624 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
625 occurred while originally calculating WMIN or WMAX. -1 indicates
626 underflow. +1 indicates overflow. 0 indicates neither. */
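/* Illustrative example: for unsigned char, adding [250, 250] and
   [0, 10] gives WMIN == 250 (no overflow) and WMAX == 4 with
   MAX_OVF == wi::OVF_OVERFLOW; since the type wraps, the result is
   expressed as the anti-range ~[5, 249], i.e. [250, 255] U [0, 4].  */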
627
628 static void
629 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
630 tree type,
631 const wide_int &wmin, const wide_int &wmax,
632 wi::overflow_type min_ovf,
633 wi::overflow_type max_ovf)
634 {
635 const signop sgn = TYPE_SIGN (type);
636 const unsigned int prec = TYPE_PRECISION (type);
637
638 /* For one bit precision if max < min, then the swapped
639 range covers all values. */
640 if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
641 {
642 kind = VR_VARYING;
643 return;
644 }
645
646 if (TYPE_OVERFLOW_WRAPS (type))
647 {
648 /* If overflow wraps, truncate the values and adjust the
649 range kind and bounds appropriately. */
650 wide_int tmin = wide_int::from (wmin, prec, sgn);
651 wide_int tmax = wide_int::from (wmax, prec, sgn);
652 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
653 {
654 /* If the limits are swapped, we wrapped around and cover
655 the entire range. */
656 if (wi::gt_p (tmin, tmax, sgn))
657 kind = VR_VARYING;
658 else
659 {
660 kind = VR_RANGE;
661 /* No overflow or both overflow or underflow. The
662 range kind stays VR_RANGE. */
663 min = wide_int_to_tree (type, tmin);
664 max = wide_int_to_tree (type, tmax);
665 }
666 return;
667 }
668 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
669 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
670 {
671 /* Min underflow or max overflow. The range kind
672 changes to VR_ANTI_RANGE. */
673 bool covers = false;
674 wide_int tem = tmin;
675 tmin = tmax + 1;
676 if (wi::cmp (tmin, tmax, sgn) < 0)
677 covers = true;
678 tmax = tem - 1;
679 if (wi::cmp (tmax, tem, sgn) > 0)
680 covers = true;
681 /* If the anti-range would cover nothing, drop to varying.
682 Likewise if the anti-range bounds are outside of the
 683 type's values. */
684 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
685 {
686 kind = VR_VARYING;
687 return;
688 }
689 kind = VR_ANTI_RANGE;
690 min = wide_int_to_tree (type, tmin);
691 max = wide_int_to_tree (type, tmax);
692 return;
693 }
694 else
695 {
696 /* Other underflow and/or overflow, drop to VR_VARYING. */
697 kind = VR_VARYING;
698 return;
699 }
700 }
701 else
702 {
703 /* If overflow does not wrap, saturate to the types min/max
704 value. */
705 wide_int type_min = wi::min_value (prec, sgn);
706 wide_int type_max = wi::max_value (prec, sgn);
707 kind = VR_RANGE;
708 if (min_ovf == wi::OVF_UNDERFLOW)
709 min = wide_int_to_tree (type, type_min);
710 else if (min_ovf == wi::OVF_OVERFLOW)
711 min = wide_int_to_tree (type, type_max);
712 else
713 min = wide_int_to_tree (type, wmin);
714
715 if (max_ovf == wi::OVF_UNDERFLOW)
716 max = wide_int_to_tree (type, type_min);
717 else if (max_ovf == wi::OVF_OVERFLOW)
718 max = wide_int_to_tree (type, type_max);
719 else
720 max = wide_int_to_tree (type, wmax);
721 }
722 }
723
 724 /* Fold two value ranges of a POINTER_PLUS_EXPR into VR. */
725
726 static void
727 extract_range_from_pointer_plus_expr (value_range *vr,
728 enum tree_code code,
729 tree expr_type,
730 const value_range *vr0,
731 const value_range *vr1)
732 {
733 gcc_checking_assert (POINTER_TYPE_P (expr_type)
734 && code == POINTER_PLUS_EXPR);
735 /* For pointer types, we are really only interested in asserting
736 whether the expression evaluates to non-NULL.
737 With -fno-delete-null-pointer-checks we need to be more
738 conservative. As some object might reside at address 0,
739 then some offset could be added to it and the same offset
740 subtracted again and the result would be NULL.
741 E.g.
742 static int a[12]; where &a[0] is NULL and
743 ptr = &a[6];
744 ptr -= 6;
745 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
746 where the first range doesn't include zero and the second one
747 doesn't either. As the second operand is sizetype (unsigned),
748 consider all ranges where the MSB could be set as possible
749 subtractions where the result might be NULL. */
750 if ((!range_includes_zero_p (vr0)
751 || !range_includes_zero_p (vr1))
752 && !TYPE_OVERFLOW_WRAPS (expr_type)
753 && (flag_delete_null_pointer_checks
754 || (range_int_cst_p (vr1)
755 && !tree_int_cst_sign_bit (vr1->max ()))))
756 vr->set_nonzero (expr_type);
757 else if (vr0->zero_p () && vr1->zero_p ())
758 vr->set_zero (expr_type);
759 else
760 vr->set_varying (expr_type);
761 }
762
763 /* Extract range information from a PLUS/MINUS_EXPR and store the
764 result in *VR. */
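/* For example (illustrative): with signed int operands,
   [1, 5] - [2, 3] is computed from the bound pairs MIN = 1 - 3 and
   MAX = 5 - 2, giving the range [-2, 3].  */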
765
766 static void
767 extract_range_from_plus_minus_expr (value_range *vr,
768 enum tree_code code,
769 tree expr_type,
770 const value_range *vr0_,
771 const value_range *vr1_)
772 {
773 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
774
775 value_range vr0 = *vr0_, vr1 = *vr1_;
776 value_range vrtem0, vrtem1;
777
778 /* Now canonicalize anti-ranges to ranges when they are not symbolic
779 and express ~[] op X as ([]' op X) U ([]'' op X). */
780 if (vr0.kind () == VR_ANTI_RANGE
781 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
782 {
783 extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
784 if (!vrtem1.undefined_p ())
785 {
786 value_range vrres;
787 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
788 &vrtem1, vr1_);
789 vr->union_ (&vrres);
790 }
791 return;
792 }
793 /* Likewise for X op ~[]. */
794 if (vr1.kind () == VR_ANTI_RANGE
795 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
796 {
797 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
798 if (!vrtem1.undefined_p ())
799 {
800 value_range vrres;
801 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
802 vr0_, &vrtem1);
803 vr->union_ (&vrres);
804 }
805 return;
806 }
807
808 value_range_kind kind;
809 value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
810 tree vr0_min = vr0.min (), vr0_max = vr0.max ();
811 tree vr1_min = vr1.min (), vr1_max = vr1.max ();
812 tree min = NULL_TREE, max = NULL_TREE;
813
814 /* This will normalize things such that calculating
815 [0,0] - VR_VARYING is not dropped to varying, but is
816 calculated as [MIN+1, MAX]. */
817 if (vr0.varying_p ())
818 {
819 vr0_kind = VR_RANGE;
820 vr0_min = vrp_val_min (expr_type);
821 vr0_max = vrp_val_max (expr_type);
822 }
823 if (vr1.varying_p ())
824 {
825 vr1_kind = VR_RANGE;
826 vr1_min = vrp_val_min (expr_type);
827 vr1_max = vrp_val_max (expr_type);
828 }
829
830 const bool minus_p = (code == MINUS_EXPR);
831 tree min_op0 = vr0_min;
832 tree min_op1 = minus_p ? vr1_max : vr1_min;
833 tree max_op0 = vr0_max;
834 tree max_op1 = minus_p ? vr1_min : vr1_max;
835 tree sym_min_op0 = NULL_TREE;
836 tree sym_min_op1 = NULL_TREE;
837 tree sym_max_op0 = NULL_TREE;
838 tree sym_max_op1 = NULL_TREE;
839 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
840
841 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
842
843 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
844 single-symbolic ranges, try to compute the precise resulting range,
845 but only if we know that this resulting range will also be constant
846 or single-symbolic. */
847 if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
848 && (TREE_CODE (min_op0) == INTEGER_CST
849 || (sym_min_op0
850 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
851 && (TREE_CODE (min_op1) == INTEGER_CST
852 || (sym_min_op1
853 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
854 && (!(sym_min_op0 && sym_min_op1)
855 || (sym_min_op0 == sym_min_op1
856 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
857 && (TREE_CODE (max_op0) == INTEGER_CST
858 || (sym_max_op0
859 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
860 && (TREE_CODE (max_op1) == INTEGER_CST
861 || (sym_max_op1
862 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
863 && (!(sym_max_op0 && sym_max_op1)
864 || (sym_max_op0 == sym_max_op1
865 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
866 {
867 wide_int wmin, wmax;
868 wi::overflow_type min_ovf = wi::OVF_NONE;
869 wi::overflow_type max_ovf = wi::OVF_NONE;
870
871 /* Build the bounds. */
872 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
873 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
874
875 /* If the resulting range will be symbolic, we need to eliminate any
876 explicit or implicit overflow introduced in the above computation
877 because compare_values could make an incorrect use of it. That's
878 why we require one of the ranges to be a singleton. */
879 if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
880 && ((bool)min_ovf || (bool)max_ovf
881 || (min_op0 != max_op0 && min_op1 != max_op1)))
882 {
883 vr->set_varying (expr_type);
884 return;
885 }
886
887 /* Adjust the range for possible overflow. */
888 set_value_range_with_overflow (kind, min, max, expr_type,
889 wmin, wmax, min_ovf, max_ovf);
890 if (kind == VR_VARYING)
891 {
892 vr->set_varying (expr_type);
893 return;
894 }
895
896 /* Build the symbolic bounds if needed. */
897 adjust_symbolic_bound (min, code, expr_type,
898 sym_min_op0, sym_min_op1,
899 neg_min_op0, neg_min_op1);
900 adjust_symbolic_bound (max, code, expr_type,
901 sym_max_op0, sym_max_op1,
902 neg_max_op0, neg_max_op1);
903 }
904 else
905 {
906 /* For other cases, for example if we have a PLUS_EXPR with two
907 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
908 to compute a precise range for such a case.
909 ??? General even mixed range kind operations can be expressed
910 by for example transforming ~[3, 5] + [1, 2] to range-only
911 operations and a union primitive:
912 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
913 [-INF+1, 4] U [6, +INF(OVF)]
914 though usually the union is not exactly representable with
915 a single range or anti-range as the above is
916 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
917 but one could use a scheme similar to equivalences for this. */
918 vr->set_varying (expr_type);
919 return;
920 }
921
922 /* If either MIN or MAX overflowed, then set the resulting range to
923 VARYING. */
924 if (min == NULL_TREE
925 || TREE_OVERFLOW_P (min)
926 || max == NULL_TREE
927 || TREE_OVERFLOW_P (max))
928 {
929 vr->set_varying (expr_type);
930 return;
931 }
932
933 int cmp = compare_values (min, max);
934 if (cmp == -2 || cmp == 1)
935 {
936 /* If the new range has its limits swapped around (MIN > MAX),
937 then the operation caused one of them to wrap around, mark
938 the new range VARYING. */
939 vr->set_varying (expr_type);
940 }
941 else
942 vr->set (min, max, kind);
943 }
944
945 /* Return the range-ops handler for CODE and EXPR_TYPE. If no
946 suitable operator is found, return NULL and set VR to VARYING. */
947
948 static const range_operator *
949 get_range_op_handler (value_range *vr,
950 enum tree_code code,
951 tree expr_type)
952 {
953 const range_operator *op = range_op_handler (code, expr_type);
954 if (!op)
955 vr->set_varying (expr_type);
956 return op;
957 }
958
959 /* If the types passed are supported, return TRUE, otherwise set VR to
960 VARYING and return FALSE. */
961
962 static bool
963 supported_types_p (value_range *vr,
964 tree type0,
965 tree type1 = NULL)
966 {
967 if (!value_range::supports_type_p (type0)
968 || (type1 && !value_range::supports_type_p (type1)))
969 {
970 vr->set_varying (type0);
971 return false;
972 }
973 return true;
974 }
975
976 /* If any of the ranges passed are defined, return TRUE, otherwise set
977 VR to UNDEFINED and return FALSE. */
978
979 static bool
980 defined_ranges_p (value_range *vr,
981 const value_range *vr0, const value_range *vr1 = NULL)
982 {
983 if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
984 {
985 vr->set_undefined ();
986 return false;
987 }
988 return true;
989 }
990
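/* If VR is undefined, return a VARYING range of type EXPR_TYPE,
   otherwise return a copy of VR.  */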
991 static value_range
992 drop_undefines_to_varying (const value_range *vr, tree expr_type)
993 {
994 if (vr->undefined_p ())
995 return value_range (expr_type);
996 else
997 return *vr;
998 }
999
1000 /* If any operand is symbolic, perform a binary operation on them and
1001 return TRUE, otherwise return FALSE. */
1002
1003 static bool
1004 range_fold_binary_symbolics_p (value_range *vr,
1005 tree_code code,
1006 tree expr_type,
1007 const value_range *vr0_,
1008 const value_range *vr1_)
1009 {
1010 if (vr0_->symbolic_p () || vr1_->symbolic_p ())
1011 {
1012 value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
1013 value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
1014 if ((code == PLUS_EXPR || code == MINUS_EXPR))
1015 {
1016 extract_range_from_plus_minus_expr (vr, code, expr_type,
1017 &vr0, &vr1);
1018 return true;
1019 }
1020 if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
1021 {
1022 extract_range_from_pointer_plus_expr (vr, code, expr_type,
1023 &vr0, &vr1);
1024 return true;
1025 }
1026 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1027 vr0.normalize_symbolics ();
1028 vr1.normalize_symbolics ();
1029 return op->fold_range (*vr, expr_type, vr0, vr1);
1030 }
1031 return false;
1032 }
1033
1034 /* If operand is symbolic, perform a unary operation on it and return
1035 TRUE, otherwise return FALSE. */
1036
1037 static bool
1038 range_fold_unary_symbolics_p (value_range *vr,
1039 tree_code code,
1040 tree expr_type,
1041 const value_range *vr0)
1042 {
1043 if (vr0->symbolic_p ())
1044 {
1045 if (code == NEGATE_EXPR)
1046 {
1047 /* -X is simply 0 - X. */
1048 value_range zero;
1049 zero.set_zero (vr0->type ());
1050 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1051 return true;
1052 }
1053 if (code == BIT_NOT_EXPR)
1054 {
1055 /* ~X is simply -1 - X. */
1056 value_range minusone;
1057 minusone.set (build_int_cst (vr0->type (), -1));
1058 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1059 return true;
1060 }
1061 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1062 value_range vr0_cst (*vr0);
1063 vr0_cst.normalize_symbolics ();
1064 return op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1065 }
1066 return false;
1067 }
1068
1069 /* Perform a binary operation on a pair of ranges. */
1070
1071 void
1072 range_fold_binary_expr (value_range *vr,
1073 enum tree_code code,
1074 tree expr_type,
1075 const value_range *vr0_,
1076 const value_range *vr1_)
1077 {
1078 if (!supported_types_p (vr, expr_type)
1079 || !defined_ranges_p (vr, vr0_, vr1_))
1080 return;
1081 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1082 if (!op)
1083 return;
1084
1085 if (range_fold_binary_symbolics_p (vr, code, expr_type, vr0_, vr1_))
1086 return;
1087
1088 value_range vr0 (*vr0_);
1089 value_range vr1 (*vr1_);
1090 if (vr0.undefined_p ())
1091 vr0.set_varying (expr_type);
1092 if (vr1.undefined_p ())
1093 vr1.set_varying (expr_type);
1094 vr0.normalize_addresses ();
1095 vr1.normalize_addresses ();
1096 op->fold_range (*vr, expr_type, vr0, vr1);
1097 }
1098
1099 /* Perform a unary operation on a range. */
1100
1101 void
1102 range_fold_unary_expr (value_range *vr,
1103 enum tree_code code, tree expr_type,
1104 const value_range *vr0,
1105 tree vr0_type)
1106 {
1107 if (!supported_types_p (vr, expr_type, vr0_type)
1108 || !defined_ranges_p (vr, vr0))
1109 return;
1110 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1111 if (!op)
1112 return;
1113
1114 if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1115 return;
1116
1117 value_range vr0_cst (*vr0);
1118 vr0_cst.normalize_addresses ();
1119 op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1120 }
1121
1122 /* If the range of values taken by OP can be inferred after STMT executes,
1123 return the comparison code (COMP_CODE_P) and value (VAL_P) that
1124 describes the inferred range. Return true if a range could be
1125 inferred. */
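/* For example (illustrative): if STMT dereferences OP, as in
   "*p_7 = 1;", and infer_nonnull_range determines that OP cannot be
   null there, this returns true with *COMP_CODE_P == NE_EXPR and
   *VAL_P == 0, i.e. p_7 != 0 holds after the statement.  */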
1126
1127 bool
1128 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
1129 {
1130 *val_p = NULL_TREE;
1131 *comp_code_p = ERROR_MARK;
1132
1133 /* Do not attempt to infer anything in names that flow through
1134 abnormal edges. */
1135 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
1136 return false;
1137
1138 /* If STMT is the last statement of a basic block with no normal
1139 successors, there is no point inferring anything about any of its
1140 operands. We would not be able to find a proper insertion point
1141 for the assertion, anyway. */
1142 if (stmt_ends_bb_p (stmt))
1143 {
1144 edge_iterator ei;
1145 edge e;
1146
1147 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1148 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
1149 break;
1150 if (e == NULL)
1151 return false;
1152 }
1153
1154 if (infer_nonnull_range (stmt, op))
1155 {
1156 *val_p = build_int_cst (TREE_TYPE (op), 0);
1157 *comp_code_p = NE_EXPR;
1158 return true;
1159 }
1160
1161 return false;
1162 }
1163
1164 /* Dump assert_info structure. */
1165
1166 void
1167 dump_assert_info (FILE *file, const assert_info &assert)
1168 {
1169 fprintf (file, "Assert for: ");
1170 print_generic_expr (file, assert.name);
1171 fprintf (file, "\n\tPREDICATE: expr=[");
1172 print_generic_expr (file, assert.expr);
1173 fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
1174 fprintf (file, "val=[");
1175 print_generic_expr (file, assert.val);
1176 fprintf (file, "]\n\n");
1177 }
1178
1179 DEBUG_FUNCTION void
1180 debug (const assert_info &assert)
1181 {
1182 dump_assert_info (stderr, assert);
1183 }
1184
1185 /* Dump a vector of assert_info's. */
1186
1187 void
1188 dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
1189 {
1190 for (unsigned i = 0; i < asserts.length (); ++i)
1191 {
1192 dump_assert_info (file, asserts[i]);
1193 fprintf (file, "\n");
1194 }
1195 }
1196
1197 DEBUG_FUNCTION void
1198 debug (const vec<assert_info> &asserts)
1199 {
1200 dump_asserts_info (stderr, asserts);
1201 }
1202
1203 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
1204
1205 static void
1206 add_assert_info (vec<assert_info> &asserts,
1207 tree name, tree expr, enum tree_code comp_code, tree val)
1208 {
1209 assert_info info;
1210 info.comp_code = comp_code;
1211 info.name = name;
1212 if (TREE_OVERFLOW_P (val))
1213 val = drop_tree_overflow (val);
1214 info.val = val;
1215 info.expr = expr;
1216 asserts.safe_push (info);
1217 if (dump_enabled_p ())
1218 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
1219 "Adding assert for %T from %T %s %T\n",
1220 name, expr, op_symbol_code (comp_code), val);
1221 }
1222
1223 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
1224 Extract a suitable test code and value and store them into *CODE_P and
1225 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
1226
1227 If no extraction was possible, return FALSE, otherwise return TRUE.
1228
1229 If INVERT is true, then we invert the result stored into *CODE_P. */
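/* For example (illustrative): for the condition "10 < x_4" with
   NAME == x_4, NAME is COND_OP1, so the comparison is flipped and the
   predicate is normalized to x_4 > 10; with INVERT (the else edge) it
   becomes x_4 <= 10.  */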
1230
1231 static bool
1232 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
1233 tree cond_op0, tree cond_op1,
1234 bool invert, enum tree_code *code_p,
1235 tree *val_p)
1236 {
1237 enum tree_code comp_code;
1238 tree val;
1239
 1240 /* We have a comparison of the form NAME COMP VAL
1241 or VAL COMP NAME. */
1242 if (name == cond_op1)
1243 {
1244 /* If the predicate is of the form VAL COMP NAME, flip
1245 COMP around because we need to register NAME as the
1246 first operand in the predicate. */
1247 comp_code = swap_tree_comparison (cond_code);
1248 val = cond_op0;
1249 }
1250 else if (name == cond_op0)
1251 {
1252 /* The comparison is of the form NAME COMP VAL, so the
1253 comparison code remains unchanged. */
1254 comp_code = cond_code;
1255 val = cond_op1;
1256 }
1257 else
1258 gcc_unreachable ();
1259
1260 /* Invert the comparison code as necessary. */
1261 if (invert)
1262 comp_code = invert_tree_comparison (comp_code, 0);
1263
1264 /* VRP only handles integral and pointer types. */
1265 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
1266 && ! POINTER_TYPE_P (TREE_TYPE (val)))
1267 return false;
1268
1269 /* Do not register always-false predicates.
1270 FIXME: this works around a limitation in fold() when dealing with
1271 enumerations. Given 'enum { N1, N2 } x;', fold will not
1272 fold 'if (x > N2)' to 'if (0)'. */
1273 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
1274 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
1275 {
1276 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
1277 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
1278
1279 if (comp_code == GT_EXPR
1280 && (!max
1281 || compare_values (val, max) == 0))
1282 return false;
1283
1284 if (comp_code == LT_EXPR
1285 && (!min
1286 || compare_values (val, min) == 0))
1287 return false;
1288 }
1289 *code_p = comp_code;
1290 *val_p = val;
1291 return true;
1292 }
1293
1294 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
1295 (otherwise return VAL). VAL and MASK must be zero-extended for
1296 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
1297 (to transform signed values into unsigned) and at the end xor
1298 SGNBIT back. */
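/* A worked example (illustrative): with PREC == 8, MASK == 0x0c,
   SGNBIT == 0 and VAL == 5, the smallest RES > 5 with
   (RES & MASK) == RES is 8, so 8 is returned; for VAL == 12 no such
   RES exists and VAL itself is returned.  */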
1299
1300 wide_int
1301 masked_increment (const wide_int &val_in, const wide_int &mask,
1302 const wide_int &sgnbit, unsigned int prec)
1303 {
1304 wide_int bit = wi::one (prec), res;
1305 unsigned int i;
1306
1307 wide_int val = val_in ^ sgnbit;
1308 for (i = 0; i < prec; i++, bit += bit)
1309 {
1310 res = mask;
1311 if ((res & bit) == 0)
1312 continue;
1313 res = bit - 1;
1314 res = wi::bit_and_not (val + bit, res);
1315 res &= mask;
1316 if (wi::gtu_p (res, val))
1317 return res ^ sgnbit;
1318 }
1319 return val ^ sgnbit;
1320 }
1321
1322 /* Helper for overflow_comparison_p
1323
1324 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1325 OP1's defining statement to see if it ultimately has the form
1326 OP0 CODE (OP0 PLUS INTEGER_CST)
1327
1328 If so, return TRUE indicating this is an overflow test and store into
1329 *NEW_CST an updated constant that can be used in a narrowed range test.
1330
1331 REVERSED indicates if the comparison was originally:
1332
1333 OP1 CODE' OP0.
1334
1335 This affects how we build the updated constant. */
1336
1337 static bool
1338 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
1339 bool follow_assert_exprs, bool reversed, tree *new_cst)
1340 {
1341 /* See if this is a relational operation between two SSA_NAMES with
1342 unsigned, overflow wrapping values. If so, check it more deeply. */
1343 if ((code == LT_EXPR || code == LE_EXPR
1344 || code == GE_EXPR || code == GT_EXPR)
1345 && TREE_CODE (op0) == SSA_NAME
1346 && TREE_CODE (op1) == SSA_NAME
1347 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
1348 && TYPE_UNSIGNED (TREE_TYPE (op0))
1349 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
1350 {
1351 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
1352
1353 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
1354 if (follow_assert_exprs)
1355 {
1356 while (gimple_assign_single_p (op1_def)
1357 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
1358 {
1359 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
1360 if (TREE_CODE (op1) != SSA_NAME)
1361 break;
1362 op1_def = SSA_NAME_DEF_STMT (op1);
1363 }
1364 }
1365
1366 /* Now look at the defining statement of OP1 to see if it adds
1367 or subtracts a nonzero constant from another operand. */
1368 if (op1_def
1369 && is_gimple_assign (op1_def)
1370 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
1371 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
1372 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
1373 {
1374 tree target = gimple_assign_rhs1 (op1_def);
1375
1376 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
1377 for one where TARGET appears on the RHS. */
1378 if (follow_assert_exprs)
1379 {
1380 /* Now see if that "other operand" is op0, following the chain
1381 of ASSERT_EXPRs if necessary. */
1382 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
1383 while (op0 != target
1384 && gimple_assign_single_p (op0_def)
1385 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
1386 {
1387 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
1388 if (TREE_CODE (op0) != SSA_NAME)
1389 break;
1390 op0_def = SSA_NAME_DEF_STMT (op0);
1391 }
1392 }
1393
1394 /* If we did not find our target SSA_NAME, then this is not
1395 an overflow test. */
1396 if (op0 != target)
1397 return false;
1398
1399 tree type = TREE_TYPE (op0);
1400 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
1401 tree inc = gimple_assign_rhs2 (op1_def);
1402 if (reversed)
1403 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
1404 else
1405 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
1406 return true;
1407 }
1408 }
1409 return false;
1410 }
1411
1412 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1413 OP1's defining statement to see if it ultimately has the form
1414 OP0 CODE (OP0 PLUS INTEGER_CST)
1415
1416 If so, return TRUE indicating this is an overflow test and store into
1417 *NEW_CST an updated constant that can be used in a narrowed range test.
1418
1419 These statements are left as-is in the IL to facilitate discovery of
1420 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
1421 the alternate range representation is often useful within VRP. */
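/* For example (illustrative SSA names): given unsigned
   "y_2 = x_1 + 10;" and the test "x_1 > y_2", the comparison is true
   exactly when x_1 + 10 wraps, so this returns true and sets *NEW_CST
   to UINT_MAX - 10, allowing the test to be treated as
   x_1 > UINT_MAX - 10.  */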
1422
1423 bool
1424 overflow_comparison_p (tree_code code, tree name, tree val,
1425 bool use_equiv_p, tree *new_cst)
1426 {
1427 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
1428 return true;
1429 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
1430 use_equiv_p, true, new_cst);
1431 }
1432
1433
1434 /* Try to register an edge assertion for SSA name NAME on edge E for
 1435 the condition COND contributing to the conditional jump at the end of E->src.
1436 Invert the condition COND if INVERT is true. */
1437
1438 static void
1439 register_edge_assert_for_2 (tree name, edge e,
1440 enum tree_code cond_code,
1441 tree cond_op0, tree cond_op1, bool invert,
1442 vec<assert_info> &asserts)
1443 {
1444 tree val;
1445 enum tree_code comp_code;
1446
1447 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
1448 cond_op0,
1449 cond_op1,
1450 invert, &comp_code, &val))
1451 return;
1452
1453 /* Queue the assert. */
1454 tree x;
1455 if (overflow_comparison_p (comp_code, name, val, false, &x))
1456 {
1457 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
1458 ? GT_EXPR : LE_EXPR);
1459 add_assert_info (asserts, name, name, new_code, x);
1460 }
1461 add_assert_info (asserts, name, name, comp_code, val);
1462
1463 /* In the case of NAME <= CST and NAME being defined as
1464 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
1465 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
1466 This catches range and anti-range tests. */
1467 if ((comp_code == LE_EXPR
1468 || comp_code == GT_EXPR)
1469 && TREE_CODE (val) == INTEGER_CST
1470 && TYPE_UNSIGNED (TREE_TYPE (val)))
1471 {
1472 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1473 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
1474
1475 /* Extract CST2 from the (optional) addition. */
1476 if (is_gimple_assign (def_stmt)
1477 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
1478 {
1479 name2 = gimple_assign_rhs1 (def_stmt);
1480 cst2 = gimple_assign_rhs2 (def_stmt);
1481 if (TREE_CODE (name2) == SSA_NAME
1482 && TREE_CODE (cst2) == INTEGER_CST)
1483 def_stmt = SSA_NAME_DEF_STMT (name2);
1484 }
1485
 1486 /* Extract NAME3 from the (optional) sign-changing cast. */
1487 if (gimple_assign_cast_p (def_stmt))
1488 {
1489 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
1490 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1491 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
1492 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
1493 name3 = gimple_assign_rhs1 (def_stmt);
1494 }
1495
1496 /* If name3 is used later, create an ASSERT_EXPR for it. */
1497 if (name3 != NULL_TREE
1498 && TREE_CODE (name3) == SSA_NAME
1499 && (cst2 == NULL_TREE
1500 || TREE_CODE (cst2) == INTEGER_CST)
1501 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
1502 {
1503 tree tmp;
1504
1505 /* Build an expression for the range test. */
1506 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
1507 if (cst2 != NULL_TREE)
1508 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1509 add_assert_info (asserts, name3, tmp, comp_code, val);
1510 }
1511
1512 /* If name2 is used later, create an ASSERT_EXPR for it. */
1513 if (name2 != NULL_TREE
1514 && TREE_CODE (name2) == SSA_NAME
1515 && TREE_CODE (cst2) == INTEGER_CST
1516 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
1517 {
1518 tree tmp;
1519
1520 /* Build an expression for the range test. */
1521 tmp = name2;
1522 if (TREE_TYPE (name) != TREE_TYPE (name2))
1523 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
1524 if (cst2 != NULL_TREE)
1525 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1526 add_assert_info (asserts, name2, tmp, comp_code, val);
1527 }
1528 }
1529
1530 /* In the case of post-in/decrement tests like if (i++) ... and uses
 1531 of the in/decremented value on the edge, the extra name we want to
 1532 assert for is not on the def chain of the name compared; instead
 1533 it is in the set of use stmts.
1534 Similar cases happen for conversions that were simplified through
1535 fold_{sign_changed,widened}_comparison. */
1536 if ((comp_code == NE_EXPR
1537 || comp_code == EQ_EXPR)
1538 && TREE_CODE (val) == INTEGER_CST)
1539 {
1540 imm_use_iterator ui;
1541 gimple *use_stmt;
1542 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
1543 {
1544 if (!is_gimple_assign (use_stmt))
1545 continue;
1546
 1547 /* Restrict to use-stmts whose block dominates the predecessor E->SRC. */
1548 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
1549 continue;
1550
1551 tree name2 = gimple_assign_lhs (use_stmt);
1552 if (TREE_CODE (name2) != SSA_NAME)
1553 continue;
1554
1555 enum tree_code code = gimple_assign_rhs_code (use_stmt);
1556 tree cst;
1557 if (code == PLUS_EXPR
1558 || code == MINUS_EXPR)
1559 {
1560 cst = gimple_assign_rhs2 (use_stmt);
1561 if (TREE_CODE (cst) != INTEGER_CST)
1562 continue;
1563 cst = int_const_binop (code, val, cst);
1564 }
1565 else if (CONVERT_EXPR_CODE_P (code))
1566 {
1567 /* For truncating conversions we cannot record
1568 an inequality. */
1569 if (comp_code == NE_EXPR
1570 && (TYPE_PRECISION (TREE_TYPE (name2))
1571 < TYPE_PRECISION (TREE_TYPE (name))))
1572 continue;
1573 cst = fold_convert (TREE_TYPE (name2), val);
1574 }
1575 else
1576 continue;
1577
1578 if (TREE_OVERFLOW_P (cst))
1579 cst = drop_tree_overflow (cst);
1580 add_assert_info (asserts, name2, name2, comp_code, cst);
1581 }
1582 }
1583
1584 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
1585 && TREE_CODE (val) == INTEGER_CST)
1586 {
1587 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1588 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
1589 tree val2 = NULL_TREE;
1590 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
1591 wide_int mask = wi::zero (prec);
1592 unsigned int nprec = prec;
1593 enum tree_code rhs_code = ERROR_MARK;
1594
1595 if (is_gimple_assign (def_stmt))
1596 rhs_code = gimple_assign_rhs_code (def_stmt);
1597
1598 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
1599 assert that A != CST1 -+ CST2. */
1600 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
1601 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
1602 {
1603 tree op0 = gimple_assign_rhs1 (def_stmt);
1604 tree op1 = gimple_assign_rhs2 (def_stmt);
1605 if (TREE_CODE (op0) == SSA_NAME
1606 && TREE_CODE (op1) == INTEGER_CST)
1607 {
1608 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
1609 ? MINUS_EXPR : PLUS_EXPR);
1610 op1 = int_const_binop (reverse_op, val, op1);
1611 if (TREE_OVERFLOW (op1))
1612 op1 = drop_tree_overflow (op1);
1613 add_assert_info (asserts, op0, op0, comp_code, op1);
1614 }
1615 }
1616
1617 /* Add asserts for NAME cmp CST and NAME being defined
1618 as NAME = (int) NAME2. */
1619 if (!TYPE_UNSIGNED (TREE_TYPE (val))
1620 && (comp_code == LE_EXPR || comp_code == LT_EXPR
1621 || comp_code == GT_EXPR || comp_code == GE_EXPR)
1622 && gimple_assign_cast_p (def_stmt))
1623 {
1624 name2 = gimple_assign_rhs1 (def_stmt);
1625 if (CONVERT_EXPR_CODE_P (rhs_code)
1626 && TREE_CODE (name2) == SSA_NAME
1627 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1628 && TYPE_UNSIGNED (TREE_TYPE (name2))
1629 && prec == TYPE_PRECISION (TREE_TYPE (name2))
1630 && (comp_code == LE_EXPR || comp_code == GT_EXPR
1631 || !tree_int_cst_equal (val,
1632 TYPE_MIN_VALUE (TREE_TYPE (val)))))
1633 {
1634 tree tmp, cst;
1635 enum tree_code new_comp_code = comp_code;
1636
1637 cst = fold_convert (TREE_TYPE (name2),
1638 TYPE_MIN_VALUE (TREE_TYPE (val)));
1639 /* Build an expression for the range test. */
1640 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
1641 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
1642 fold_convert (TREE_TYPE (name2), val));
1643 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1644 {
1645 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
1646 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
1647 build_int_cst (TREE_TYPE (name2), 1));
1648 }
1649 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
1650 }
1651 }
1652
1653 /* Add asserts for NAME cmp CST and NAME being defined as
1654 NAME = NAME2 >> CST2.
1655
1656 Extract CST2 from the right shift. */
1657 if (rhs_code == RSHIFT_EXPR)
1658 {
1659 name2 = gimple_assign_rhs1 (def_stmt);
1660 cst2 = gimple_assign_rhs2 (def_stmt);
1661 if (TREE_CODE (name2) == SSA_NAME
1662 && tree_fits_uhwi_p (cst2)
1663 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1664 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
1665 && type_has_mode_precision_p (TREE_TYPE (val)))
1666 {
1667 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
1668 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
1669 }
1670 }
1671 if (val2 != NULL_TREE
1672 && TREE_CODE (val2) == INTEGER_CST
1673 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
1674 TREE_TYPE (val),
1675 val2, cst2), val))
1676 {
1677 enum tree_code new_comp_code = comp_code;
1678 tree tmp, new_val;
1679
1680 tmp = name2;
1681 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
1682 {
1683 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
1684 {
1685 tree type = build_nonstandard_integer_type (prec, 1);
1686 tmp = build1 (NOP_EXPR, type, name2);
1687 val2 = fold_convert (type, val2);
1688 }
1689 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
1690 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
1691 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
1692 }
1693 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1694 {
1695 wide_int minval
1696 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1697 new_val = val2;
1698 if (minval == wi::to_wide (new_val))
1699 new_val = NULL_TREE;
1700 }
1701 else
1702 {
1703 wide_int maxval
1704 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1705 mask |= wi::to_wide (val2);
1706 if (wi::eq_p (mask, maxval))
1707 new_val = NULL_TREE;
1708 else
1709 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
1710 }
1711
1712 if (new_val)
1713 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
1714 }
1715
 1716 /* If we have a conversion that doesn't change the value of the source,
 1717 simply register the same assert for it. */
1718 if (CONVERT_EXPR_CODE_P (rhs_code))
1719 {
1720 wide_int rmin, rmax;
1721 tree rhs1 = gimple_assign_rhs1 (def_stmt);
1722 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1723 && TREE_CODE (rhs1) == SSA_NAME
1724 /* Make sure the relation preserves the upper/lower boundary of
1725 the range conservatively. */
1726 && (comp_code == NE_EXPR
1727 || comp_code == EQ_EXPR
1728 || (TYPE_SIGN (TREE_TYPE (name))
1729 == TYPE_SIGN (TREE_TYPE (rhs1)))
1730 || ((comp_code == LE_EXPR
1731 || comp_code == LT_EXPR)
1732 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
1733 || ((comp_code == GE_EXPR
1734 || comp_code == GT_EXPR)
1735 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
1736 /* And the conversion does not alter the value we compare
1737 against and all values in rhs1 can be represented in
1738 the converted to type. */
1739 && int_fits_type_p (val, TREE_TYPE (rhs1))
1740 && ((TYPE_PRECISION (TREE_TYPE (name))
1741 > TYPE_PRECISION (TREE_TYPE (rhs1)))
1742 || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
1743 && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
1744 && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
1745 add_assert_info (asserts, rhs1, rhs1,
1746 comp_code, fold_convert (TREE_TYPE (rhs1), val));
1747 }
1748
1749 /* Add asserts for NAME cmp CST and NAME being defined as
1750 NAME = NAME2 & CST2.
1751
1752 Extract CST2 from the and.
1753
1754 Also handle
1755 NAME = (unsigned) NAME2;
1756 casts where NAME's type is unsigned and has smaller precision
1757 than NAME2's type as if it was NAME = NAME2 & MASK. */
1758 names[0] = NULL_TREE;
1759 names[1] = NULL_TREE;
1760 cst2 = NULL_TREE;
1761 if (rhs_code == BIT_AND_EXPR
1762 || (CONVERT_EXPR_CODE_P (rhs_code)
1763 && INTEGRAL_TYPE_P (TREE_TYPE (val))
1764 && TYPE_UNSIGNED (TREE_TYPE (val))
1765 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1766 > prec))
1767 {
1768 name2 = gimple_assign_rhs1 (def_stmt);
1769 if (rhs_code == BIT_AND_EXPR)
1770 cst2 = gimple_assign_rhs2 (def_stmt);
1771 else
1772 {
1773 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
1774 nprec = TYPE_PRECISION (TREE_TYPE (name2));
1775 }
1776 if (TREE_CODE (name2) == SSA_NAME
1777 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1778 && TREE_CODE (cst2) == INTEGER_CST
1779 && !integer_zerop (cst2)
1780 && (nprec > 1
1781 || TYPE_UNSIGNED (TREE_TYPE (val))))
1782 {
1783 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
1784 if (gimple_assign_cast_p (def_stmt2))
1785 {
1786 names[1] = gimple_assign_rhs1 (def_stmt2);
1787 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
1788 || TREE_CODE (names[1]) != SSA_NAME
1789 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
1790 || (TYPE_PRECISION (TREE_TYPE (name2))
1791 != TYPE_PRECISION (TREE_TYPE (names[1]))))
1792 names[1] = NULL_TREE;
1793 }
1794 names[0] = name2;
1795 }
1796 }
1797 if (names[0] || names[1])
1798 {
1799 wide_int minv, maxv, valv, cst2v;
1800 wide_int tem, sgnbit;
1801 bool valid_p = false, valn, cst2n;
1802 enum tree_code ccode = comp_code;
1803
1804 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
1805 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
1806 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
1807 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
1808 /* If CST2 doesn't have most significant bit set,
1809 but VAL is negative, we have comparison like
1810 if ((x & 0x123) > -4) (always true). Just give up. */
1811 if (!cst2n && valn)
1812 ccode = ERROR_MARK;
1813 if (cst2n)
1814 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1815 else
1816 sgnbit = wi::zero (nprec);
1817 minv = valv & cst2v;
1818 switch (ccode)
1819 {
1820 case EQ_EXPR:
1821 /* Minimum unsigned value for equality is VAL & CST2
1822 (should be equal to VAL, otherwise we probably should
1823 have folded the comparison into false) and
1824 maximum unsigned value is VAL | ~CST2. */
1825 maxv = valv | ~cst2v;
1826 valid_p = true;
1827 break;
1828
1829 case NE_EXPR:
1830 tem = valv | ~cst2v;
1831 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
1832 if (valv == 0)
1833 {
1834 cst2n = false;
1835 sgnbit = wi::zero (nprec);
1836 goto gt_expr;
1837 }
1838 /* If (VAL | ~CST2) is all ones, handle it as
1839 (X & CST2) < VAL. */
1840 if (tem == -1)
1841 {
1842 cst2n = false;
1843 valn = false;
1844 sgnbit = wi::zero (nprec);
1845 goto lt_expr;
1846 }
1847 if (!cst2n && wi::neg_p (cst2v))
1848 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1849 if (sgnbit != 0)
1850 {
1851 if (valv == sgnbit)
1852 {
1853 cst2n = true;
1854 valn = true;
1855 goto gt_expr;
1856 }
1857 if (tem == wi::mask (nprec - 1, false, nprec))
1858 {
1859 cst2n = true;
1860 goto lt_expr;
1861 }
1862 if (!cst2n)
1863 sgnbit = wi::zero (nprec);
1864 }
1865 break;
1866
1867 case GE_EXPR:
1868 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
1869 is VAL and maximum unsigned value is ~0. For signed
1870 comparison, if CST2 doesn't have most significant bit
1871 set, handle it similarly. If CST2 has MSB set,
1872 the minimum is the same, and maximum is ~0U/2. */
1873 if (minv != valv)
1874 {
1875 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
1876 VAL. */
1877 minv = masked_increment (valv, cst2v, sgnbit, nprec);
1878 if (minv == valv)
1879 break;
1880 }
1881 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1882 valid_p = true;
1883 break;
1884
1885 case GT_EXPR:
1886 gt_expr:
1887 /* Find out smallest MINV where MINV > VAL
1888 && (MINV & CST2) == MINV, if any. If VAL is signed and
1889 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
1890 minv = masked_increment (valv, cst2v, sgnbit, nprec);
1891 if (minv == valv)
1892 break;
1893 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1894 valid_p = true;
1895 break;
1896
1897 case LE_EXPR:
1898 /* Minimum unsigned value for <= is 0 and maximum
1899 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
1900 Otherwise, find smallest VAL2 where VAL2 > VAL
1901 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1902 as maximum.
1903 For signed comparison, if CST2 doesn't have most
1904 significant bit set, handle it similarly. If CST2 has
1905 MSB set, the maximum is the same and minimum is INT_MIN. */
1906 if (minv == valv)
1907 maxv = valv;
1908 else
1909 {
1910 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1911 if (maxv == valv)
1912 break;
1913 maxv -= 1;
1914 }
1915 maxv |= ~cst2v;
1916 minv = sgnbit;
1917 valid_p = true;
1918 break;
1919
1920 case LT_EXPR:
1921 lt_expr:
1922 /* Minimum unsigned value for < is 0 and maximum
1923 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
1924 Otherwise, find smallest VAL2 where VAL2 > VAL
1925 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1926 as maximum.
1927 For signed comparison, if CST2 doesn't have most
1928 significant bit set, handle it similarly. If CST2 has
1929 MSB set, the maximum is the same and minimum is INT_MIN. */
1930 if (minv == valv)
1931 {
1932 if (valv == sgnbit)
1933 break;
1934 maxv = valv;
1935 }
1936 else
1937 {
1938 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1939 if (maxv == valv)
1940 break;
1941 }
1942 maxv -= 1;
1943 maxv |= ~cst2v;
1944 minv = sgnbit;
1945 valid_p = true;
1946 break;
1947
1948 default:
1949 break;
1950 }
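/* As an illustration (made-up values): for an unsigned char X and the test
   (X & 0xf0) > 0x20, the GT_EXPR case above computes MINV = 0x30 and
   MAXV = 0xff, so the code below registers the assertion X - 0x30 <= 0xcf,
   i.e. X is known to lie in [0x30, 0xff].  */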
1951 if (valid_p
1952 && (maxv - minv) != -1)
1953 {
1954 tree tmp, new_val, type;
1955 int i;
1956
1957 for (i = 0; i < 2; i++)
1958 if (names[i])
1959 {
1960 wide_int maxv2 = maxv;
1961 tmp = names[i];
1962 type = TREE_TYPE (names[i]);
1963 if (!TYPE_UNSIGNED (type))
1964 {
1965 type = build_nonstandard_integer_type (nprec, 1);
1966 tmp = build1 (NOP_EXPR, type, names[i]);
1967 }
1968 if (minv != 0)
1969 {
1970 tmp = build2 (PLUS_EXPR, type, tmp,
1971 wide_int_to_tree (type, -minv));
1972 maxv2 = maxv - minv;
1973 }
1974 new_val = wide_int_to_tree (type, maxv2);
1975 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
1976 }
1977 }
1978 }
1979 }
1980 }
1981
1982 /* OP is an operand of a truth value expression which is known to have
1983 a particular value. Register any asserts for OP and for any
1984 operands in OP's defining statement.
1985
1986 If CODE is EQ_EXPR, then we want to register OP is zero (false),
1987 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
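/* For example (illustrative GIMPLE):
     _1 = a_2 < b_3;
     if (_1 != 0) ...
   Calling this function on the true edge with OP = _1 and CODE = NE_EXPR
   records the assertion _1 != 0 and, because _1 is defined by a
   comparison, also registers assertions for a_2 and b_3.  */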
1988
1989 static void
1990 register_edge_assert_for_1 (tree op, enum tree_code code,
1991 edge e, vec<assert_info> &asserts)
1992 {
1993 gimple *op_def;
1994 tree val;
1995 enum tree_code rhs_code;
1996
1997 /* We only care about SSA_NAMEs. */
1998 if (TREE_CODE (op) != SSA_NAME)
1999 return;
2000
2001 /* We know that OP will have a zero or nonzero value. */
2002 val = build_int_cst (TREE_TYPE (op), 0);
2003 add_assert_info (asserts, op, op, code, val);
2004
2005 /* Now look at how OP is set. If it's set from a comparison,
2006 a truth operation or some bit operations, then we may be able
2007 to register information about the operands of that assignment. */
2008 op_def = SSA_NAME_DEF_STMT (op);
2009 if (gimple_code (op_def) != GIMPLE_ASSIGN)
2010 return;
2011
2012 rhs_code = gimple_assign_rhs_code (op_def);
2013
2014 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2015 {
2016 bool invert = (code == EQ_EXPR);
2017 tree op0 = gimple_assign_rhs1 (op_def);
2018 tree op1 = gimple_assign_rhs2 (op_def);
2019
2020 if (TREE_CODE (op0) == SSA_NAME)
2021 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2022 if (TREE_CODE (op1) == SSA_NAME)
2023 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2024 }
2025 else if ((code == NE_EXPR
2026 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2027 || (code == EQ_EXPR
2028 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2029 {
2030 /* Recurse on each operand. */
2031 tree op0 = gimple_assign_rhs1 (op_def);
2032 tree op1 = gimple_assign_rhs2 (op_def);
2033 if (TREE_CODE (op0) == SSA_NAME
2034 && has_single_use (op0))
2035 register_edge_assert_for_1 (op0, code, e, asserts);
2036 if (TREE_CODE (op1) == SSA_NAME
2037 && has_single_use (op1))
2038 register_edge_assert_for_1 (op1, code, e, asserts);
2039 }
2040 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2041 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2042 {
2043 /* Recurse, flipping CODE. */
2044 code = invert_tree_comparison (code, false);
2045 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2046 }
2047 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2048 {
2049 /* Recurse through the copy. */
2050 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2051 }
2052 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2053 {
2054 /* Recurse through the type conversion, unless it is a narrowing
2055 conversion or conversion from non-integral type. */
2056 tree rhs = gimple_assign_rhs1 (op_def);
2057 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2058 && (TYPE_PRECISION (TREE_TYPE (rhs))
2059 <= TYPE_PRECISION (TREE_TYPE (op))))
2060 register_edge_assert_for_1 (rhs, code, e, asserts);
2061 }
2062 }
2063
2064 /* Check if comparison
2065 NAME COND_OP INTEGER_CST
2066 has a form of
2067 (X & 11...100..0) COND_OP XX...X00...0
2068 Such comparison can yield assertions like
2069 X >= XX...X00...0
2070 X <= XX...X11...1
2071 in case of COND_OP being EQ_EXPR or
2072 X < XX...X00...0
2073 X > XX...X11...1
2074 in case of NE_EXPR. */
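/* For instance (illustrative values), for an unsigned 32-bit X:
     (X & 0xffffff00) == 0x1200   yields   X >= 0x1200 and X <= 0x12ff
     (X & 0xffffff00) != 0        yields   X > 0xff
   provided the mask and value pass the checks below.  */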
2075
2076 static bool
2077 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2078 tree *new_name, tree *low, enum tree_code *low_code,
2079 tree *high, enum tree_code *high_code)
2080 {
2081 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2082
2083 if (!is_gimple_assign (def_stmt)
2084 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2085 return false;
2086
2087 tree t = gimple_assign_rhs1 (def_stmt);
2088 tree maskt = gimple_assign_rhs2 (def_stmt);
2089 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2090 return false;
2091
2092 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2093 wide_int inv_mask = ~mask;
2094 /* A degenerate mask (all zeros or all ones) must have been folded away by now, so don't bother optimizing. */
2095 if (mask == 0 || inv_mask == 0)
2096 return false;
2097
2098 /* Assume VALT is INTEGER_CST. */
2099 wi::tree_to_wide_ref val = wi::to_wide (valt);
2100
2101 if ((inv_mask & (inv_mask + 1)) != 0
2102 || (val & mask) != val)
2103 return false;
2104
2105 bool is_range = cond_code == EQ_EXPR;
2106
2107 tree type = TREE_TYPE (t);
2108 wide_int min = wi::min_value (type),
2109 max = wi::max_value (type);
2110
2111 if (is_range)
2112 {
2113 *low_code = val == min ? ERROR_MARK : GE_EXPR;
2114 *high_code = val == max ? ERROR_MARK : LE_EXPR;
2115 }
2116 else
2117 {
2118 /* We can still generate assertion if one of alternatives
2119 is known to always be false. */
2120 if (val == min)
2121 {
2122 *low_code = (enum tree_code) 0;
2123 *high_code = GT_EXPR;
2124 }
2125 else if ((val | inv_mask) == max)
2126 {
2127 *low_code = LT_EXPR;
2128 *high_code = (enum tree_code) 0;
2129 }
2130 else
2131 return false;
2132 }
2133
2134 *new_name = t;
2135 *low = wide_int_to_tree (type, val);
2136 *high = wide_int_to_tree (type, val | inv_mask);
2137
2138 return true;
2139 }
2140
2141 /* Try to register an edge assertion for SSA name NAME on edge E for
2142 the condition described by COND_CODE, COND_OP0 and COND_OP1, which
2143 contributes to the conditional jump at the end of E->src. */
2144
2145 void
2146 register_edge_assert_for (tree name, edge e,
2147 enum tree_code cond_code, tree cond_op0,
2148 tree cond_op1, vec<assert_info> &asserts)
2149 {
2150 tree val;
2151 enum tree_code comp_code;
2152 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2153
2154 /* Do not attempt to infer anything in names that flow through
2155 abnormal edges. */
2156 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2157 return;
2158
2159 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2160 cond_op0, cond_op1,
2161 is_else_edge,
2162 &comp_code, &val))
2163 return;
2164
2165 /* Register ASSERT_EXPRs for name. */
2166 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2167 cond_op1, is_else_edge, asserts);
2168
2169
2170 /* If COND is effectively an equality test of an SSA_NAME against
2171 the value zero or one, then we may be able to assert values
2172 for SSA_NAMEs which flow into COND. */
2173
2174 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
2175 statement of NAME we can assert both operands of the BIT_AND_EXPR
2176 have nonzero value. */
2177 if (((comp_code == EQ_EXPR && integer_onep (val))
2178 || (comp_code == NE_EXPR && integer_zerop (val))))
2179 {
2180 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2181
2182 if (is_gimple_assign (def_stmt)
2183 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2184 {
2185 tree op0 = gimple_assign_rhs1 (def_stmt);
2186 tree op1 = gimple_assign_rhs2 (def_stmt);
2187 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2188 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2189 }
2190 }
2191
2192 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
2193 statement of NAME we can assert both operands of the BIT_IOR_EXPR
2194 have zero value. */
2195 if (((comp_code == EQ_EXPR && integer_zerop (val))
2196 || (comp_code == NE_EXPR && integer_onep (val))))
2197 {
2198 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2199
2200 /* For BIT_IOR_EXPR we can only conclude that both operands are zero
2201 when NAME == 0, or when the type has precision one. */
2202 if (is_gimple_assign (def_stmt)
2203 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
2204 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
2205 || comp_code == EQ_EXPR)))
2206 {
2207 tree op0 = gimple_assign_rhs1 (def_stmt);
2208 tree op1 = gimple_assign_rhs2 (def_stmt);
2209 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2210 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2211 }
2212 }
2213
2214 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
2215 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2216 && TREE_CODE (val) == INTEGER_CST)
2217 {
2218 enum tree_code low_code, high_code;
2219 tree low, high;
2220 if (is_masked_range_test (name, val, comp_code, &name, &low,
2221 &low_code, &high, &high_code))
2222 {
2223 if (low_code != ERROR_MARK)
2224 register_edge_assert_for_2 (name, e, low_code, name,
2225 low, /*invert*/false, asserts);
2226 if (high_code != ERROR_MARK)
2227 register_edge_assert_for_2 (name, e, high_code, name,
2228 high, /*invert*/false, asserts);
2229 }
2230 }
2231 }
2232
2233 /* Handle
2234 _4 = x_3 & 31;
2235 if (_4 != 0)
2236 goto <bb 6>;
2237 else
2238 goto <bb 7>;
2239 <bb 6>:
2240 __builtin_unreachable ();
2241 <bb 7>:
2242 x_5 = ASSERT_EXPR <x_3, ...>;
2243 If x_3 has no other immediate uses (checked by caller),
2244 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
2245 from the non-zero bitmask. */
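/* In the example above the net effect is roughly
     set_nonzero_bits (x_3, get_nonzero_bits (x_3) & ~31);
   i.e. the low five bits of x_3 are recorded as known to be zero.  */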
2246
2247 void
2248 maybe_set_nonzero_bits (edge e, tree var)
2249 {
2250 basic_block cond_bb = e->src;
2251 gimple *stmt = last_stmt (cond_bb);
2252 tree cst;
2253
2254 if (stmt == NULL
2255 || gimple_code (stmt) != GIMPLE_COND
2256 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
2257 ? EQ_EXPR : NE_EXPR)
2258 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
2259 || !integer_zerop (gimple_cond_rhs (stmt)))
2260 return;
2261
2262 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2263 if (!is_gimple_assign (stmt)
2264 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
2265 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
2266 return;
2267 if (gimple_assign_rhs1 (stmt) != var)
2268 {
2269 gimple *stmt2;
2270
2271 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
2272 return;
2273 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2274 if (!gimple_assign_cast_p (stmt2)
2275 || gimple_assign_rhs1 (stmt2) != var
2276 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
2277 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
2278 != TYPE_PRECISION (TREE_TYPE (var))))
2279 return;
2280 }
2281 cst = gimple_assign_rhs2 (stmt);
2282 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
2283 wi::to_wide (cst)));
2284 }
2285
2286 /* Return true if STMT is interesting for VRP. */
2287
2288 bool
2289 stmt_interesting_for_vrp (gimple *stmt)
2290 {
2291 if (gimple_code (stmt) == GIMPLE_PHI)
2292 {
2293 tree res = gimple_phi_result (stmt);
2294 return (!virtual_operand_p (res)
2295 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
2296 || POINTER_TYPE_P (TREE_TYPE (res))));
2297 }
2298 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
2299 {
2300 tree lhs = gimple_get_lhs (stmt);
2301
2302 /* In general, assignments with virtual operands are not useful
2303 for deriving ranges, with the obvious exception of calls to
2304 builtin functions. */
2305 if (lhs && TREE_CODE (lhs) == SSA_NAME
2306 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2307 || POINTER_TYPE_P (TREE_TYPE (lhs)))
2308 && (is_gimple_call (stmt)
2309 || !gimple_vuse (stmt)))
2310 return true;
2311 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
2312 switch (gimple_call_internal_fn (stmt))
2313 {
2314 case IFN_ADD_OVERFLOW:
2315 case IFN_SUB_OVERFLOW:
2316 case IFN_MUL_OVERFLOW:
2317 case IFN_ATOMIC_COMPARE_EXCHANGE:
2318 /* These internal calls return _Complex integer type,
2319 but are interesting to VRP nevertheless. */
2320 if (lhs && TREE_CODE (lhs) == SSA_NAME)
2321 return true;
2322 break;
2323 default:
2324 break;
2325 }
2326 }
2327 else if (gimple_code (stmt) == GIMPLE_COND
2328 || gimple_code (stmt) == GIMPLE_SWITCH)
2329 return true;
2330
2331 return false;
2332 }
2333
2334
2335 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
2336 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
2337 BB. If no such ASSERT_EXPR is found, return OP. */
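/* For example (illustrative): if a dominating block contains
     x_5 = ASSERT_EXPR <x_3, x_3 > 0>;
   then a query for OP = x_3 at BB returns x_5, so callers can use the more
   constrained name.  */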
2338
2339 static tree
2340 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
2341 {
2342 imm_use_iterator imm_iter;
2343 gimple *use_stmt;
2344 use_operand_p use_p;
2345
2346 if (TREE_CODE (op) == SSA_NAME)
2347 {
2348 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
2349 {
2350 use_stmt = USE_STMT (use_p);
2351 if (use_stmt != stmt
2352 && gimple_assign_single_p (use_stmt)
2353 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
2354 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
2355 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
2356 return gimple_assign_lhs (use_stmt);
2357 }
2358 }
2359 return op;
2360 }
2361
2362 /* A hack. */
2363 static class vr_values *x_vr_values;
2364
2365 /* Searches the case labels of switch statement STMT for the index *IDX of
2366 the CASE_LABEL that includes the value VAL. The search is restricted to
2367 the range [START_IDX, n - 1] where n is the number of labels.
2368
2369 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
2370 returned.
2371
2372 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
2373 it is placed in IDX and false is returned.
2374
2375 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
2376 returned. */
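/* As an illustration (made-up switch): with case labels 1, 5 ... 8 and 10
   stored in that order starting at index 1, a query for VAL = 6 sets *IDX
   to the index of the 5 ... 8 label and returns true, while VAL = 9 sets
   *IDX to the index of the label for 10 and returns false.  */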
2377
2378 bool
2379 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
2380 {
2381 size_t n = gimple_switch_num_labels (stmt);
2382 size_t low, high;
2383
2384 /* Find case label for minimum of the value range or the next one.
2385 At each iteration we are searching in [low, high - 1]. */
2386
2387 for (low = start_idx, high = n; high != low; )
2388 {
2389 tree t;
2390 int cmp;
2391 /* Note that i != high, so we never ask for n. */
2392 size_t i = (high + low) / 2;
2393 t = gimple_switch_label (stmt, i);
2394
2395 /* Cache the result of comparing CASE_LOW and val. */
2396 cmp = tree_int_cst_compare (CASE_LOW (t), val);
2397
2398 if (cmp == 0)
2399 {
2400 /* Ranges cannot be empty. */
2401 *idx = i;
2402 return true;
2403 }
2404 else if (cmp > 0)
2405 high = i;
2406 else
2407 {
2408 low = i + 1;
2409 if (CASE_HIGH (t) != NULL
2410 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2411 {
2412 *idx = i;
2413 return true;
2414 }
2415 }
2416 }
2417
2418 *idx = high;
2419 return false;
2420 }
2421
2422 /* Searches the case labels of switch statement STMT for the range of
2423 CASE_LABELs used for values between MIN and MAX. The first index is placed in MIN_IDX. The
2424 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
2425 then MAX_IDX < MIN_IDX.
2426 Returns true if the default label is not needed. */
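/* For instance (illustrative): with contiguous case labels 1, 2 and 3 and
   MIN = 1, MAX = 3, all three labels are covered and true is returned;
   with labels 1 and 3 only, the gap at 2 means the default label is still
   needed, so false is returned.  */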
2427
2428 bool
2429 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
2430 size_t *max_idx)
2431 {
2432 size_t i, j;
2433 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
2434 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
2435
2436 if (i == j
2437 && min_take_default
2438 && max_take_default)
2439 {
2440 /* Only the default case label reached.
2441 Return an empty range. */
2442 *min_idx = 1;
2443 *max_idx = 0;
2444 return false;
2445 }
2446 else
2447 {
2448 bool take_default = min_take_default || max_take_default;
2449 tree low, high;
2450 size_t k;
2451
2452 if (max_take_default)
2453 j--;
2454
2455 /* If the case label range is continuous, we do not need
2456 the default case label. Verify that. */
2457 high = CASE_LOW (gimple_switch_label (stmt, i));
2458 if (CASE_HIGH (gimple_switch_label (stmt, i)))
2459 high = CASE_HIGH (gimple_switch_label (stmt, i));
2460 for (k = i + 1; k <= j; ++k)
2461 {
2462 low = CASE_LOW (gimple_switch_label (stmt, k));
2463 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
2464 {
2465 take_default = true;
2466 break;
2467 }
2468 high = low;
2469 if (CASE_HIGH (gimple_switch_label (stmt, k)))
2470 high = CASE_HIGH (gimple_switch_label (stmt, k));
2471 }
2472
2473 *min_idx = i;
2474 *max_idx = j;
2475 return !take_default;
2476 }
2477 }
2478
2479 /* Given a SWITCH_STMT, return the case label that encompasses the
2480 known possible values for the switch operand. RANGE_OF_OP is a
2481 range for the known values of the switch operand. */
2482
2483 tree
2484 find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
2485 {
2486 if (range_of_op->undefined_p ()
2487 || range_of_op->varying_p ()
2488 || range_of_op->symbolic_p ())
2489 return NULL_TREE;
2490
2491 size_t i, j;
2492 tree op = gimple_switch_index (switch_stmt);
2493 tree type = TREE_TYPE (op);
2494 tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
2495 tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
2496 find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
2497 if (i == j)
2498 {
2499 /* Look for exactly one label that encompasses the range of
2500 the operand. */
2501 tree label = gimple_switch_label (switch_stmt, i);
2502 tree case_high
2503 = CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
2504 int_range_max label_range (CASE_LOW (label), case_high);
2505 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2506 range_cast (label_range, range_of_op->type ());
2507 label_range.intersect (range_of_op);
2508 if (label_range == *range_of_op)
2509 return label;
2510 }
2511 else if (i > j)
2512 {
2513 /* If there are no labels at all, take the default. */
2514 return gimple_switch_label (switch_stmt, 0);
2515 }
2516 else
2517 {
2518 /* Otherwise, there are various labels that can encompass
2519 the range of operand. In which case, see if the range of
2520 the operand is entirely *outside* the bounds of all the
2521 (non-default) case labels. If so, take the default. */
2522 unsigned n = gimple_switch_num_labels (switch_stmt);
2523 tree min_label = gimple_switch_label (switch_stmt, 1);
2524 tree max_label = gimple_switch_label (switch_stmt, n - 1);
2525 tree case_high = CASE_HIGH (max_label);
2526 if (!case_high)
2527 case_high = CASE_LOW (max_label);
2528 int_range_max label_range (CASE_LOW (min_label), case_high);
2529 if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2530 range_cast (label_range, range_of_op->type ());
2531 label_range.intersect (range_of_op);
2532 if (label_range.undefined_p ())
2533 return gimple_switch_label (switch_stmt, 0);
2534 }
2535 return NULL_TREE;
2536 }
2537
2538 struct case_info
2539 {
2540 tree expr;
2541 basic_block bb;
2542 };
2543
2544 /* Location information for ASSERT_EXPRs. Each instance of this
2545 structure describes an ASSERT_EXPR for an SSA name. Since a single
2546 SSA name may have more than one assertion associated with it, these
2547 locations are kept in a linked list attached to the corresponding
2548 SSA name. */
2549 struct assert_locus
2550 {
2551 /* Basic block where the assertion would be inserted. */
2552 basic_block bb;
2553
2554 /* Some assertions need to be inserted on an edge (e.g., assertions
2555 generated by COND_EXPRs). In those cases, BB will be NULL. */
2556 edge e;
2557
2558 /* Pointer to the statement that generated this assertion. */
2559 gimple_stmt_iterator si;
2560
2561 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
2562 enum tree_code comp_code;
2563
2564 /* Value being compared against. */
2565 tree val;
2566
2567 /* Expression to compare. */
2568 tree expr;
2569
2570 /* Next node in the linked list. */
2571 assert_locus *next;
2572 };
2573
2574 /* Class to traverse the flowgraph looking for conditional jumps to
2575 insert ASSERT_EXPR range expressions. These range expressions are
2576 meant to provide information to optimizations that need to reason
2577 in terms of value ranges. They will not be expanded into RTL. */
2578
2579 class vrp_asserts
2580 {
2581 public:
2582 vrp_asserts (struct function *fn) : fun (fn) { }
2583
2584 void insert_range_assertions ();
2585
2586 /* Convert range assertion expressions into the implied copies and
2587 copy propagate away the copies. */
2588 void remove_range_assertions ();
2589
2590 /* Dump all the registered assertions for all the names to FILE. */
2591 void dump (FILE *);
2592
2593 /* Dump all the registered assertions for NAME to FILE. */
2594 void dump (FILE *file, tree name);
2595
2596 /* Dump all the registered assertions for NAME to stderr. */
2597 void debug (tree name)
2598 {
2599 dump (stderr, name);
2600 }
2601
2602 /* Dump all the registered assertions for all the names to stderr. */
2603 void debug ()
2604 {
2605 dump (stderr);
2606 }
2607
2608 private:
2609 /* Set of SSA names found live during the RPO traversal of the function
2610 for still active basic-blocks. */
2611 live_names live;
2612
2613 /* Function to work on. */
2614 struct function *fun;
2615
2616 /* If bit I is present, it means that SSA name N_i has a list of
2617 assertions that should be inserted in the IL. */
2618 bitmap need_assert_for;
2619
2620 /* Array of lists of locations where to insert assertions. ASSERTS_FOR[I]
2621 holds a list of assert_locus nodes that describe where
2622 ASSERT_EXPRs for SSA name N_I should be inserted. */
2623 assert_locus **asserts_for;
2624
2625 /* Finish found ASSERTS for E and register them at GSI. */
2626 void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2627 vec<assert_info> &asserts);
2628
2629 /* Determine whether the outgoing edges of BB should receive an
2630 ASSERT_EXPR for each of the operands of BB's LAST statement. The
2631 last statement of BB must be a SWITCH_EXPR.
2632
2633 If any of the sub-graphs rooted at BB have an interesting use of
2634 the predicate operands, an assert location node is added to the
2635 list of assertions for the corresponding operands. */
2636 void find_switch_asserts (basic_block bb, gswitch *last);
2637
2638 /* Do an RPO walk over the function computing SSA name liveness
2639 on-the-fly and deciding on assert expressions to insert. */
2640 void find_assert_locations ();
2641
2642 /* Traverse all the statements in block BB looking for statements that
2643 may generate useful assertions for the SSA names in their operand.
2644 See method implementation commentary for more information. */
2645 void find_assert_locations_in_bb (basic_block bb);
2646
2647 /* Determine whether the outgoing edges of BB should receive an
2648 ASSERT_EXPR for each of the operands of BB's LAST statement.
2649 The last statement of BB must be a COND_EXPR.
2650
2651 If any of the sub-graphs rooted at BB have an interesting use of
2652 the predicate operands, an assert location node is added to the
2653 list of assertions for the corresponding operands. */
2654 void find_conditional_asserts (basic_block bb, gcond *last);
2655
2656 /* Process all the insertions registered for every name N_i registered
2657 in NEED_ASSERT_FOR. The list of assertions to be inserted are
2658 found in ASSERTS_FOR[i]. */
2659 void process_assert_insertions ();
2660
2661 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2662 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2663 E->DEST, then register this location as a possible insertion point
2664 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2665
2666 BB, E and SI provide the exact insertion point for the new
2667 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2668 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2669 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2670 must not be NULL. */
2671 void register_new_assert_for (tree name, tree expr,
2672 enum tree_code comp_code,
2673 tree val, basic_block bb,
2674 edge e, gimple_stmt_iterator si);
2675
2676 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2677 create a new SSA name N and return the assertion assignment
2678 'N = ASSERT_EXPR <V, V OP W>'. */
2679 gimple *build_assert_expr_for (tree cond, tree v);
2680
2681 /* Create an ASSERT_EXPR for NAME and insert it in the location
2682 indicated by LOC. Return true if we made any edge insertions. */
2683 bool process_assert_insertions_for (tree name, assert_locus *loc);
2684
2685 /* Qsort callback for sorting assert locations. */
2686 template <bool stable> static int compare_assert_loc (const void *,
2687 const void *);
2688
2689 /* Return true if STMT is a predicate expression involving floating
2690 point values. */
2691 bool fp_predicate (gimple *stmt)
2692 {
2693 GIMPLE_CHECK (stmt, GIMPLE_COND);
2694 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2695 }
2696
2697 bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
2698 basic_block cond_bb);
2699
2700 static int compare_case_labels (const void *, const void *);
2701 };
2702
2703 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2704 create a new SSA name N and return the assertion assignment
2705 'N = ASSERT_EXPR <V, V OP W>'. */
2706
2707 gimple *
2708 vrp_asserts::build_assert_expr_for (tree cond, tree v)
2709 {
2710 tree a;
2711 gassign *assertion;
2712
2713 gcc_assert (TREE_CODE (v) == SSA_NAME
2714 && COMPARISON_CLASS_P (cond));
2715
2716 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2717 assertion = gimple_build_assign (NULL_TREE, a);
2718
2719 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2720 operand of the ASSERT_EXPR. Create it so the new name and the old one
2721 are registered in the replacement table so that we can fix the SSA web
2722 after adding all the ASSERT_EXPRs. */
2723 tree new_def = create_new_def_for (v, assertion, NULL);
2724 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2725 given we have to be able to fully propagate those out to re-create
2726 valid SSA when removing the asserts. */
2727 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2728 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2729
2730 return assertion;
2731 }
2732
2733 /* Dump all the registered assertions for NAME to FILE. */
2734
2735 void
2736 vrp_asserts::dump (FILE *file, tree name)
2737 {
2738 assert_locus *loc;
2739
2740 fprintf (file, "Assertions to be inserted for ");
2741 print_generic_expr (file, name);
2742 fprintf (file, "\n");
2743
2744 loc = asserts_for[SSA_NAME_VERSION (name)];
2745 while (loc)
2746 {
2747 fprintf (file, "\t");
2748 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2749 fprintf (file, "\n\tBB #%d", loc->bb->index);
2750 if (loc->e)
2751 {
2752 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2753 loc->e->dest->index);
2754 dump_edge_info (file, loc->e, dump_flags, 0);
2755 }
2756 fprintf (file, "\n\tPREDICATE: ");
2757 print_generic_expr (file, loc->expr);
2758 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2759 print_generic_expr (file, loc->val);
2760 fprintf (file, "\n\n");
2761 loc = loc->next;
2762 }
2763
2764 fprintf (file, "\n");
2765 }
2766
2767 /* Dump all the registered assertions for all the names to FILE. */
2768
2769 void
2770 vrp_asserts::dump (FILE *file)
2771 {
2772 unsigned i;
2773 bitmap_iterator bi;
2774
2775 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2776 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2777 dump (file, ssa_name (i));
2778 fprintf (file, "\n");
2779 }
2780
2781 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2782 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2783 E->DEST, then register this location as a possible insertion point
2784 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2785
2786 BB, E and SI provide the exact insertion point for the new
2787 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2788 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2789 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2790 must not be NULL. */
2791
2792 void
2793 vrp_asserts::register_new_assert_for (tree name, tree expr,
2794 enum tree_code comp_code,
2795 tree val,
2796 basic_block bb,
2797 edge e,
2798 gimple_stmt_iterator si)
2799 {
2800 assert_locus *n, *loc, *last_loc;
2801 basic_block dest_bb;
2802
2803 gcc_checking_assert (bb == NULL || e == NULL);
2804
2805 if (e == NULL)
2806 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2807 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2808
2809 /* Never build an assert comparing against an integer constant with
2810 TREE_OVERFLOW set. This confuses our undefined overflow warning
2811 machinery. */
2812 if (TREE_OVERFLOW_P (val))
2813 val = drop_tree_overflow (val);
2814
2815 /* The new assertion A will be inserted at BB or E. We need to
2816 determine if the new location is dominated by a previously
2817 registered location for A. If we are doing an edge insertion,
2818 assume that A will be inserted at E->DEST. Note that this is not
2819 necessarily true.
2820
2821 If E is a critical edge, it will be split. But even if E is
2822 split, the new block will dominate the same set of blocks that
2823 E->DEST dominates.
2824
2825 The reverse, however, is not true: blocks dominated by E->DEST
2826 will not be dominated by the new block created to split E. So,
2827 if the insertion location is on a critical edge, we will not use
2828 the new location to move another assertion previously registered
2829 at a block dominated by E->DEST. */
2830 dest_bb = (bb) ? bb : e->dest;
2831
2832 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2833 VAL at a block dominating DEST_BB, then we don't need to insert a new
2834 one. Similarly, if the same assertion already exists at a block
2835 dominated by DEST_BB and the new location is not on a critical
2836 edge, then update the existing location for the assertion (i.e.,
2837 move the assertion up in the dominance tree).
2838
2839 Note, this is implemented as a simple linked list because there
2840 should not be more than a handful of assertions registered per
2841 name. If this becomes a performance problem, a table hashed by
2842 COMP_CODE and VAL could be implemented. */
2843 loc = asserts_for[SSA_NAME_VERSION (name)];
2844 last_loc = loc;
2845 while (loc)
2846 {
2847 if (loc->comp_code == comp_code
2848 && (loc->val == val
2849 || operand_equal_p (loc->val, val, 0))
2850 && (loc->expr == expr
2851 || operand_equal_p (loc->expr, expr, 0)))
2852 {
2853 /* If E is not a critical edge and DEST_BB
2854 dominates the existing location for the assertion, move
2855 the assertion up in the dominance tree by updating its
2856 location information. */
2857 if ((e == NULL || !EDGE_CRITICAL_P (e))
2858 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2859 {
2860 loc->bb = dest_bb;
2861 loc->e = e;
2862 loc->si = si;
2863 return;
2864 }
2865 }
2866
2867 /* Update the last node of the list and move to the next one. */
2868 last_loc = loc;
2869 loc = loc->next;
2870 }
2871
2872 /* If we didn't find an assertion already registered for
2873 NAME COMP_CODE VAL, add a new one at the end of the list of
2874 assertions associated with NAME. */
2875 n = XNEW (struct assert_locus);
2876 n->bb = dest_bb;
2877 n->e = e;
2878 n->si = si;
2879 n->comp_code = comp_code;
2880 n->val = val;
2881 n->expr = expr;
2882 n->next = NULL;
2883
2884 if (last_loc)
2885 last_loc->next = n;
2886 else
2887 asserts_for[SSA_NAME_VERSION (name)] = n;
2888
2889 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2890 }
2891
2892 /* Finish found ASSERTS for E and register them at GSI. */
2893
2894 void
2895 vrp_asserts::finish_register_edge_assert_for (edge e,
2896 gimple_stmt_iterator gsi,
2897 vec<assert_info> &asserts)
2898 {
2899 for (unsigned i = 0; i < asserts.length (); ++i)
2900 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
2901 reachable from E. */
2902 if (live.live_on_edge_p (asserts[i].name, e))
2903 register_new_assert_for (asserts[i].name, asserts[i].expr,
2904 asserts[i].comp_code, asserts[i].val,
2905 NULL, e, gsi);
2906 }
2907
2908 /* Determine whether the outgoing edges of BB should receive an
2909 ASSERT_EXPR for each of the operands of BB's LAST statement.
2910 The last statement of BB must be a COND_EXPR.
2911
2912 If any of the sub-graphs rooted at BB have an interesting use of
2913 the predicate operands, an assert location node is added to the
2914 list of assertions for the corresponding operands. */
2915
2916 void
2917 vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
2918 {
2919 gimple_stmt_iterator bsi;
2920 tree op;
2921 edge_iterator ei;
2922 edge e;
2923 ssa_op_iter iter;
2924
2925 bsi = gsi_for_stmt (last);
2926
2927 /* Look for uses of the operands in each of the sub-graphs
2928 rooted at BB. We need to check each of the outgoing edges
2929 separately, so that we know what kind of ASSERT_EXPR to
2930 insert. */
2931 FOR_EACH_EDGE (e, ei, bb->succs)
2932 {
2933 if (e->dest == bb)
2934 continue;
2935
2936 /* Register the necessary assertions for each operand in the
2937 conditional predicate. */
2938 auto_vec<assert_info, 8> asserts;
2939 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
2940 register_edge_assert_for (op, e,
2941 gimple_cond_code (last),
2942 gimple_cond_lhs (last),
2943 gimple_cond_rhs (last), asserts);
2944 finish_register_edge_assert_for (e, bsi, asserts);
2945 }
2946 }
2947
2948 /* Compare two case labels sorting first by the destination bb index
2949 and then by the case value. */
2950
2951 int
2952 vrp_asserts::compare_case_labels (const void *p1, const void *p2)
2953 {
2954 const struct case_info *ci1 = (const struct case_info *) p1;
2955 const struct case_info *ci2 = (const struct case_info *) p2;
2956 int idx1 = ci1->bb->index;
2957 int idx2 = ci2->bb->index;
2958
2959 if (idx1 < idx2)
2960 return -1;
2961 else if (idx1 == idx2)
2962 {
2963 /* Make sure the default label is first in a group. */
2964 if (!CASE_LOW (ci1->expr))
2965 return -1;
2966 else if (!CASE_LOW (ci2->expr))
2967 return 1;
2968 else
2969 return tree_int_cst_compare (CASE_LOW (ci1->expr),
2970 CASE_LOW (ci2->expr));
2971 }
2972 else
2973 return 1;
2974 }
2975
2976 /* Determine whether the outgoing edges of BB should receive an
2977 ASSERT_EXPR for each of the operands of BB's LAST statement.
2978 The last statement of BB must be a SWITCH_EXPR.
2979
2980 If any of the sub-graphs rooted at BB have an interesting use of
2981 the predicate operands, an assert location node is added to the
2982 list of assertions for the corresponding operands. */
2983
2984 void
2985 vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
2986 {
2987 gimple_stmt_iterator bsi;
2988 tree op;
2989 edge e;
2990 struct case_info *ci;
2991 size_t n = gimple_switch_num_labels (last);
2992 #if GCC_VERSION >= 4000
2993 unsigned int idx;
2994 #else
2995 /* Work around GCC 3.4 bug (PR 37086). */
2996 volatile unsigned int idx;
2997 #endif
2998
2999 bsi = gsi_for_stmt (last);
3000 op = gimple_switch_index (last);
3001 if (TREE_CODE (op) != SSA_NAME)
3002 return;
3003
3004 /* Build a vector of case labels sorted by destination label. */
3005 ci = XNEWVEC (struct case_info, n);
3006 for (idx = 0; idx < n; ++idx)
3007 {
3008 ci[idx].expr = gimple_switch_label (last, idx);
3009 ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
3010 }
3011 edge default_edge = find_edge (bb, ci[0].bb);
3012 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3013
3014 for (idx = 0; idx < n; ++idx)
3015 {
3016 tree min, max;
3017 tree cl = ci[idx].expr;
3018 basic_block cbb = ci[idx].bb;
3019
3020 min = CASE_LOW (cl);
3021 max = CASE_HIGH (cl);
3022
3023 /* If there are multiple case labels with the same destination
3024 we need to combine them to a single value range for the edge. */
3025 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3026 {
3027 /* Skip labels until the last of the group. */
3028 do {
3029 ++idx;
3030 } while (idx < n && cbb == ci[idx].bb);
3031 --idx;
3032
3033 /* Pick up the maximum of the case label range. */
3034 if (CASE_HIGH (ci[idx].expr))
3035 max = CASE_HIGH (ci[idx].expr);
3036 else
3037 max = CASE_LOW (ci[idx].expr);
3038 }
3039
3040 /* Can't extract a useful assertion out of a range that includes the
3041 default label. */
3042 if (min == NULL_TREE)
3043 continue;
3044
3045 /* Find the edge to register the assert expr on. */
3046 e = find_edge (bb, cbb);
3047
3048 /* Register the necessary assertions for the operand in the
3049 SWITCH_EXPR. */
3050 auto_vec<assert_info, 8> asserts;
3051 register_edge_assert_for (op, e,
3052 max ? GE_EXPR : EQ_EXPR,
3053 op, fold_convert (TREE_TYPE (op), min),
3054 asserts);
3055 if (max)
3056 register_edge_assert_for (op, e, LE_EXPR, op,
3057 fold_convert (TREE_TYPE (op), max),
3058 asserts);
3059 finish_register_edge_assert_for (e, bsi, asserts);
3060 }
3061
3062 XDELETEVEC (ci);
3063
3064 if (!live.live_on_edge_p (op, default_edge))
3065 return;
3066
3067 /* Now register, along the default edge, assertions that correspond to the
3068 anti-range of each label. */
3069 int insertion_limit = param_max_vrp_switch_assertions;
3070 if (insertion_limit == 0)
3071 return;
3072
3073 /* We can't do this if the default case shares a label with another case. */
3074 tree default_cl = gimple_switch_default_label (last);
3075 for (idx = 1; idx < n; idx++)
3076 {
3077 tree min, max;
3078 tree cl = gimple_switch_label (last, idx);
3079 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3080 continue;
3081
3082 min = CASE_LOW (cl);
3083 max = CASE_HIGH (cl);
3084
3085 /* Combine contiguous case ranges to reduce the number of assertions
3086 to insert. */
3087 for (idx = idx + 1; idx < n; idx++)
3088 {
3089 tree next_min, next_max;
3090 tree next_cl = gimple_switch_label (last, idx);
3091 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3092 break;
3093
3094 next_min = CASE_LOW (next_cl);
3095 next_max = CASE_HIGH (next_cl);
3096
3097 wide_int difference = (wi::to_wide (next_min)
3098 - wi::to_wide (max ? max : min));
3099 if (wi::eq_p (difference, 1))
3100 max = next_max ? next_max : next_min;
3101 else
3102 break;
3103 }
3104 idx--;
3105
3106 if (max == NULL_TREE)
3107 {
3108 /* Register the assertion OP != MIN. */
3109 auto_vec<assert_info, 8> asserts;
3110 min = fold_convert (TREE_TYPE (op), min);
3111 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3112 asserts);
3113 finish_register_edge_assert_for (default_edge, bsi, asserts);
3114 }
3115 else
3116 {
3117 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3118 which will give OP the anti-range ~[MIN,MAX]. */
3119 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3120 min = fold_convert (TREE_TYPE (uop), min);
3121 max = fold_convert (TREE_TYPE (uop), max);
3122
3123 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3124 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3125 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3126 NULL, default_edge, bsi);
3127 }
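/* For example (illustrative values): for a non-default label
   case 3 ... 5: the branch above registers (unsigned) OP - 3 > 2 on the
   default edge, which encodes OP being outside [3, 5].  */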
3128
3129 if (--insertion_limit == 0)
3130 break;
3131 }
3132 }
3133
3134 /* Traverse all the statements in block BB looking for statements that
3135 may generate useful assertions for the SSA names in their operand.
3136 If a statement produces a useful assertion A for name N_i, then the
3137 list of assertions already generated for N_i is scanned to
3138 determine if A is actually needed.
3139
3140 If N_i already had the assertion A at a location dominating the
3141 current location, then nothing needs to be done. Otherwise, the
3142 new location for A is recorded instead.
3143
3144 1- For every statement S in BB, all the variables used by S are
3145 added to bitmap FOUND_IN_SUBGRAPH.
3146
3147 2- If statement S uses an operand N in a way that exposes a known
3148 value range for N, then if N was not already generated by an
3149 ASSERT_EXPR, create a new assert location for N. For instance,
3150 if N is a pointer and the statement dereferences it, we can
3151 assume that N is not NULL.
3152
3153 3- COND_EXPRs are a special case of #2. We can derive range
3154 information from the predicate but need to insert different
3155 ASSERT_EXPRs for each of the sub-graphs rooted at the
3156 conditional block. If the last statement of BB is a conditional
3157 expression of the form 'X op Y', then
3158
3159 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3160
3161 b) If the conditional is the only entry point to the sub-graph
3162 corresponding to the THEN_CLAUSE, recurse into it. On
3163 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3164 an ASSERT_EXPR is added for the corresponding variable.
3165
3166 c) Repeat step (b) on the ELSE_CLAUSE.
3167
3168 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3169
3170 For instance,
3171
3172 if (a == 9)
3173 b = a;
3174 else
3175 b = c + 1;
3176
3177 In this case, an assertion on the THEN clause is useful to
3178 determine that 'a' is always 9 on that edge. However, an assertion
3179 on the ELSE clause would be unnecessary.
3180
3181 4- If BB does not end in a conditional expression, then we recurse
3182 into BB's dominator children.
3183
3184 At the end of the recursive traversal, every SSA name will have a
3185 list of locations where ASSERT_EXPRs should be added. When a new
3186 location for name N is found, it is registered by calling
3187 register_new_assert_for. That function keeps track of all the
3188 registered assertions to prevent adding unnecessary assertions.
3189 For instance, if a pointer P_4 is dereferenced more than once in a
3190 dominator tree, only the location dominating all the dereference of
3191 P_4 will receive an ASSERT_EXPR. */
3192
3193 void
3194 vrp_asserts::find_assert_locations_in_bb (basic_block bb)
3195 {
3196 gimple *last;
3197
3198 last = last_stmt (bb);
3199
3200 /* If BB's last statement is a conditional statement involving integer
3201 operands, determine if we need to add ASSERT_EXPRs. */
3202 if (last
3203 && gimple_code (last) == GIMPLE_COND
3204 && !fp_predicate (last)
3205 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3206 find_conditional_asserts (bb, as_a <gcond *> (last));
3207
3208 /* If BB's last statement is a switch statement involving integer
3209 operands, determine if we need to add ASSERT_EXPRs. */
3210 if (last
3211 && gimple_code (last) == GIMPLE_SWITCH
3212 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3213 find_switch_asserts (bb, as_a <gswitch *> (last));
3214
3215 /* Traverse all the statements in BB marking used names and looking
3216 for statements that may infer assertions for their used operands. */
3217 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3218 gsi_prev (&si))
3219 {
3220 gimple *stmt;
3221 tree op;
3222 ssa_op_iter i;
3223
3224 stmt = gsi_stmt (si);
3225
3226 if (is_gimple_debug (stmt))
3227 continue;
3228
3229 /* See if we can derive an assertion for any of STMT's operands. */
3230 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3231 {
3232 tree value;
3233 enum tree_code comp_code;
3234
3235 /* If op is not live beyond this stmt, do not bother to insert
3236 asserts for it. */
3237 if (!live.live_on_block_p (op, bb))
3238 continue;
3239
3240 /* If OP is used in such a way that we can infer a value
3241 range for it, and we don't find a previous assertion for
3242 it, create a new assertion location node for OP. */
3243 if (infer_value_range (stmt, op, &comp_code, &value))
3244 {
3245 /* If we are able to infer a nonzero value range for OP,
3246 then walk backwards through the use-def chain to see if OP
3247 was set via a typecast.
3248
3249 If so, then we can also infer a nonzero value range
3250 for the operand of the NOP_EXPR. */
3251 if (comp_code == NE_EXPR && integer_zerop (value))
3252 {
3253 tree t = op;
3254 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3255
3256 while (is_gimple_assign (def_stmt)
3257 && CONVERT_EXPR_CODE_P
3258 (gimple_assign_rhs_code (def_stmt))
3259 && TREE_CODE
3260 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3261 && POINTER_TYPE_P
3262 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3263 {
3264 t = gimple_assign_rhs1 (def_stmt);
3265 def_stmt = SSA_NAME_DEF_STMT (t);
3266
3267 /* Note we want to register the assert for the
3268 operand of the NOP_EXPR after SI, not after the
3269 conversion. */
3270 if (live.live_on_block_p (t, bb))
3271 register_new_assert_for (t, t, comp_code, value,
3272 bb, NULL, si);
3273 }
3274 }
3275
3276 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3277 }
3278 }
3279
3280 /* Update live. */
3281 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3282 live.set (op, bb);
3283 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3284 live.clear (op, bb);
3285 }
3286
3287 /* Traverse all PHI nodes in BB, updating live. */
3288 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3289 gsi_next (&si))
3290 {
3291 use_operand_p arg_p;
3292 ssa_op_iter i;
3293 gphi *phi = si.phi ();
3294 tree res = gimple_phi_result (phi);
3295
3296 if (virtual_operand_p (res))
3297 continue;
3298
3299 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3300 {
3301 tree arg = USE_FROM_PTR (arg_p);
3302 if (TREE_CODE (arg) == SSA_NAME)
3303 live.set (arg, bb);
3304 }
3305
3306 live.clear (res, bb);
3307 }
3308 }
3309
3310 /* Do an RPO walk over the function computing SSA name liveness
3311 on-the-fly and deciding on assert expressions to insert. */
3312
3313 void
3314 vrp_asserts::find_assert_locations (void)
3315 {
3316 int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3317 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3318 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
3319 int rpo_cnt, i;
3320
3321 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3322 for (i = 0; i < rpo_cnt; ++i)
3323 bb_rpo[rpo[i]] = i;
3324
3325 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3326 the order in which we compute liveness and insert asserts, we would
3327 otherwise fail to insert asserts into the loop latch. */
3328 loop_p loop;
3329 FOR_EACH_LOOP (loop, 0)
3330 {
3331 i = loop->latch->index;
3332 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3333 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3334 !gsi_end_p (gsi); gsi_next (&gsi))
3335 {
3336 gphi *phi = gsi.phi ();
3337 if (virtual_operand_p (gimple_phi_result (phi)))
3338 continue;
3339 tree arg = gimple_phi_arg_def (phi, j);
3340 if (TREE_CODE (arg) == SSA_NAME)
3341 live.set (arg, loop->latch);
3342 }
3343 }
3344
3345 for (i = rpo_cnt - 1; i >= 0; --i)
3346 {
3347 basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3348 edge e;
3349 edge_iterator ei;
3350
3351 /* Process BB and update the live information with uses in
3352 this block. */
3353 find_assert_locations_in_bb (bb);
3354
3355 /* Merge liveness into the predecessor blocks and free it. */
3356 if (!live.block_has_live_names_p (bb))
3357 {
3358 int pred_rpo = i;
3359 FOR_EACH_EDGE (e, ei, bb->preds)
3360 {
3361 int pred = e->src->index;
3362 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3363 continue;
3364
3365 live.merge (e->src, bb);
3366
3367 if (bb_rpo[pred] < pred_rpo)
3368 pred_rpo = bb_rpo[pred];
3369 }
3370
3371 /* Record the RPO number of the last visited block that needs
3372 live information from this block. */
3373 last_rpo[rpo[i]] = pred_rpo;
3374 }
3375 else
3376 live.clear_block (bb);
3377
3378 /* We can free a successor's live bitmap once all of its
3379 predecessors have been visited. */
3380 FOR_EACH_EDGE (e, ei, bb->succs)
3381 if (last_rpo[e->dest->index] == i)
3382 live.clear_block (e->dest);
3383 }
3384
3385 XDELETEVEC (rpo);
3386 XDELETEVEC (bb_rpo);
3387 XDELETEVEC (last_rpo);
3388 }
3389
3390 /* Create an ASSERT_EXPR for NAME and insert it in the location
3391 indicated by LOC. Return true if we made any edge insertions. */
3392
3393 bool
3394 vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
3395 {
3396 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3397 gimple *stmt;
3398 tree cond;
3399 gimple *assert_stmt;
3400 edge_iterator ei;
3401 edge e;
3402
3403 /* If we have X <=> X do not insert an assert expr for that. */
3404 if (loc->expr == loc->val)
3405 return false;
3406
3407 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3408 assert_stmt = build_assert_expr_for (cond, name);
3409 if (loc->e)
3410 {
3411 /* We have been asked to insert the assertion on an edge. This
3412 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3413 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3414 || (gimple_code (gsi_stmt (loc->si))
3415 == GIMPLE_SWITCH));
3416
3417 gsi_insert_on_edge (loc->e, assert_stmt);
3418 return true;
3419 }
3420
3421 /* If the stmt iterator points at the end then this is an insertion
3422 at the beginning of a block. */
3423 if (gsi_end_p (loc->si))
3424 {
3425 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3426 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3427 return false;
3428
3429 }
3430 /* Otherwise, we can insert right after LOC->SI, provided the
3431 statement does not end the basic block. */
3432 stmt = gsi_stmt (loc->si);
3433 if (!stmt_ends_bb_p (stmt))
3434 {
3435 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3436 return false;
3437 }
3438
3439 /* If STMT must be the last statement in BB, we can only insert new
3440 assertions on the non-abnormal edge out of BB. Note that since
3441 STMT is not control flow, there may only be one non-abnormal/eh edge
3442 out of BB. */
3443 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3444 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3445 {
3446 gsi_insert_on_edge (e, assert_stmt);
3447 return true;
3448 }
3449
3450 gcc_unreachable ();
3451 }
3452
3453 /* Qsort helper for sorting assert locations. If STABLE is true, don't
3454 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3455 on the other hand, some pointers might be NULL. */
3456
3457 template <bool stable>
3458 int
3459 vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
3460 {
3461 assert_locus * const a = *(assert_locus * const *)pa;
3462 assert_locus * const b = *(assert_locus * const *)pb;
3463
3464 /* If stable, some asserts might be optimized away already, sort
3465 them last. */
3466 if (stable)
3467 {
3468 if (a == NULL)
3469 return b != NULL;
3470 else if (b == NULL)
3471 return -1;
3472 }
3473
3474 if (a->e == NULL && b->e != NULL)
3475 return 1;
3476 else if (a->e != NULL && b->e == NULL)
3477 return -1;
3478
3479 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3480 so there is no need to test both a->e and b->e. */
3481
3482 /* Sort by destination index. */
3483 if (a->e == NULL)
3484 ;
3485 else if (a->e->dest->index > b->e->dest->index)
3486 return 1;
3487 else if (a->e->dest->index < b->e->dest->index)
3488 return -1;
3489
3490 /* Sort by comp_code. */
3491 if (a->comp_code > b->comp_code)
3492 return 1;
3493 else if (a->comp_code < b->comp_code)
3494 return -1;
3495
3496 hashval_t ha, hb;
3497
3498 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3499 uses DECL_UID of the VAR_DECL, so sorting might differ between
3500 -g and -g0. When doing the removal of redundant assert exprs
3501 and commonization to successors, this does not matter, but
3502 the final sort needs to be stable. */
3503 if (stable)
3504 {
3505 ha = 0;
3506 hb = 0;
3507 }
3508 else
3509 {
3510 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3511 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3512 }
3513
3514 /* Break the tie using hashing and source/bb index. */
3515 if (ha == hb)
3516 return (a->e != NULL
3517 ? a->e->src->index - b->e->src->index
3518 : a->bb->index - b->bb->index);
3519 return ha > hb ? 1 : -1;
3520 }
3521
3522 /* Process all the insertions registered for every name N_i registered
3523 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3524 found in ASSERTS_FOR[i]. */
3525
3526 void
3527 vrp_asserts::process_assert_insertions ()
3528 {
3529 unsigned i;
3530 bitmap_iterator bi;
3531 bool update_edges_p = false;
3532 int num_asserts = 0;
3533
3534 if (dump_file && (dump_flags & TDF_DETAILS))
3535 dump (dump_file);
3536
3537 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3538 {
3539 assert_locus *loc = asserts_for[i];
3540 gcc_assert (loc);
3541
3542 auto_vec<assert_locus *, 16> asserts;
3543 for (; loc; loc = loc->next)
3544 asserts.safe_push (loc);
3545 asserts.qsort (compare_assert_loc<false>);
3546
3547 /* Push down common asserts to successors and remove redundant ones. */
3548 unsigned ecnt = 0;
3549 assert_locus *common = NULL;
3550 unsigned commonj = 0;
3551 for (unsigned j = 0; j < asserts.length (); ++j)
3552 {
3553 loc = asserts[j];
3554 if (! loc->e)
3555 common = NULL;
3556 else if (! common
3557 || loc->e->dest != common->e->dest
3558 || loc->comp_code != common->comp_code
3559 || ! operand_equal_p (loc->val, common->val, 0)
3560 || ! operand_equal_p (loc->expr, common->expr, 0))
3561 {
3562 commonj = j;
3563 common = loc;
3564 ecnt = 1;
3565 }
3566 else if (loc->e == asserts[j-1]->e)
3567 {
3568 /* Remove duplicate asserts. */
3569 if (commonj == j - 1)
3570 {
3571 commonj = j;
3572 common = loc;
3573 }
3574 free (asserts[j-1]);
3575 asserts[j-1] = NULL;
3576 }
3577 else
3578 {
3579 ecnt++;
3580 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3581 {
3582 /* We have the same assertion on all incoming edges of a BB.
3583 Insert it at the beginning of that block. */
3584 loc->bb = loc->e->dest;
3585 loc->e = NULL;
3586 loc->si = gsi_none ();
3587 common = NULL;
3588 /* Clear asserts commoned. */
3589 for (; commonj != j; ++commonj)
3590 if (asserts[commonj])
3591 {
3592 free (asserts[commonj]);
3593 asserts[commonj] = NULL;
3594 }
3595 }
3596 }
3597 }
3598
3599 /* The asserts vector sorting above might be unstable for
3600 -fcompare-debug; sort again to ensure a stable sort. */
3601 asserts.qsort (compare_assert_loc<true>);
3602 for (unsigned j = 0; j < asserts.length (); ++j)
3603 {
3604 loc = asserts[j];
3605 if (! loc)
3606 break;
3607 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3608 num_asserts++;
3609 free (loc);
3610 }
3611 }
3612
3613 if (update_edges_p)
3614 gsi_commit_edge_inserts ();
3615
3616 statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
3617 num_asserts);
3618 }
3619
3620 /* Traverse the flowgraph looking for conditional jumps to insert range
3621 expressions. These range expressions are meant to provide information
3622 to optimizations that need to reason in terms of value ranges. They
3623 will not be expanded into RTL. For instance, given:
3624
3625 x = ...
3626 y = ...
3627 if (x < y)
3628 y = x - 2;
3629 else
3630 x = y + 3;
3631
3632 this pass will transform the code into:
3633
3634 x = ...
3635 y = ...
3636 if (x < y)
3637 {
3638 x = ASSERT_EXPR <x, x < y>
3639 y = x - 2
3640 }
3641 else
3642 {
3643 y = ASSERT_EXPR <y, x >= y>
3644 x = y + 3
3645 }
3646
3647 The idea is that once copy and constant propagation have run, other
3648 optimizations will be able to determine what ranges of values can 'x'
3649 take in different paths of the code, simply by checking the reaching
3650 definition of 'x'. */
3651
3652 void
3653 vrp_asserts::insert_range_assertions (void)
3654 {
3655 need_assert_for = BITMAP_ALLOC (NULL);
3656 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3657
3658 calculate_dominance_info (CDI_DOMINATORS);
3659
3660 find_assert_locations ();
3661 if (!bitmap_empty_p (need_assert_for))
3662 {
3663 process_assert_insertions ();
3664 update_ssa (TODO_update_ssa_no_phi);
3665 }
3666
3667 if (dump_file && (dump_flags & TDF_DETAILS))
3668 {
3669 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3670 dump_function_to_file (current_function_decl, dump_file, dump_flags);
3671 }
3672
3673 free (asserts_for);
3674 BITMAP_FREE (need_assert_for);
3675 }
3676
3677 /* Return true if all imm uses of VAR are either in STMT, or
3678 feed (optionally through a chain of single imm uses) a GIMPLE_COND
3679 in basic block COND_BB. */
3680
3681 bool
3682 vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
3683 gimple *stmt,
3684 basic_block cond_bb)
3685 {
3686 use_operand_p use_p, use2_p;
3687 imm_use_iterator iter;
3688
3689 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3690 if (USE_STMT (use_p) != stmt)
3691 {
3692 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
3693 if (is_gimple_debug (use_stmt))
3694 continue;
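	/* Follow chains of single-use assignments (copies, casts and the
	   like) to reach the final consumer of VAR's value.  */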
3695 while (is_gimple_assign (use_stmt)
3696 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
3697 && single_imm_use (gimple_assign_lhs (use_stmt),
3698 &use2_p, &use_stmt2))
3699 use_stmt = use_stmt2;
3700 if (gimple_code (use_stmt) != GIMPLE_COND
3701 || gimple_bb (use_stmt) != cond_bb)
3702 return false;
3703 }
3704 return true;
3705 }
3706
3707 /* Convert range assertion expressions into the implied copies and
3708 copy propagate away the copies. Doing the trivial copy propagation
3709 here avoids the need to run the full copy propagation pass after
3710 VRP.
3711
3712 FIXME, this will eventually lead to copy propagation removing the
3713 names that had useful range information attached to them. For
3714 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
3715 then N_i will have the range [3, +INF].
3716
3717 However, by converting the assertion into the implied copy
3718 operation N_i = N_j, we will then copy-propagate N_j into the uses
3719 of N_i and lose the range information. We may want to hold on to
3720 ASSERT_EXPRs a little while longer as the ranges could be used in
3721 things like jump threading.
3722
3723 The problem with keeping ASSERT_EXPRs around is that passes after
3724 VRP need to handle them appropriately.
3725
3726 Another approach would be to make the range information a first
3727 class property of the SSA_NAME so that it can be queried from
3728 any pass. This is made somewhat more complex by the need for
3729 multiple ranges to be associated with one SSA_NAME. */
3730
3731 void
3732 vrp_asserts::remove_range_assertions ()
3733 {
3734 basic_block bb;
3735 gimple_stmt_iterator si;
3736 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
3737 a basic block preceded by a GIMPLE_COND branching to it and
3738 __builtin_trap, -1 if not yet checked, 0 otherwise. */
3739 int is_unreachable;
3740
3741 /* Note that the GSI iterator bump happens at the bottom of the
3742 loop and no bump is necessary if we're removing the statement
3743 referenced by the current GSI. */
3744 FOR_EACH_BB_FN (bb, fun)
3745 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
3746 {
3747 gimple *stmt = gsi_stmt (si);
3748
3749 if (is_gimple_assign (stmt)
3750 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
3751 {
3752 tree lhs = gimple_assign_lhs (stmt);
3753 tree rhs = gimple_assign_rhs1 (stmt);
3754 tree var;
3755
3756 var = ASSERT_EXPR_VAR (rhs);
3757
3758 if (TREE_CODE (var) == SSA_NAME
3759 && !POINTER_TYPE_P (TREE_TYPE (lhs))
3760 && SSA_NAME_RANGE_INFO (lhs))
3761 {
3762 if (is_unreachable == -1)
3763 {
3764 is_unreachable = 0;
3765 if (single_pred_p (bb)
3766 && assert_unreachable_fallthru_edge_p
3767 (single_pred_edge (bb)))
3768 is_unreachable = 1;
3769 }
3770 /* Handle
3771 if (x_7 >= 10 && x_7 < 20)
3772 __builtin_unreachable ();
3773 x_8 = ASSERT_EXPR <x_7, ...>;
3774 if the only uses of x_7 are in the ASSERT_EXPR and
3775 in the condition. In that case, we can copy the
3776 range info from x_8 computed in this pass also
3777 for x_7. */
3778 if (is_unreachable
3779 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
3780 single_pred (bb)))
3781 {
3782 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
3783 SSA_NAME_RANGE_INFO (lhs)->get_min (),
3784 SSA_NAME_RANGE_INFO (lhs)->get_max ());
3785 maybe_set_nonzero_bits (single_pred_edge (bb), var);
3786 }
3787 }
3788
3789 /* Propagate the RHS into every use of the LHS. For SSA names
3790 also propagate abnormals as it merely restores the original
3791 IL in this case (a replace_uses_by would assert). */
3792 if (TREE_CODE (var) == SSA_NAME)
3793 {
3794 imm_use_iterator iter;
3795 use_operand_p use_p;
3796 gimple *use_stmt;
3797 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3798 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3799 SET_USE (use_p, var);
3800 }
3801 else
3802 replace_uses_by (lhs, var);
3803
3804 /* And finally, remove the copy, it is not needed. */
3805 gsi_remove (&si, true);
3806 release_defs (stmt);
3807 }
3808 else
3809 {
3810 if (!is_gimple_debug (gsi_stmt (si)))
3811 is_unreachable = 0;
3812 gsi_next (&si);
3813 }
3814 }
3815 }
3816
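/* Propagation engine for VRP.  It drives ssa_propagate over the function,
   using the given vr_values instance to evaluate statements and PHI nodes
   into value ranges.  */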
3817 class vrp_prop : public ssa_propagation_engine
3818 {
3819 public:
3820 vrp_prop (vr_values *v)
3821 : ssa_propagation_engine (),
3822 m_vr_values (v) { }
3823
3824 void initialize (struct function *);
3825 void finalize ();
3826
3827 private:
3828 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
3829 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
3830
3831 struct function *fun;
3832 vr_values *m_vr_values;
3833 };
3834
3835 /* Initialization required by ssa_propagate engine. */
3836
3837 void
3838 vrp_prop::initialize (struct function *fn)
3839 {
3840 basic_block bb;
3841 fun = fn;
3842
3843 FOR_EACH_BB_FN (bb, fun)
3844 {
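      /* PHIs that are not interesting for VRP get a VARYING result and are
	 excluded from further simulation; the rest are marked so they are
	 simulated again when their inputs change.  */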
3845 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3846 gsi_next (&si))
3847 {
3848 gphi *phi = si.phi ();
3849 if (!stmt_interesting_for_vrp (phi))
3850 {
3851 tree lhs = PHI_RESULT (phi);
3852 m_vr_values->set_def_to_varying (lhs);
3853 prop_set_simulate_again (phi, false);
3854 }
3855 else
3856 prop_set_simulate_again (phi, true);
3857 }
3858
3859 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
3860 gsi_next (&si))
3861 {
3862 gimple *stmt = gsi_stmt (si);
3863
3864 /* If the statement is a control insn, we must simulate it
3865 at least once; otherwise the outgoing edges it controls
3866 will never get added. */
3867 if (stmt_ends_bb_p (stmt))
3868 prop_set_simulate_again (stmt, true);
3869 else if (!stmt_interesting_for_vrp (stmt))
3870 {
3871 m_vr_values->set_defs_to_varying (stmt);
3872 prop_set_simulate_again (stmt, false);
3873 }
3874 else
3875 prop_set_simulate_again (stmt, true);
3876 }
3877 }
3878 }
3879
3880 /* Evaluate statement STMT. If the statement produces a useful range,
3881 return SSA_PROP_INTERESTING and record the SSA name with the
3882 interesting range into *OUTPUT_P.
3883
3884 If STMT is a conditional branch and we can determine its truth
3885 value, the taken edge is recorded in *TAKEN_EDGE_P.
3886
3887 If STMT produces a varying value, return SSA_PROP_VARYING. */
3888
3889 enum ssa_prop_result
3890 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
3891 {
3892 tree lhs = gimple_get_lhs (stmt);
3893 value_range_equiv vr;
3894 m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
3895
3896 if (*output_p)
3897 {
3898 if (m_vr_values->update_value_range (*output_p, &vr))
3899 {
3900 if (dump_file && (dump_flags & TDF_DETAILS))
3901 {
3902 fprintf (dump_file, "Found new range for ");
3903 print_generic_expr (dump_file, *output_p);
3904 fprintf (dump_file, ": ");
3905 dump_value_range (dump_file, &vr);
3906 fprintf (dump_file, "\n");
3907 }
3908
3909 if (vr.varying_p ())
3910 return SSA_PROP_VARYING;
3911
3912 return SSA_PROP_INTERESTING;
3913 }
3914 return SSA_PROP_NOT_INTERESTING;
3915 }
3916
3917 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
3918 switch (gimple_call_internal_fn (stmt))
3919 {
3920 case IFN_ADD_OVERFLOW:
3921 case IFN_SUB_OVERFLOW:
3922 case IFN_MUL_OVERFLOW:
3923 case IFN_ATOMIC_COMPARE_EXCHANGE:
3924 /* These internal calls return _Complex integer type,
3925 which VRP does not track, but the immediate uses
3926 thereof might be interesting. */
3927 if (lhs && TREE_CODE (lhs) == SSA_NAME)
3928 {
3929 imm_use_iterator iter;
3930 use_operand_p use_p;
3931 enum ssa_prop_result res = SSA_PROP_VARYING;
3932
3933 m_vr_values->set_def_to_varying (lhs);
3934
3935 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3936 {
3937 gimple *use_stmt = USE_STMT (use_p);
3938 if (!is_gimple_assign (use_stmt))
3939 continue;
3940 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
3941 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
3942 continue;
3943 tree rhs1 = gimple_assign_rhs1 (use_stmt);
3944 tree use_lhs = gimple_assign_lhs (use_stmt);
3945 if (TREE_CODE (rhs1) != rhs_code
3946 || TREE_OPERAND (rhs1, 0) != lhs
3947 || TREE_CODE (use_lhs) != SSA_NAME
3948 || !stmt_interesting_for_vrp (use_stmt)
3949 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
3950 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
3951 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
3952 continue;
3953
3954 /* If there is a change in the value range for any of the
3955 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
3956 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
3957 or IMAGPART_EXPR immediate uses, but none of them have
3958 a change in their value ranges, return
3959 SSA_PROP_NOT_INTERESTING. If there are no
3960 {REAL,IMAG}PART_EXPR uses at all,
3961 return SSA_PROP_VARYING. */
3962 value_range_equiv new_vr;
3963 m_vr_values->extract_range_basic (&new_vr, use_stmt);
3964 const value_range_equiv *old_vr
3965 = m_vr_values->get_value_range (use_lhs);
3966 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
3967 res = SSA_PROP_INTERESTING;
3968 else
3969 res = SSA_PROP_NOT_INTERESTING;
3970 new_vr.equiv_clear ();
3971 if (res == SSA_PROP_INTERESTING)
3972 {
3973 *output_p = lhs;
3974 return res;
3975 }
3976 }
3977
3978 return res;
3979 }
3980 break;
3981 default:
3982 break;
3983 }
3984
3985 /* All other statements produce nothing of interest for VRP, so mark
3986 their outputs varying and prevent further simulation. */
3987 m_vr_values->set_defs_to_varying (stmt);
3988
3989 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
3990 }
3991
3992 /* Visit all arguments for PHI node PHI that flow through executable
3993 edges. If a valid value range can be derived from all the incoming
3994 value ranges, set a new range for the LHS of PHI. */
3995
3996 enum ssa_prop_result
3997 vrp_prop::visit_phi (gphi *phi)
3998 {
3999 tree lhs = PHI_RESULT (phi);
4000 value_range_equiv vr_result;
4001 m_vr_values->extract_range_from_phi_node (phi, &vr_result);
4002 if (m_vr_values->update_value_range (lhs, &vr_result))
4003 {
4004 if (dump_file && (dump_flags & TDF_DETAILS))
4005 {
4006 fprintf (dump_file, "Found new range for ");
4007 print_generic_expr (dump_file, lhs);
4008 fprintf (dump_file, ": ");
4009 dump_value_range (dump_file, &vr_result);
4010 fprintf (dump_file, "\n");
4011 }
4012
4013 if (vr_result.varying_p ())
4014 return SSA_PROP_VARYING;
4015
4016 return SSA_PROP_INTERESTING;
4017 }
4018
4019 /* Nothing changed, don't add outgoing edges. */
4020 return SSA_PROP_NOT_INTERESTING;
4021 }
4022
4023 /* Finalize the lattice and export the computed ranges into the SSA names. */
4024
4025 void
4026 vrp_prop::finalize ()
4027 {
4028 size_t i;
4029
4030 /* We have completed propagating through the lattice. */
4031 m_vr_values->set_lattice_propagation_complete ();
4032
4033 if (dump_file)
4034 {
4035 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
4036 m_vr_values->dump_all_value_ranges (dump_file);
4037 fprintf (dump_file, "\n");
4038 }
4039
4040 /* Record the computed ranges: non-null for pointers, range info for non-pointer SSA_NAMEs. */
4041 for (i = 0; i < num_ssa_names; i++)
4042 {
4043 tree name = ssa_name (i);
4044 if (!name)
4045 continue;
4046
4047 const value_range_equiv *vr = m_vr_values->get_value_range (name);
4048 if (!name || !vr->constant_p ())
4049 continue;
4050
4051 if (POINTER_TYPE_P (TREE_TYPE (name))
4052 && range_includes_zero_p (vr) == 0)
4053 set_ptr_nonnull (name);
4054 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
4055 set_range_info (name, *vr);
4056 }
4057 }
4058
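/* Folder that exploits the ranges computed by VRP: value_of_expr queries
   vr_values for a known value of an SSA name, and fold_stmt folds
   predicates and simplifies statements using the computed ranges.  */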
4059 class vrp_folder : public substitute_and_fold_engine
4060 {
4061 public:
4062 vrp_folder (vr_values *v)
4063 : substitute_and_fold_engine (/* Fold all stmts. */ true),
4064 m_vr_values (v), simplifier (v)
4065 { }
4066
4067 private:
4068 tree value_of_expr (tree name, gimple *stmt) OVERRIDE
4069 {
4070 return m_vr_values->value_of_expr (name, stmt);
4071 }
4072 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
4073 bool fold_predicate_in (gimple_stmt_iterator *);
4074
4075 vr_values *m_vr_values;
4076 simplify_using_ranges simplifier;
4077 };
4078
4079 /* If the statement pointed to by SI has a predicate whose value can be
4080 computed using the value range information computed by VRP, compute
4081 its value and return true. Otherwise, return false. */
4082
4083 bool
4084 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4085 {
4086 bool assignment_p = false;
4087 tree val;
4088 gimple *stmt = gsi_stmt (*si);
4089
4090 if (is_gimple_assign (stmt)
4091 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4092 {
4093 assignment_p = true;
4094 val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4095 gimple_assign_rhs1 (stmt),
4096 gimple_assign_rhs2 (stmt),
4097 stmt);
4098 }
4099 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4100 val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4101 gimple_cond_lhs (cond_stmt),
4102 gimple_cond_rhs (cond_stmt),
4103 stmt);
4104 else
4105 return false;
4106
4107 if (val)
4108 {
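      /* For a comparison on the RHS of an assignment, VAL is a boolean
	 constant; convert it to the type of the assignment's expression.  */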
4109 if (assignment_p)
4110 val = fold_convert (gimple_expr_type (stmt), val);
4111
4112 if (dump_file)
4113 {
4114 fprintf (dump_file, "Folding predicate ");
4115 print_gimple_expr (dump_file, stmt, 0);
4116 fprintf (dump_file, " to ");
4117 print_generic_expr (dump_file, val);
4118 fprintf (dump_file, "\n");
4119 }
4120
4121 if (is_gimple_assign (stmt))
4122 gimple_assign_set_rhs_from_tree (si, val);
4123 else
4124 {
4125 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4126 gcond *cond_stmt = as_a <gcond *> (stmt);
4127 if (integer_zerop (val))
4128 gimple_cond_make_false (cond_stmt);
4129 else if (integer_onep (val))
4130 gimple_cond_make_true (cond_stmt);
4131 else
4132 gcc_unreachable ();
4133 }
4134
4135 return true;
4136 }
4137
4138 return false;
4139 }
4140
4141 /* Callback for substitute_and_fold folding the stmt at *SI. */
4142
4143 bool
4144 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4145 {
4146 if (fold_predicate_in (si))
4147 return true;
4148
4149 return simplifier.simplify (si);
4150 }
4151
4152 /* Blocks which have more than one predecessor and more than
4153 one successor present jump threading opportunities, i.e.,
4154 when the block is reached from a specific predecessor, we
4155 may be able to determine which of the outgoing edges will
4156 be traversed. When this optimization applies, we are able
4157 to avoid conditionals at runtime and we may expose secondary
4158 optimization opportunities.
4159
4160 This class is effectively a driver for the generic jump
4161 threading code. It basically just presents the generic code
4162 with edges that may be suitable for jump threading.
4163
4164 Unlike DOM, we do not iterate VRP if jump threading was successful.
4165 While iterating may expose new opportunities for VRP, it is expected
4166 those opportunities would be very limited and the compile time cost
4167 to expose those opportunities would be significant.
4168
4169 As jump threading opportunities are discovered, they are registered
4170 for later realization. */
4171
4172 class vrp_jump_threader : public dom_walker
4173 {
4174 public:
4175 vrp_jump_threader (struct function *, vr_values *);
4176 ~vrp_jump_threader ();
4177
4178 void thread_jumps ()
4179 {
4180 walk (m_fun->cfg->x_entry_block_ptr);
4181 }
4182
4183 private:
4184 static tree simplify_stmt (gimple *stmt, gimple *within_stmt,
4185 avail_exprs_stack *, basic_block);
4186 virtual edge before_dom_children (basic_block);
4187 virtual void after_dom_children (basic_block);
4188
4189 function *m_fun;
4190 vr_values *m_vr_values;
4191 const_and_copies *m_const_and_copies;
4192 avail_exprs_stack *m_avail_exprs_stack;
4193 hash_table<expr_elt_hasher> *m_avail_exprs;
4194 gcond *m_dummy_cond;
4195 };
4196
4197 vrp_jump_threader::vrp_jump_threader (struct function *fun, vr_values *v)
4198 : dom_walker (CDI_DOMINATORS, REACHABLE_BLOCKS)
4199 {
4200 /* Ugh. When substituting values earlier in this pass we can wipe
4201 the dominance information. So rebuild the dominator information
4202 as we need it within the jump threading code. */
4203 calculate_dominance_info (CDI_DOMINATORS);
4204
4205 /* We do not allow VRP information to be used for jump threading
4206 across a back edge in the CFG. Otherwise it becomes too
4207 difficult to avoid eliminating loop exit tests. Of course
4208 EDGE_DFS_BACK is not accurate at this time so we have to
4209 recompute it. */
4210 mark_dfs_back_edges ();
4211
4212 /* Allocate our unwinder stack to unwind any temporary equivalences
4213 that might be recorded. */
4214 m_const_and_copies = new const_and_copies ();
4215
4216 m_dummy_cond = NULL;
4217 m_fun = fun;
4218 m_vr_values = v;
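  /* Set up the expression hash table and its unwinding stack;
     before_dom_children records conditions implied by ASSERT_EXPRs in
     them for use while threading.  */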
4219 m_avail_exprs = new hash_table<expr_elt_hasher> (1024);
4220 m_avail_exprs_stack = new avail_exprs_stack (m_avail_exprs);
4221 }
4222
4223 vrp_jump_threader::~vrp_jump_threader ()
4224 {
4225 /* We do not actually update the CFG or SSA graphs at this point as
4226 ASSERT_EXPRs are still in the IL and cfg cleanup code does not
4227 yet handle ASSERT_EXPRs gracefully. */
4228 delete m_const_and_copies;
4229 delete m_avail_exprs;
4230 delete m_avail_exprs_stack;
4231 }
4232
4233 /* Called before processing dominator children of BB. We want to look
4234 at ASSERT_EXPRs and record information from them in the appropriate
4235 tables.
4236
4237 We could look at other statements here. It's not seen as likely
4238 to significantly increase the jump threads we discover. */
4239
4240 edge
4241 vrp_jump_threader::before_dom_children (basic_block bb)
4242 {
4243 gimple_stmt_iterator gsi;
4244
4245 m_avail_exprs_stack->push_marker ();
4246 m_const_and_copies->push_marker ();
4247 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4248 {
4249 gimple *stmt = gsi_stmt (gsi);
4250 if (gimple_assign_single_p (stmt)
4251 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
4252 {
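	  /* For x_i = ASSERT_EXPR <x_j, COND>, record COND and its inverse
	     as available expressions and record the copy x_i = x_j, so the
	     threader can use them as temporary equivalences in this
	     dominator subtree.  */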
4253 tree rhs1 = gimple_assign_rhs1 (stmt);
4254 tree cond = TREE_OPERAND (rhs1, 1);
4255 tree inverted = invert_truthvalue (cond);
4256 vec<cond_equivalence> p;
4257 p.create (3);
4258 record_conditions (&p, cond, inverted);
4259 for (unsigned int i = 0; i < p.length (); i++)
4260 m_avail_exprs_stack->record_cond (&p[i]);
4261
4262 tree lhs = gimple_assign_lhs (stmt);
4263 m_const_and_copies->record_const_or_copy (lhs,
4264 TREE_OPERAND (rhs1, 0));
4265 p.release ();
4266 continue;
4267 }
4268 break;
4269 }
4270 return NULL;
4271 }
4272
4273 /* A trivial wrapper so that we can present the generic jump threading
4274 code with a simple API for simplifying statements. STMT is the
4275 statement we want to simplify, WITHIN_STMT provides the location
4276 for any overflow warnings.
4277
4278 ?? This should be cleaned up. There's a virtually identical copy
4279 of this function in tree-ssa-dom.c. */
4280
4281 tree
4282 vrp_jump_threader::simplify_stmt (gimple *stmt,
4283 gimple *within_stmt,
4284 avail_exprs_stack *avail_exprs_stack,
4285 basic_block bb)
4286 {
4287 /* First see if the conditional is in the hash table. */
4288 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
4289 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
4290 return cached_lhs;
4291
4292 class vr_values *vr_values = x_vr_values;
4293 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4294 {
4295 tree op0 = gimple_cond_lhs (cond_stmt);
4296 op0 = lhs_of_dominating_assert (op0, bb, stmt);
4297
4298 tree op1 = gimple_cond_rhs (cond_stmt);
4299 op1 = lhs_of_dominating_assert (op1, bb, stmt);
4300
4301 simplify_using_ranges simplifier (vr_values);
4302 return simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4303 op0, op1, within_stmt);
4304 }
4305
4306 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
4307 {
4308 tree op = gimple_switch_index (switch_stmt);
4309 if (TREE_CODE (op) != SSA_NAME)
4310 return NULL_TREE;
4311
4312 op = lhs_of_dominating_assert (op, bb, stmt);
4313
4314 const value_range_equiv *vr = vr_values->get_value_range (op);
4315 return find_case_label_range (switch_stmt, vr);
4316 }
4317
4318 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
4319 {
4320 tree lhs = gimple_assign_lhs (assign_stmt);
4321 if (TREE_CODE (lhs) == SSA_NAME
4322 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4323 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4324 && stmt_interesting_for_vrp (stmt))
4325 {
4326 edge dummy_e;
4327 tree dummy_tree;
4328 value_range_equiv new_vr;
4329 vr_values->extract_range_from_stmt (stmt, &dummy_e,
4330 &dummy_tree, &new_vr);
4331 tree singleton;
4332 if (new_vr.singleton_p (&singleton))
4333 return singleton;
4334 }
4335 }
4336
4337 return NULL_TREE;
4338 }
4339
4340 /* Called after processing dominator children of BB. This is where we
4341 actually call into the threader. */
4342 void
4343 vrp_jump_threader::after_dom_children (basic_block bb)
4344 {
4345 if (!m_dummy_cond)
4346 m_dummy_cond = gimple_build_cond (NE_EXPR,
4347 integer_zero_node, integer_zero_node,
4348 NULL, NULL);
4349
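  /* simplify_stmt is a static callback with a fixed signature, so hand it
     the vr_values instance through the x_vr_values global for the duration
     of the call.  */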
4350 x_vr_values = m_vr_values;
4351 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
4352 m_avail_exprs_stack, NULL,
4353 simplify_stmt);
4354 x_vr_values = NULL;
4355
4356 m_avail_exprs_stack->pop_to_marker ();
4357 m_const_and_copies->pop_to_marker ();
4358 }
4359
4360 /* STMT is a conditional at the end of a basic block.
4361
4362 If the conditional is of the form SSA_NAME op constant and the SSA_NAME
4363 was set via a type conversion, try to replace the SSA_NAME with the RHS
4364 of the type conversion. Doing so makes the conversion dead, which helps
4365 subsequent passes. */
4366
4367 static void
4368 vrp_simplify_cond_using_ranges (vr_values *query, gcond *stmt)
4369 {
4370 tree op0 = gimple_cond_lhs (stmt);
4371 tree op1 = gimple_cond_rhs (stmt);
4372
4373 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
4374 see if OP0 was set by a type conversion where the source of
4375 the conversion is another SSA_NAME with a range that fits
4376 into the range of OP0's type.
4377
4378 If so, the conversion is redundant as the earlier SSA_NAME can be
4379 used for the comparison directly if we just massage the constant in the
4380 comparison. */
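  /* For instance (an illustrative sketch, not taken from a testcase):

       _1 = (int) c_5;		c_5 is e.g. an unsigned char whose
       if (_1 > 35)		computed range fits in int

     can be rewritten as

       if (c_5 > 35)

     with 35 converted to c_5's type, making the conversion dead.  */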
4381 if (TREE_CODE (op0) == SSA_NAME
4382 && TREE_CODE (op1) == INTEGER_CST)
4383 {
4384 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
4385 tree innerop;
4386
4387 if (!is_gimple_assign (def_stmt)
4388 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
4389 return;
4390
4391 innerop = gimple_assign_rhs1 (def_stmt);
4392
4393 if (TREE_CODE (innerop) == SSA_NAME
4394 && !POINTER_TYPE_P (TREE_TYPE (innerop))
4395 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
4396 && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
4397 {
4398 const value_range *vr = query->get_value_range (innerop);
4399
4400 if (range_int_cst_p (vr)
4401 && range_fits_type_p (vr,
4402 TYPE_PRECISION (TREE_TYPE (op0)),
4403 TYPE_SIGN (TREE_TYPE (op0)))
4404 && int_fits_type_p (op1, TREE_TYPE (innerop)))
4405 {
4406 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
4407 gimple_cond_set_lhs (stmt, innerop);
4408 gimple_cond_set_rhs (stmt, newconst);
4409 update_stmt (stmt);
4410 if (dump_file && (dump_flags & TDF_DETAILS))
4411 {
4412 fprintf (dump_file, "Folded into: ");
4413 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
4414 fprintf (dump_file, "\n");
4415 }
4416 }
4417 }
4418 }
4419 }
4420
4421 /* Main entry point to VRP (Value Range Propagation). This pass is
4422 loosely based on J. R. C. Patterson, ``Accurate Static Branch
4423 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
4424 Programming Language Design and Implementation, pp. 67-78, 1995.
4425 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
4426
4427 This is essentially an SSA-CCP pass modified to deal with ranges
4428 instead of constants.
4429
4430 While propagating ranges, we may find that two or more SSA name
4431 have equivalent, though distinct ranges. For instance,
4432
4433 1 x_9 = p_3->a;
4434 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
4435 3 if (p_4 == q_2)
4436 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
4437 5 endif
4438 6 if (q_2)
4439
4440 In the code above, pointer p_5 has range [q_2, q_2], but from the
4441 code we can also determine that p_5 cannot be NULL and, if q_2 had
4442 a non-varying range, p_5's range should also be compatible with it.
4443
4444 These equivalences are created by two expressions: ASSERT_EXPR and
4445 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
4446 result of another assertion, then we can use the fact that p_5 and
4447 p_4 are equivalent when evaluating p_5's range.
4448
4449 Together with value ranges, we also propagate these equivalences
4450 between names so that we can take advantage of information from
4451 multiple ranges when doing final replacement. Note that this
4452 equivalency relation is transitive but not symmetric.
4453
4454 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
4455 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
4456 in contexts where that assertion does not hold (e.g., in line 6).
4457
4458 TODO, the main difference between this pass and Patterson's is that
4459 we do not propagate edge probabilities. We only compute whether
4460 edges can be taken or not. That is, instead of having a spectrum
4461 of jump probabilities between 0 and 1, we only deal with 0, 1 and
4462 DON'T KNOW. In the future, it may be worthwhile to propagate
4463 probabilities to aid branch prediction. */
4464
4465 static unsigned int
4466 execute_vrp (struct function *fun, bool warn_array_bounds_p)
4467 {
4468 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4469 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4470 scev_initialize ();
4471
4472 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
4473 Inserting assertions may split edges which will invalidate
4474 EDGE_DFS_BACK. */
4475 vrp_asserts assert_engine (fun);
4476 assert_engine.insert_range_assertions ();
4477
4478 threadedge_initialize_values ();
4479
4480 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
4481 mark_dfs_back_edges ();
4482
4483 vr_values vrp_vr_values;
4484
4485 class vrp_prop vrp_prop (&vrp_vr_values);
4486 vrp_prop.initialize (fun);
4487 vrp_prop.ssa_propagate ();
4488
4489 /* Instantiate the folder here, so that edge cleanups happen at the
4490 end of this function. */
4491 vrp_folder folder (&vrp_vr_values);
4492 vrp_prop.finalize ();
4493
4494 /* If we're checking array refs, we want to merge information on
4495 the executability of each edge between vrp_folder and the
4496 check_array_bounds_dom_walker: each can clear the
4497 EDGE_EXECUTABLE flag on edges, in different ways.
4498
4499 Hence, if we're going to call check_all_array_refs, set
4500 the flag on every edge now, rather than in
4501 check_array_bounds_dom_walker's ctor; vrp_folder may clear
4502 it from some edges. */
4503 if (warn_array_bounds && warn_array_bounds_p)
4504 set_all_edges_as_executable (fun);
4505
4506 folder.substitute_and_fold ();
4507
4508 if (warn_array_bounds && warn_array_bounds_p)
4509 {
4510 array_bounds_checker array_checker (fun, &vrp_vr_values);
4511 array_checker.check ();
4512 }
4513
4514 /* We must identify jump threading opportunities before we release
4515 the datastructures built by VRP. */
4516 vrp_jump_threader threader (fun, &vrp_vr_values);
4517 threader.thread_jumps ();
4518
4519 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
4520 was set by a type conversion can often be rewritten to use the
4521 RHS of the type conversion.
4522
4523 However, doing so inhibits jump threading through the comparison.
4524 So that transformation is not performed until after jump threading
4525 is complete. */
4526 basic_block bb;
4527 FOR_EACH_BB_FN (bb, fun)
4528 {
4529 gimple *last = last_stmt (bb);
4530 if (last && gimple_code (last) == GIMPLE_COND)
4531 vrp_simplify_cond_using_ranges (&vrp_vr_values,
4532 as_a <gcond *> (last));
4533 }
4534
4535 free_numbers_of_iterations_estimates (fun);
4536
4537 /* ASSERT_EXPRs must be removed before finalizing jump threads
4538 as finalizing jump threads calls the CFG cleanup code which
4539 does not properly handle ASSERT_EXPRs. */
4540 assert_engine.remove_range_assertions ();
4541
4542 /* If we exposed any new variables, go ahead and put them into
4543 SSA form now, before we handle jump threading. This simplifies
4544 interactions between rewriting of _DECL nodes into SSA form
4545 and rewriting SSA_NAME nodes into SSA form after block
4546 duplication and CFG manipulation. */
4547 update_ssa (TODO_update_ssa);
4548
4549 /* We identified all the jump threading opportunities earlier, but could
4550 not transform the CFG at that time. This routine transforms the
4551 CFG and arranges for the dominator tree to be rebuilt if necessary.
4552
4553 Note the SSA graph update will occur during the normal TODO
4554 processing by the pass manager. */
4555 thread_through_all_blocks (false);
4556
4557 threadedge_finalize_values ();
4558
4559 scev_finalize ();
4560 loop_optimizer_finalize ();
4561 return 0;
4562 }
4563
4564 namespace {
4565
4566 const pass_data pass_data_vrp =
4567 {
4568 GIMPLE_PASS, /* type */
4569 "vrp", /* name */
4570 OPTGROUP_NONE, /* optinfo_flags */
4571 TV_TREE_VRP, /* tv_id */
4572 PROP_ssa, /* properties_required */
4573 0, /* properties_provided */
4574 0, /* properties_destroyed */
4575 0, /* todo_flags_start */
4576 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
4577 };
4578
4579 class pass_vrp : public gimple_opt_pass
4580 {
4581 public:
4582 pass_vrp (gcc::context *ctxt)
4583 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
4584 {}
4585
4586 /* opt_pass methods: */
4587 opt_pass * clone () { return new pass_vrp (m_ctxt); }
4588 void set_pass_param (unsigned int n, bool param)
4589 {
4590 gcc_assert (n == 0);
4591 warn_array_bounds_p = param;
4592 }
4593 virtual bool gate (function *) { return flag_tree_vrp != 0; }
4594 virtual unsigned int execute (function *fun)
4595 { return execute_vrp (fun, warn_array_bounds_p); }
4596
4597 private:
4598 bool warn_array_bounds_p;
4599 }; // class pass_vrp
4600
4601 } // anon namespace
4602
4603 gimple_opt_pass *
4604 make_pass_vrp (gcc::context *ctxt)
4605 {
4606 return new pass_vrp (ctxt);
4607 }
4608
4609
4610 /* Worker for determine_value_range. */
4611
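/* Combine operand ranges for unary and binary expressions, use the constant
   itself for an INTEGER_CST, and otherwise fall back to the global SSA range
   info or to VARYING.  */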
4612 static void
4613 determine_value_range_1 (value_range *vr, tree expr)
4614 {
4615 if (BINARY_CLASS_P (expr))
4616 {
4617 value_range vr0, vr1;
4618 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
4619 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
4620 range_fold_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
4621 &vr0, &vr1);
4622 }
4623 else if (UNARY_CLASS_P (expr))
4624 {
4625 value_range vr0;
4626 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
4627 range_fold_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
4628 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
4629 }
4630 else if (TREE_CODE (expr) == INTEGER_CST)
4631 vr->set (expr);
4632 else
4633 {
4634 value_range_kind kind;
4635 wide_int min, max;
4636 /* For SSA names try to extract range info computed by VRP. Otherwise
4637 fall back to varying. */
4638 if (TREE_CODE (expr) == SSA_NAME
4639 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
4640 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
4641 vr->set (wide_int_to_tree (TREE_TYPE (expr), min),
4642 wide_int_to_tree (TREE_TYPE (expr), max),
4643 kind);
4644 else
4645 vr->set_varying (TREE_TYPE (expr));
4646 }
4647 }
4648
4649 /* Compute a value range for EXPR; if constant bounds can be determined,
4650 store them in *MIN and *MAX and return the range kind, else VR_VARYING. */
4651
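/* A typical use (illustrative sketch only):

     wide_int lo, hi;
     if (determine_value_range (expr, &lo, &hi) == VR_RANGE)
       ... expr is known to lie in [lo, hi] ...  */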
4652 value_range_kind
4653 determine_value_range (tree expr, wide_int *min, wide_int *max)
4654 {
4655 value_range vr;
4656 determine_value_range_1 (&vr, expr);
4657 if (vr.constant_p ())
4658 {
4659 *min = wi::to_wide (vr.min ());
4660 *max = wi::to_wide (vr.max ());
4661 return vr.kind ();
4662 }
4663
4664 return VR_VARYING;
4665 }