1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2020 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-dfa.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-ssa-loop-niter.h"
48 #include "tree-ssa-loop.h"
49 #include "tree-into-ssa.h"
50 #include "tree-ssa.h"
51 #include "intl.h"
52 #include "cfgloop.h"
53 #include "tree-scalar-evolution.h"
54 #include "tree-ssa-propagate.h"
55 #include "tree-chrec.h"
56 #include "tree-ssa-threadupdate.h"
57 #include "tree-ssa-scopedtables.h"
58 #include "tree-ssa-threadedge.h"
59 #include "omp-general.h"
60 #include "target.h"
61 #include "case-cfn-macros.h"
62 #include "alloc-pool.h"
63 #include "domwalk.h"
64 #include "tree-cfgcleanup.h"
65 #include "stringpool.h"
66 #include "attribs.h"
67 #include "vr-values.h"
68 #include "builtins.h"
69 #include "range-op.h"
70
71 /* Set of SSA names found live during the RPO traversal of the function
72 for still active basic-blocks. */
73 static sbitmap *live;
74
75 void
76 value_range_equiv::set_equiv (bitmap equiv)
77 {
78 if (undefined_p () || varying_p ())
79 equiv = NULL;
80 /* Since updating the equivalence set involves deep copying the
81 bitmaps, only do it if absolutely necessary.
82
83 All equivalence bitmaps are allocated from the same obstack. So
84 we can use the obstack associated with EQUIV to allocate vr->equiv. */
85 if (m_equiv == NULL
86 && equiv != NULL)
87 m_equiv = BITMAP_ALLOC (equiv->obstack);
88
89 if (equiv != m_equiv)
90 {
91 if (equiv && !bitmap_empty_p (equiv))
92 bitmap_copy (m_equiv, equiv);
93 else
94 bitmap_clear (m_equiv);
95 }
96 }
97
98 /* Initialize value_range. */
99
100 void
101 value_range_equiv::set (tree min, tree max, bitmap equiv,
102 value_range_kind kind)
103 {
104 value_range::set (min, max, kind);
105 set_equiv (equiv);
106 if (flag_checking)
107 check ();
108 }
109
110 value_range_equiv::value_range_equiv (tree min, tree max, bitmap equiv,
111 value_range_kind kind)
112 {
113 m_equiv = NULL;
114 set (min, max, equiv, kind);
115 }
116
117 value_range_equiv::value_range_equiv (const value_range &other)
118 {
119 m_equiv = NULL;
120 set (other.min(), other.max (), NULL, other.kind ());
121 }
122
123 /* Like set, but keep the equivalences in place. */
124
125 void
126 value_range_equiv::update (tree min, tree max, value_range_kind kind)
127 {
128 set (min, max,
129 (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL, kind);
130 }
131
132 /* Copy value_range in FROM into THIS while avoiding bitmap sharing.
133
134 Note: The code that avoids the bitmap sharing looks at the existing
135 this->m_equiv, so this function cannot be used to initialize an
136 object. Use the constructors for initialization. */
137
138 void
139 value_range_equiv::deep_copy (const value_range_equiv *from)
140 {
141 set (from->min (), from->max (), from->m_equiv, from->m_kind);
142 }
143
144 void
145 value_range_equiv::move (value_range_equiv *from)
146 {
147 set (from->min (), from->max (), NULL, from->m_kind);
148 m_equiv = from->m_equiv;
149 from->m_equiv = NULL;
150 }
151
152 void
153 value_range_equiv::check ()
154 {
155 value_range::check ();
156 switch (m_kind)
157 {
158 case VR_UNDEFINED:
159 case VR_VARYING:
160 gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
161 default:;
162 }
163 }
164
165 /* Return true if the bitmaps B1 and B2 are equal. */
166
167 static bool
168 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
169 {
170 return (b1 == b2
171 || ((!b1 || bitmap_empty_p (b1))
172 && (!b2 || bitmap_empty_p (b2)))
173 || (b1 && b2
174 && bitmap_equal_p (b1, b2)));
175 }
176
177 /* Returns TRUE if THIS == OTHER. Ignores the equivalence bitmap if
178 IGNORE_EQUIVS is TRUE. */
179
180 bool
181 value_range_equiv::equal_p (const value_range_equiv &other,
182 bool ignore_equivs) const
183 {
184 return (value_range::equal_p (other)
185 && (ignore_equivs
186 || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
187 }
188
189 void
190 value_range_equiv::set_undefined ()
191 {
192 set (NULL, NULL, NULL, VR_UNDEFINED);
193 }
194
195 void
196 value_range_equiv::set_varying (tree type)
197 {
198 value_range::set_varying (type);
199 equiv_clear ();
200 }
201
202 void
203 value_range_equiv::equiv_clear ()
204 {
205 if (m_equiv)
206 bitmap_clear (m_equiv);
207 }
208
209 /* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
210 bitmap. If no equivalence table has been created, OBSTACK is the
211 obstack to use (NULL for the default obstack).
212
213 This is the central point where equivalence processing can be
214 turned on/off. */
215
216 void
217 value_range_equiv::equiv_add (const_tree var,
218 const value_range_equiv *var_vr,
219 bitmap_obstack *obstack)
220 {
221 if (!m_equiv)
222 m_equiv = BITMAP_ALLOC (obstack);
223 unsigned ver = SSA_NAME_VERSION (var);
224 bitmap_set_bit (m_equiv, ver);
225 if (var_vr && var_vr->m_equiv)
226 bitmap_ior_into (m_equiv, var_vr->m_equiv);
227 }
228
229 void
230 value_range_equiv::dump (FILE *file) const
231 {
232 value_range::dump (file);
233 if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
234 && m_equiv)
235 {
236 bitmap_iterator bi;
237 unsigned i, c = 0;
238
239 fprintf (file, " EQUIVALENCES: { ");
240
241 EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
242 {
243 print_generic_expr (file, ssa_name (i));
244 fprintf (file, " ");
245 c++;
246 }
247
248 fprintf (file, "} (%u elements)", c);
249 }
250 }
251
252 void
253 value_range_equiv::dump () const
254 {
255 dump (stderr);
256 }
257
258 void
259 dump_value_range (FILE *file, const value_range_equiv *vr)
260 {
261 if (!vr)
262 fprintf (file, "[]");
263 else
264 vr->dump (file);
265 }
266
267 DEBUG_FUNCTION void
268 debug (const value_range_equiv *vr)
269 {
270 dump_value_range (stderr, vr);
271 }
272
273 DEBUG_FUNCTION void
274 debug (const value_range_equiv &vr)
275 {
276 dump_value_range (stderr, &vr);
277 }
278
279 /* Return true if the SSA name NAME is live on the edge E. */
280
281 static bool
282 live_on_edge (edge e, tree name)
283 {
284 return (live[e->dest->index]
285 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
286 }
287
288 /* Location information for ASSERT_EXPRs. Each instance of this
289 structure describes an ASSERT_EXPR for an SSA name. Since a single
290 SSA name may have more than one assertion associated with it, these
291 locations are kept in a linked list attached to the corresponding
292 SSA name. */
293 struct assert_locus
294 {
295 /* Basic block where the assertion would be inserted. */
296 basic_block bb;
297
298 /* Some assertions need to be inserted on an edge (e.g., assertions
299 generated by COND_EXPRs). In those cases, BB will be NULL. */
300 edge e;
301
302 /* Pointer to the statement that generated this assertion. */
303 gimple_stmt_iterator si;
304
305 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
306 enum tree_code comp_code;
307
308 /* Value being compared against. */
309 tree val;
310
311 /* Expression to compare. */
312 tree expr;
313
314 /* Next node in the linked list. */
315 assert_locus *next;
316 };
317
318 /* If bit I is present, it means that SSA name N_i has a list of
319 assertions that should be inserted in the IL. */
320 static bitmap need_assert_for;
321
322 /* Array of location lists describing where to insert assertions.
323 ASSERTS_FOR[I] holds a list of assert_locus nodes that describe
324 where ASSERT_EXPRs for SSA name N_I should be inserted. */
325 static assert_locus **asserts_for;
326
327 /* VR_TYPE describes a range with minimum value *MIN and maximum
328 value *MAX. Restrict the range to the set of values that have
329 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
330 return the new range type.
331
332 SGN gives the sign of the values described by the range. */
333
334 enum value_range_kind
335 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
336 wide_int *min, wide_int *max,
337 const wide_int &nonzero_bits,
338 signop sgn)
339 {
340 if (vr_type == VR_ANTI_RANGE)
341 {
342 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
343 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
344 to create an inclusive upper bound for A and an inclusive lower
345 bound for B. */
346 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
347 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
348
349 /* If the calculation of A_MAX wrapped, A is effectively empty
350 and A_MAX is the highest value that satisfies NONZERO_BITS.
351 Likewise if the calculation of B_MIN wrapped, B is effectively
352 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
353 bool a_empty = wi::ge_p (a_max, *min, sgn);
354 bool b_empty = wi::le_p (b_min, *max, sgn);
355
356 /* If both A and B are empty, there are no valid values. */
357 if (a_empty && b_empty)
358 return VR_UNDEFINED;
359
360 /* If exactly one of A or B is empty, return a VR_RANGE for the
361 other one. */
362 if (a_empty || b_empty)
363 {
364 *min = b_min;
365 *max = a_max;
366 gcc_checking_assert (wi::le_p (*min, *max, sgn));
367 return VR_RANGE;
368 }
369
370 /* Update the VR_ANTI_RANGE bounds. */
371 *min = a_max + 1;
372 *max = b_min - 1;
373 gcc_checking_assert (wi::le_p (*min, *max, sgn));
374
375 /* Now check whether the excluded range includes any values that
376 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
377 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
378 {
379 unsigned int precision = min->get_precision ();
380 *min = wi::min_value (precision, sgn);
381 *max = wi::max_value (precision, sgn);
382 vr_type = VR_RANGE;
383 }
384 }
385 if (vr_type == VR_RANGE)
386 {
387 *max = wi::round_down_for_mask (*max, nonzero_bits);
388
389 /* Check that the range contains at least one valid value. */
390 if (wi::gt_p (*min, *max, sgn))
391 return VR_UNDEFINED;
392
393 *min = wi::round_up_for_mask (*min, nonzero_bits);
394 gcc_checking_assert (wi::le_p (*min, *max, sgn));
395 }
396 return vr_type;
397 }
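
/* For example, with NONZERO_BITS == 0xC (so the only values with no
   bits outside the mask are 0, 4, 8 and 12), a VR_RANGE [1, 11] is
   narrowed to [4, 8]: 1 rounds up to 4 and 11 rounds down to 8.
   A VR_ANTI_RANGE ~[5, 7] excludes no value that satisfies the mask,
   so it widens to the full VR_RANGE of the precision.  */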
398
399 void
400 value_range_equiv::set (tree val)
401 {
402 gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
403 if (TREE_OVERFLOW_P (val))
404 val = drop_tree_overflow (val);
405 set (val, val);
406 }
407
408 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily
409 a singleton. */
410
411 bool
412 range_int_cst_p (const value_range *vr)
413 {
414 return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
415 }
416
417 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
418 otherwise. We only handle additive operations and set NEG to true if the
419 symbol is negated and INV to the invariant part, if any. */
420
421 tree
422 get_single_symbol (tree t, bool *neg, tree *inv)
423 {
424 bool neg_;
425 tree inv_;
426
427 *inv = NULL_TREE;
428 *neg = false;
429
430 if (TREE_CODE (t) == PLUS_EXPR
431 || TREE_CODE (t) == POINTER_PLUS_EXPR
432 || TREE_CODE (t) == MINUS_EXPR)
433 {
434 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
435 {
436 neg_ = (TREE_CODE (t) == MINUS_EXPR);
437 inv_ = TREE_OPERAND (t, 0);
438 t = TREE_OPERAND (t, 1);
439 }
440 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
441 {
442 neg_ = false;
443 inv_ = TREE_OPERAND (t, 1);
444 t = TREE_OPERAND (t, 0);
445 }
446 else
447 return NULL_TREE;
448 }
449 else
450 {
451 neg_ = false;
452 inv_ = NULL_TREE;
453 }
454
455 if (TREE_CODE (t) == NEGATE_EXPR)
456 {
457 t = TREE_OPERAND (t, 0);
458 neg_ = !neg_;
459 }
460
461 if (TREE_CODE (t) != SSA_NAME)
462 return NULL_TREE;
463
464 if (inv_ && TREE_OVERFLOW_P (inv_))
465 inv_ = drop_tree_overflow (inv_);
466
467 *neg = neg_;
468 *inv = inv_;
469 return t;
470 }
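
/* For example, 'x_1 + 16' yields x_1 with *INV == 16 and *NEG == false;
   '16 - x_1' yields x_1 with *INV == 16 and *NEG == true; and '-x_1'
   yields x_1 with *INV == NULL_TREE and *NEG == true.  */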
471
472 /* The reverse operation: build a symbolic expression with TYPE
473 from symbol SYM, negated according to NEG, and invariant INV. */
474
475 static tree
476 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
477 {
478 const bool pointer_p = POINTER_TYPE_P (type);
479 tree t = sym;
480
481 if (neg)
482 t = build1 (NEGATE_EXPR, type, t);
483
484 if (integer_zerop (inv))
485 return t;
486
487 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
488 }
489
490 /* Return
491 1 if VAL < VAL2
492 0 if !(VAL < VAL2)
493 -2 if those are incomparable. */
494 int
495 operand_less_p (tree val, tree val2)
496 {
497 /* LT is folded faster than GE and others. Inline the common case. */
498 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
499 return tree_int_cst_lt (val, val2);
500 else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
501 return val == val2 ? 0 : -2;
502 else
503 {
504 int cmp = compare_values (val, val2);
505 if (cmp == -1)
506 return 1;
507 else if (cmp == 0 || cmp == 1)
508 return 0;
509 else
510 return -2;
511 }
512
513 return 0;
514 }
515
516 /* Compare two values VAL1 and VAL2. Return
517
518 -2 if VAL1 and VAL2 cannot be compared at compile-time,
519 -1 if VAL1 < VAL2,
520 0 if VAL1 == VAL2,
521 +1 if VAL1 > VAL2, and
522 +2 if VAL1 != VAL2
523
524 This is similar to tree_int_cst_compare but supports pointer values
525 and values that cannot be compared at compile time.
526
527 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
528 true if the return value is only valid if we assume that signed
529 overflow is undefined. */
530
531 int
532 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
533 {
534 if (val1 == val2)
535 return 0;
536
537 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
538 both integers. */
539 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
540 == POINTER_TYPE_P (TREE_TYPE (val2)));
541
542 /* Convert the two values into the same type. This is needed because
543 sizetype causes sign extension even for unsigned types. */
544 if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
545 val2 = fold_convert (TREE_TYPE (val1), val2);
546
547 const bool overflow_undefined
548 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
549 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
550 tree inv1, inv2;
551 bool neg1, neg2;
552 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
553 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
554
555 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
556 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
557 if (sym1 && sym2)
558 {
559 /* Both values must use the same name with the same sign. */
560 if (sym1 != sym2 || neg1 != neg2)
561 return -2;
562
563 /* [-]NAME + CST == [-]NAME + CST. */
564 if (inv1 == inv2)
565 return 0;
566
567 /* If overflow is defined we cannot simplify more. */
568 if (!overflow_undefined)
569 return -2;
570
571 if (strict_overflow_p != NULL
572 /* Symbolic range building sets TREE_NO_WARNING to declare
573 that overflow doesn't happen. */
574 && (!inv1 || !TREE_NO_WARNING (val1))
575 && (!inv2 || !TREE_NO_WARNING (val2)))
576 *strict_overflow_p = true;
577
578 if (!inv1)
579 inv1 = build_int_cst (TREE_TYPE (val1), 0);
580 if (!inv2)
581 inv2 = build_int_cst (TREE_TYPE (val2), 0);
582
583 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
584 TYPE_SIGN (TREE_TYPE (val1)));
585 }
586
587 const bool cst1 = is_gimple_min_invariant (val1);
588 const bool cst2 = is_gimple_min_invariant (val2);
589
590 /* If one is of the form '[-]NAME + CST' and the other is constant, then
591 it might be possible to say something depending on the constants. */
592 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
593 {
594 if (!overflow_undefined)
595 return -2;
596
597 if (strict_overflow_p != NULL
598 /* Symbolic range building sets TREE_NO_WARNING to declare
599 that overflow doesn't happen. */
600 && (!sym1 || !TREE_NO_WARNING (val1))
601 && (!sym2 || !TREE_NO_WARNING (val2)))
602 *strict_overflow_p = true;
603
604 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
605 tree cst = cst1 ? val1 : val2;
606 tree inv = cst1 ? inv2 : inv1;
607
608 /* Compute the difference between the constants. If it overflows or
609 underflows, this means that we can trivially compare the NAME with
610 it and, consequently, the two values with each other. */
611 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
612 if (wi::cmp (0, wi::to_wide (inv), sgn)
613 != wi::cmp (diff, wi::to_wide (cst), sgn))
614 {
615 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
616 return cst1 ? res : -res;
617 }
618
619 return -2;
620 }
621
622 /* We cannot say anything more for non-constants. */
623 if (!cst1 || !cst2)
624 return -2;
625
626 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
627 {
628 /* We cannot compare overflowed values. */
629 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
630 return -2;
631
632 if (TREE_CODE (val1) == INTEGER_CST
633 && TREE_CODE (val2) == INTEGER_CST)
634 return tree_int_cst_compare (val1, val2);
635
636 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
637 {
638 if (known_eq (wi::to_poly_widest (val1),
639 wi::to_poly_widest (val2)))
640 return 0;
641 if (known_lt (wi::to_poly_widest (val1),
642 wi::to_poly_widest (val2)))
643 return -1;
644 if (known_gt (wi::to_poly_widest (val1),
645 wi::to_poly_widest (val2)))
646 return 1;
647 }
648
649 return -2;
650 }
651 else
652 {
653 if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
654 {
655 /* We cannot compare overflowed values. */
656 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
657 return -2;
658
659 return tree_int_cst_compare (val1, val2);
660 }
661
662 /* First check whether VAL1 and VAL2 are the same. */
663 if (operand_equal_p (val1, val2, 0))
664 return 0;
665
666 fold_defer_overflow_warnings ();
667
668 /* If VAL1 is a lower address than VAL2, return -1. */
669 tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
670 if (t && integer_onep (t))
671 {
672 fold_undefer_and_ignore_overflow_warnings ();
673 return -1;
674 }
675
676 /* If VAL1 is a higher address than VAL2, return +1. */
677 t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
678 if (t && integer_onep (t))
679 {
680 fold_undefer_and_ignore_overflow_warnings ();
681 return 1;
682 }
683
684 /* If VAL1 is different than VAL2, return +2. */
685 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
686 fold_undefer_and_ignore_overflow_warnings ();
687 if (t && integer_onep (t))
688 return 2;
689
690 return -2;
691 }
692 }
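
/* For example, with a signed type whose overflow is undefined,
   comparing 'n_7 + 1' against 'n_7 + 3' compares the invariants 1 and
   3 and returns -1, setting *STRICT_OVERFLOW_P because the result
   only holds if neither addition wraps.  */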
693
694 /* Compare values like compare_values_warnv. */
695
696 int
697 compare_values (tree val1, tree val2)
698 {
699 bool sop;
700 return compare_values_warnv (val1, val2, &sop);
701 }
702
703 /* If BOUND will include a symbolic bound, adjust it accordingly,
704 otherwise leave it as is.
705
706 CODE is the original operation that combined the bounds (PLUS_EXPR
707 or MINUS_EXPR).
708
709 TYPE is the type of the original operation.
710
711 SYM_OPn is the symbolic part of OPn, if any.
712
713 NEG_OPn is TRUE if OPn was negated. */
714
715 static void
716 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
717 tree sym_op0, tree sym_op1,
718 bool neg_op0, bool neg_op1)
719 {
720 bool minus_p = (code == MINUS_EXPR);
721 /* If the result bound is constant, we're done; otherwise, build the
722 symbolic lower bound. */
723 if (sym_op0 == sym_op1)
724 ;
725 else if (sym_op0)
726 bound = build_symbolic_expr (type, sym_op0,
727 neg_op0, bound);
728 else if (sym_op1)
729 {
730 /* We may not negate if that might introduce
731 undefined overflow. */
732 if (!minus_p
733 || neg_op1
734 || TYPE_OVERFLOW_WRAPS (type))
735 bound = build_symbolic_expr (type, sym_op1,
736 neg_op1 ^ minus_p, bound);
737 else
738 bound = NULL_TREE;
739 }
740 }
741
742 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
743 int bound according to CODE. CODE is the operation combining the
744 bound (either a PLUS_EXPR or a MINUS_EXPR).
745
746 TYPE is the type of the combine operation.
747
748 WI is the wide int to store the result.
749
750 OVF is -1 if an underflow occurred, +1 if an overflow occurred, or 0
751 if no over/underflow occurred. */
752
753 static void
754 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
755 tree type, tree op0, tree op1)
756 {
757 bool minus_p = (code == MINUS_EXPR);
758 const signop sgn = TYPE_SIGN (type);
759 const unsigned int prec = TYPE_PRECISION (type);
760
761 /* Combine the bounds, if any. */
762 if (op0 && op1)
763 {
764 if (minus_p)
765 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
766 else
767 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
768 }
769 else if (op0)
770 wi = wi::to_wide (op0);
771 else if (op1)
772 {
773 if (minus_p)
774 wi = wi::neg (wi::to_wide (op1), &ovf);
775 else
776 wi = wi::to_wide (op1);
777 }
778 else
779 wi = wi::shwi (0, prec);
780 }
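
/* For example, for an 8-bit unsigned TYPE, PLUS_EXPR with OP0 == 200
   and OP1 == 100 stores the truncated sum 44 in WI and sets OVF to
   wi::OVF_OVERFLOW, while MINUS_EXPR with OP0 == 3 and OP1 == 10
   stores 249 and sets OVF to wi::OVF_UNDERFLOW.  */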
781
782 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
783 put the result in VR.
784
785 TYPE is the type of the range.
786
787 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
788 occurred while originally calculating WMIN or WMAX. -1 indicates
789 underflow. +1 indicates overflow. 0 indicates neither. */
790
791 static void
792 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
793 tree type,
794 const wide_int &wmin, const wide_int &wmax,
795 wi::overflow_type min_ovf,
796 wi::overflow_type max_ovf)
797 {
798 const signop sgn = TYPE_SIGN (type);
799 const unsigned int prec = TYPE_PRECISION (type);
800
801 /* For one-bit precision, if max < min then the swapped
802 range covers all values. */
803 if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
804 {
805 kind = VR_VARYING;
806 return;
807 }
808
809 if (TYPE_OVERFLOW_WRAPS (type))
810 {
811 /* If overflow wraps, truncate the values and adjust the
812 range kind and bounds appropriately. */
813 wide_int tmin = wide_int::from (wmin, prec, sgn);
814 wide_int tmax = wide_int::from (wmax, prec, sgn);
815 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
816 {
817 /* If the limits are swapped, we wrapped around and cover
818 the entire range. */
819 if (wi::gt_p (tmin, tmax, sgn))
820 kind = VR_VARYING;
821 else
822 {
823 kind = VR_RANGE;
824 /* No overflow or both overflow or underflow. The
825 range kind stays VR_RANGE. */
826 min = wide_int_to_tree (type, tmin);
827 max = wide_int_to_tree (type, tmax);
828 }
829 return;
830 }
831 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
832 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
833 {
834 /* Min underflow or max overflow. The range kind
835 changes to VR_ANTI_RANGE. */
836 bool covers = false;
837 wide_int tem = tmin;
838 tmin = tmax + 1;
839 if (wi::cmp (tmin, tmax, sgn) < 0)
840 covers = true;
841 tmax = tem - 1;
842 if (wi::cmp (tmax, tem, sgn) > 0)
843 covers = true;
844 /* If the anti-range would cover nothing, drop to varying.
845 Likewise if the anti-range bounds are outside of the
846 type's values. */
847 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
848 {
849 kind = VR_VARYING;
850 return;
851 }
852 kind = VR_ANTI_RANGE;
853 min = wide_int_to_tree (type, tmin);
854 max = wide_int_to_tree (type, tmax);
855 return;
856 }
857 else
858 {
859 /* Other underflow and/or overflow, drop to VR_VARYING. */
860 kind = VR_VARYING;
861 return;
862 }
863 }
864 else
865 {
866 /* If overflow does not wrap, saturate to the type's min/max
867 value. */
868 wide_int type_min = wi::min_value (prec, sgn);
869 wide_int type_max = wi::max_value (prec, sgn);
870 kind = VR_RANGE;
871 if (min_ovf == wi::OVF_UNDERFLOW)
872 min = wide_int_to_tree (type, type_min);
873 else if (min_ovf == wi::OVF_OVERFLOW)
874 min = wide_int_to_tree (type, type_max);
875 else
876 min = wide_int_to_tree (type, wmin);
877
878 if (max_ovf == wi::OVF_UNDERFLOW)
879 max = wide_int_to_tree (type, type_min);
880 else if (max_ovf == wi::OVF_OVERFLOW)
881 max = wide_int_to_tree (type, type_max);
882 else
883 max = wide_int_to_tree (type, wmax);
884 }
885 }
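
/* For example, for an 8-bit wrapping unsigned type, WMIN == 250 with
   MIN_OVF == wi::OVF_NONE and a WMAX that overflowed to 4 with
   MAX_OVF == wi::OVF_OVERFLOW produce the anti-range ~[5, 249],
   i.e. the wrapped set { 250, ..., 255 } U { 0, ..., 4 }.  */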
886
887 /* Fold two value ranges of a POINTER_PLUS_EXPR into VR. */
888
889 static void
890 extract_range_from_pointer_plus_expr (value_range *vr,
891 enum tree_code code,
892 tree expr_type,
893 const value_range *vr0,
894 const value_range *vr1)
895 {
896 gcc_checking_assert (POINTER_TYPE_P (expr_type)
897 && code == POINTER_PLUS_EXPR);
898 /* For pointer types, we are really only interested in asserting
899 whether the expression evaluates to non-NULL.
900 With -fno-delete-null-pointer-checks we need to be more
901 conservative. As some object might reside at address 0,
902 then some offset could be added to it and the same offset
903 subtracted again and the result would be NULL.
904 E.g.
905 static int a[12]; where &a[0] is NULL and
906 ptr = &a[6];
907 ptr -= 6;
908 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
909 where the first range doesn't include zero and the second one
910 doesn't either. As the second operand is sizetype (unsigned),
911 consider all ranges where the MSB could be set as possible
912 subtractions where the result might be NULL. */
913 if ((!range_includes_zero_p (vr0)
914 || !range_includes_zero_p (vr1))
915 && !TYPE_OVERFLOW_WRAPS (expr_type)
916 && (flag_delete_null_pointer_checks
917 || (range_int_cst_p (vr1)
918 && !tree_int_cst_sign_bit (vr1->max ()))))
919 vr->set_nonzero (expr_type);
920 else if (vr0->zero_p () && vr1->zero_p ())
921 vr->set_zero (expr_type);
922 else
923 vr->set_varying (expr_type);
924 }
925
926 /* Extract range information from a PLUS/MINUS_EXPR and store the
927 result in *VR. */
928
929 static void
930 extract_range_from_plus_minus_expr (value_range *vr,
931 enum tree_code code,
932 tree expr_type,
933 const value_range *vr0_,
934 const value_range *vr1_)
935 {
936 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
937
938 value_range vr0 = *vr0_, vr1 = *vr1_;
939 value_range vrtem0, vrtem1;
940
941 /* Now canonicalize anti-ranges to ranges when they are not symbolic
942 and express ~[] op X as ([]' op X) U ([]'' op X). */
943 if (vr0.kind () == VR_ANTI_RANGE
944 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
945 {
946 extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
947 if (!vrtem1.undefined_p ())
948 {
949 value_range vrres;
950 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
951 &vrtem1, vr1_);
952 vr->union_ (&vrres);
953 }
954 return;
955 }
956 /* Likewise for X op ~[]. */
957 if (vr1.kind () == VR_ANTI_RANGE
958 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
959 {
960 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
961 if (!vrtem1.undefined_p ())
962 {
963 value_range vrres;
964 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
965 vr0_, &vrtem1);
966 vr->union_ (&vrres);
967 }
968 return;
969 }
970
971 value_range_kind kind;
972 value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
973 tree vr0_min = vr0.min (), vr0_max = vr0.max ();
974 tree vr1_min = vr1.min (), vr1_max = vr1.max ();
975 tree min = NULL_TREE, max = NULL_TREE;
976
977 /* This will normalize things such that calculating
978 [0,0] - VR_VARYING is not dropped to varying, but is
979 calculated as [MIN+1, MAX]. */
980 if (vr0.varying_p ())
981 {
982 vr0_kind = VR_RANGE;
983 vr0_min = vrp_val_min (expr_type);
984 vr0_max = vrp_val_max (expr_type);
985 }
986 if (vr1.varying_p ())
987 {
988 vr1_kind = VR_RANGE;
989 vr1_min = vrp_val_min (expr_type);
990 vr1_max = vrp_val_max (expr_type);
991 }
992
993 const bool minus_p = (code == MINUS_EXPR);
994 tree min_op0 = vr0_min;
995 tree min_op1 = minus_p ? vr1_max : vr1_min;
996 tree max_op0 = vr0_max;
997 tree max_op1 = minus_p ? vr1_min : vr1_max;
998 tree sym_min_op0 = NULL_TREE;
999 tree sym_min_op1 = NULL_TREE;
1000 tree sym_max_op0 = NULL_TREE;
1001 tree sym_max_op1 = NULL_TREE;
1002 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1003
1004 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1005
1006 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1007 single-symbolic ranges, try to compute the precise resulting range,
1008 but only if we know that this resulting range will also be constant
1009 or single-symbolic. */
1010 if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
1011 && (TREE_CODE (min_op0) == INTEGER_CST
1012 || (sym_min_op0
1013 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1014 && (TREE_CODE (min_op1) == INTEGER_CST
1015 || (sym_min_op1
1016 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1017 && (!(sym_min_op0 && sym_min_op1)
1018 || (sym_min_op0 == sym_min_op1
1019 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1020 && (TREE_CODE (max_op0) == INTEGER_CST
1021 || (sym_max_op0
1022 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1023 && (TREE_CODE (max_op1) == INTEGER_CST
1024 || (sym_max_op1
1025 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1026 && (!(sym_max_op0 && sym_max_op1)
1027 || (sym_max_op0 == sym_max_op1
1028 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1029 {
1030 wide_int wmin, wmax;
1031 wi::overflow_type min_ovf = wi::OVF_NONE;
1032 wi::overflow_type max_ovf = wi::OVF_NONE;
1033
1034 /* Build the bounds. */
1035 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1036 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1037
1038 /* If the resulting range will be symbolic, we need to eliminate any
1039 explicit or implicit overflow introduced in the above computation
1040 because compare_values could make an incorrect use of it. That's
1041 why we require one of the ranges to be a singleton. */
1042 if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
1043 && ((bool)min_ovf || (bool)max_ovf
1044 || (min_op0 != max_op0 && min_op1 != max_op1)))
1045 {
1046 vr->set_varying (expr_type);
1047 return;
1048 }
1049
1050 /* Adjust the range for possible overflow. */
1051 set_value_range_with_overflow (kind, min, max, expr_type,
1052 wmin, wmax, min_ovf, max_ovf);
1053 if (kind == VR_VARYING)
1054 {
1055 vr->set_varying (expr_type);
1056 return;
1057 }
1058
1059 /* Build the symbolic bounds if needed. */
1060 adjust_symbolic_bound (min, code, expr_type,
1061 sym_min_op0, sym_min_op1,
1062 neg_min_op0, neg_min_op1);
1063 adjust_symbolic_bound (max, code, expr_type,
1064 sym_max_op0, sym_max_op1,
1065 neg_max_op0, neg_max_op1);
1066 }
1067 else
1068 {
1069 /* For other cases, for example if we have a PLUS_EXPR with two
1070 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
1071 to compute a precise range for such a case.
1072 ??? General even mixed range kind operations can be expressed
1073 by for example transforming ~[3, 5] + [1, 2] to range-only
1074 operations and a union primitive:
1075 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
1076 [-INF+1, 4] U [6, +INF(OVF)]
1077 though usually the union is not exactly representable with
1078 a single range or anti-range as the above is
1079 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1080 but one could use a scheme similar to equivalences for this. */
1081 vr->set_varying (expr_type);
1082 return;
1083 }
1084
1085 /* If either MIN or MAX overflowed, then set the resulting range to
1086 VARYING. */
1087 if (min == NULL_TREE
1088 || TREE_OVERFLOW_P (min)
1089 || max == NULL_TREE
1090 || TREE_OVERFLOW_P (max))
1091 {
1092 vr->set_varying (expr_type);
1093 return;
1094 }
1095
1096 int cmp = compare_values (min, max);
1097 if (cmp == -2 || cmp == 1)
1098 {
1099 /* If the new range has its limits swapped around (MIN > MAX),
1100 then the operation caused one of them to wrap around, mark
1101 the new range VARYING. */
1102 vr->set_varying (expr_type);
1103 }
1104 else
1105 vr->set (min, max, kind);
1106 }
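
/* For example, [1, 5] + [10, 20] yields [11, 25], while
   [1, 5] - [10, 20] combines each bound with the opposite bound of
   the other range and yields [1 - 20, 5 - 10] == [-19, -5].  */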
1107
1108 /* Return the range-ops handler for CODE and EXPR_TYPE. If no
1109 suitable operator is found, return NULL and set VR to VARYING. */
1110
1111 static const range_operator *
1112 get_range_op_handler (value_range *vr,
1113 enum tree_code code,
1114 tree expr_type)
1115 {
1116 const range_operator *op = range_op_handler (code, expr_type);
1117 if (!op)
1118 vr->set_varying (expr_type);
1119 return op;
1120 }
1121
1122 /* If the types passed are supported, return TRUE, otherwise set VR to
1123 VARYING and return FALSE. */
1124
1125 static bool
1126 supported_types_p (value_range *vr,
1127 tree type0,
1128 tree type1 = NULL)
1129 {
1130 if (!value_range::supports_type_p (type0)
1131 || (type1 && !value_range::supports_type_p (type1)))
1132 {
1133 vr->set_varying (type0);
1134 return false;
1135 }
1136 return true;
1137 }
1138
1139 /* If any of the ranges passed are defined, return TRUE, otherwise set
1140 VR to UNDEFINED and return FALSE. */
1141
1142 static bool
1143 defined_ranges_p (value_range *vr,
1144 const value_range *vr0, const value_range *vr1 = NULL)
1145 {
1146 if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
1147 {
1148 vr->set_undefined ();
1149 return false;
1150 }
1151 return true;
1152 }
1153
1154 static value_range
1155 drop_undefines_to_varying (const value_range *vr, tree expr_type)
1156 {
1157 if (vr->undefined_p ())
1158 return value_range (expr_type);
1159 else
1160 return *vr;
1161 }
1162
1163 /* If any operand is symbolic, perform a binary operation on them and
1164 return TRUE, otherwise return FALSE. */
1165
1166 static bool
1167 range_fold_binary_symbolics_p (value_range *vr,
1168 tree_code code,
1169 tree expr_type,
1170 const value_range *vr0, const value_range *vr1)
1171 {
1172 if (vr0->symbolic_p () || vr1->symbolic_p ())
1173 {
1174 if ((code == PLUS_EXPR || code == MINUS_EXPR))
1175 {
1176 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0, vr1);
1177 return true;
1178 }
1179 if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
1180 {
1181 extract_range_from_pointer_plus_expr (vr, code, expr_type, vr0, vr1);
1182 return true;
1183 }
1184 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1185 value_range vr0_cst (*vr0), vr1_cst (*vr1);
1186 vr0_cst.normalize_symbolics ();
1187 vr1_cst.normalize_symbolics ();
1188 return op->fold_range (*vr, expr_type, vr0_cst, vr1_cst);
1189 }
1190 return false;
1191 }
1192
1193 /* If operand is symbolic, perform a unary operation on it and return
1194 TRUE, otherwise return FALSE. */
1195
1196 static bool
1197 range_fold_unary_symbolics_p (value_range *vr,
1198 tree_code code,
1199 tree expr_type,
1200 const value_range *vr0)
1201 {
1202 if (vr0->symbolic_p ())
1203 {
1204 if (code == NEGATE_EXPR)
1205 {
1206 /* -X is simply 0 - X. */
1207 value_range zero;
1208 zero.set_zero (vr0->type ());
1209 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1210 return true;
1211 }
1212 if (code == BIT_NOT_EXPR)
1213 {
1214 /* ~X is simply -1 - X. */
1215 value_range minusone;
1216 minusone.set (build_int_cst (vr0->type (), -1));
1217 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1218 return true;
1219 }
1220 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1221 value_range vr0_cst (*vr0);
1222 vr0_cst.normalize_symbolics ();
1223 return op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1224 }
1225 return false;
1226 }
1227
1228 /* Perform a binary operation on a pair of ranges. */
1229
1230 void
1231 range_fold_binary_expr (value_range *vr,
1232 enum tree_code code,
1233 tree expr_type,
1234 const value_range *vr0_,
1235 const value_range *vr1_)
1236 {
1237 if (!supported_types_p (vr, expr_type)
1238 || !defined_ranges_p (vr, vr0_, vr1_))
1239 return;
1240 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1241 if (!op)
1242 return;
1243
1244 value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
1245 value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
1246 if (range_fold_binary_symbolics_p (vr, code, expr_type, &vr0, &vr1))
1247 return;
1248
1249 vr0.normalize_addresses ();
1250 vr1.normalize_addresses ();
1251 op->fold_range (*vr, expr_type, vr0, vr1);
1252 }
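
/* Illustrative use, as a sketch (a hypothetical caller folding
   [0, 9] + [100, 200] in integer_type_node):

     value_range vr0 (build_int_cst (integer_type_node, 0),
                      build_int_cst (integer_type_node, 9));
     value_range vr1 (build_int_cst (integer_type_node, 100),
                      build_int_cst (integer_type_node, 200));
     value_range res;
     range_fold_binary_expr (&res, PLUS_EXPR, integer_type_node,
                             &vr0, &vr1);

   after which RES holds [100, 209].  */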
1253
1254 /* Perform a unary operation on a range. */
1255
1256 void
1257 range_fold_unary_expr (value_range *vr,
1258 enum tree_code code, tree expr_type,
1259 const value_range *vr0,
1260 tree vr0_type)
1261 {
1262 if (!supported_types_p (vr, expr_type, vr0_type)
1263 || !defined_ranges_p (vr, vr0))
1264 return;
1265 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1266 if (!op)
1267 return;
1268
1269 if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1270 return;
1271
1272 value_range vr0_cst (*vr0);
1273 vr0_cst.normalize_addresses ();
1274 op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1275 }
1276
1277 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
1278 create a new SSA name N and return the assertion assignment
1279 'N = ASSERT_EXPR <V, V OP W>'. */
1280
1281 static gimple *
1282 build_assert_expr_for (tree cond, tree v)
1283 {
1284 tree a;
1285 gassign *assertion;
1286
1287 gcc_assert (TREE_CODE (v) == SSA_NAME
1288 && COMPARISON_CLASS_P (cond));
1289
1290 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
1291 assertion = gimple_build_assign (NULL_TREE, a);
1292
1293 /* The new ASSERT_EXPR creates a new SSA name that replaces the
1294 operand of the ASSERT_EXPR. Create it so the new name and the old one
1295 are registered in the replacement table so that we can fix the SSA web
1296 after adding all the ASSERT_EXPRs. */
1297 tree new_def = create_new_def_for (v, assertion, NULL);
1298 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
1299 given we have to be able to fully propagate those out to re-create
1300 valid SSA when removing the asserts. */
1301 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
1302 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
1303
1304 return assertion;
1305 }
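
/* For example, for V == x_3 and COND == 'x_3 > 0' this builds the
   assignment 'x_4 = ASSERT_EXPR <x_3, x_3 > 0>', where x_4 is a fresh
   SSA name registered as a replacement for x_3.  */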
1306
1307
1308 /* Return true if STMT is a GIMPLE_COND whose comparison involves
1309 floating point values. */
1310
1311 static inline bool
1312 fp_predicate (gimple *stmt)
1313 {
1314 GIMPLE_CHECK (stmt, GIMPLE_COND);
1315
1316 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
1317 }
1318
1319 /* If the range of values taken by OP can be inferred after STMT executes,
1320 return the comparison code (COMP_CODE_P) and value (VAL_P) that
1321 describes the inferred range. Return true if a range could be
1322 inferred. */
1323
1324 bool
1325 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
1326 {
1327 *val_p = NULL_TREE;
1328 *comp_code_p = ERROR_MARK;
1329
1330 /* Do not attempt to infer anything in names that flow through
1331 abnormal edges. */
1332 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
1333 return false;
1334
1335 /* If STMT is the last statement of a basic block with no normal
1336 successors, there is no point inferring anything about any of its
1337 operands. We would not be able to find a proper insertion point
1338 for the assertion, anyway. */
1339 if (stmt_ends_bb_p (stmt))
1340 {
1341 edge_iterator ei;
1342 edge e;
1343
1344 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1345 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
1346 break;
1347 if (e == NULL)
1348 return false;
1349 }
1350
1351 if (infer_nonnull_range (stmt, op))
1352 {
1353 *val_p = build_int_cst (TREE_TYPE (op), 0);
1354 *comp_code_p = NE_EXPR;
1355 return true;
1356 }
1357
1358 return false;
1359 }
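
/* For example, after a dereference such as '*p_1 = x_2;' a non-null
   range can be inferred for p_1, yielding *COMP_CODE_P == NE_EXPR and
   *VAL_P == 0.  */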
1360
1361
1362 void dump_asserts_for (FILE *, tree);
1363 void debug_asserts_for (tree);
1364 void dump_all_asserts (FILE *);
1365 void debug_all_asserts (void);
1366
1367 /* Dump all the registered assertions for NAME to FILE. */
1368
1369 void
1370 dump_asserts_for (FILE *file, tree name)
1371 {
1372 assert_locus *loc;
1373
1374 fprintf (file, "Assertions to be inserted for ");
1375 print_generic_expr (file, name);
1376 fprintf (file, "\n");
1377
1378 loc = asserts_for[SSA_NAME_VERSION (name)];
1379 while (loc)
1380 {
1381 fprintf (file, "\t");
1382 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
1383 fprintf (file, "\n\tBB #%d", loc->bb->index);
1384 if (loc->e)
1385 {
1386 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
1387 loc->e->dest->index);
1388 dump_edge_info (file, loc->e, dump_flags, 0);
1389 }
1390 fprintf (file, "\n\tPREDICATE: ");
1391 print_generic_expr (file, loc->expr);
1392 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
1393 print_generic_expr (file, loc->val);
1394 fprintf (file, "\n\n");
1395 loc = loc->next;
1396 }
1397
1398 fprintf (file, "\n");
1399 }
1400
1401
1402 /* Dump all the registered assertions for NAME to stderr. */
1403
1404 DEBUG_FUNCTION void
1405 debug_asserts_for (tree name)
1406 {
1407 dump_asserts_for (stderr, name);
1408 }
1409
1410
1411 /* Dump all the registered assertions for all the names to FILE. */
1412
1413 void
1414 dump_all_asserts (FILE *file)
1415 {
1416 unsigned i;
1417 bitmap_iterator bi;
1418
1419 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
1420 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
1421 dump_asserts_for (file, ssa_name (i));
1422 fprintf (file, "\n");
1423 }
1424
1425
1426 /* Dump all the registered assertions for all the names to stderr. */
1427
1428 DEBUG_FUNCTION void
1429 debug_all_asserts (void)
1430 {
1431 dump_all_asserts (stderr);
1432 }
1433
1434 /* Dump assert_info structure. */
1435
1436 void
1437 dump_assert_info (FILE *file, const assert_info &assert)
1438 {
1439 fprintf (file, "Assert for: ");
1440 print_generic_expr (file, assert.name);
1441 fprintf (file, "\n\tPREDICATE: expr=[");
1442 print_generic_expr (file, assert.expr);
1443 fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
1444 fprintf (file, "val=[");
1445 print_generic_expr (file, assert.val);
1446 fprintf (file, "]\n\n");
1447 }
1448
1449 DEBUG_FUNCTION void
1450 debug (const assert_info &assert)
1451 {
1452 dump_assert_info (stderr, assert);
1453 }
1454
1455 /* Dump a vector of assert_info's. */
1456
1457 void
1458 dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
1459 {
1460 for (unsigned i = 0; i < asserts.length (); ++i)
1461 {
1462 dump_assert_info (file, asserts[i]);
1463 fprintf (file, "\n");
1464 }
1465 }
1466
1467 DEBUG_FUNCTION void
1468 debug (const vec<assert_info> &asserts)
1469 {
1470 dump_asserts_info (stderr, asserts);
1471 }
1472
1473 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
1474
1475 static void
1476 add_assert_info (vec<assert_info> &asserts,
1477 tree name, tree expr, enum tree_code comp_code, tree val)
1478 {
1479 assert_info info;
1480 info.comp_code = comp_code;
1481 info.name = name;
1482 if (TREE_OVERFLOW_P (val))
1483 val = drop_tree_overflow (val);
1484 info.val = val;
1485 info.expr = expr;
1486 asserts.safe_push (info);
1487 if (dump_enabled_p ())
1488 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
1489 "Adding assert for %T from %T %s %T\n",
1490 name, expr, op_symbol_code (comp_code), val);
1491 }
1492
1493 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
1494 'EXPR COMP_CODE VAL' at a location that dominates block BB or
1495 E->DEST, then register this location as a possible insertion point
1496 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
1497
1498 BB, E and SI provide the exact insertion point for the new
1499 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
1500 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
1501 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
1502 must not be NULL. */
1503
1504 static void
1505 register_new_assert_for (tree name, tree expr,
1506 enum tree_code comp_code,
1507 tree val,
1508 basic_block bb,
1509 edge e,
1510 gimple_stmt_iterator si)
1511 {
1512 assert_locus *n, *loc, *last_loc;
1513 basic_block dest_bb;
1514
1515 gcc_checking_assert (bb == NULL || e == NULL);
1516
1517 if (e == NULL)
1518 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
1519 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
1520
1521 /* Never build an assert comparing against an integer constant with
1522 TREE_OVERFLOW set. This confuses our undefined overflow warning
1523 machinery. */
1524 if (TREE_OVERFLOW_P (val))
1525 val = drop_tree_overflow (val);
1526
1527 /* The new assertion A will be inserted at BB or E. We need to
1528 determine if the new location is dominated by a previously
1529 registered location for A. If we are doing an edge insertion,
1530 assume that A will be inserted at E->DEST. Note that this is not
1531 necessarily true.
1532
1533 If E is a critical edge, it will be split. But even if E is
1534 split, the new block will dominate the same set of blocks that
1535 E->DEST dominates.
1536
1537 The reverse, however, is not true, blocks dominated by E->DEST
1538 will not be dominated by the new block created to split E. So,
1539 if the insertion location is on a critical edge, we will not use
1540 the new location to move another assertion previously registered
1541 at a block dominated by E->DEST. */
1542 dest_bb = (bb) ? bb : e->dest;
1543
1544 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
1545 VAL at a block dominating DEST_BB, then we don't need to insert a new
1546 one. Similarly, if the same assertion already exists at a block
1547 dominated by DEST_BB and the new location is not on a critical
1548 edge, then update the existing location for the assertion (i.e.,
1549 move the assertion up in the dominance tree).
1550
1551 Note, this is implemented as a simple linked list because there
1552 should not be more than a handful of assertions registered per
1553 name. If this becomes a performance problem, a table hashed by
1554 COMP_CODE and VAL could be implemented. */
1555 loc = asserts_for[SSA_NAME_VERSION (name)];
1556 last_loc = loc;
1557 while (loc)
1558 {
1559 if (loc->comp_code == comp_code
1560 && (loc->val == val
1561 || operand_equal_p (loc->val, val, 0))
1562 && (loc->expr == expr
1563 || operand_equal_p (loc->expr, expr, 0)))
1564 {
1565 /* If E is not a critical edge and DEST_BB
1566 dominates the existing location for the assertion, move
1567 the assertion up in the dominance tree by updating its
1568 location information. */
1569 if ((e == NULL || !EDGE_CRITICAL_P (e))
1570 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
1571 {
1572 loc->bb = dest_bb;
1573 loc->e = e;
1574 loc->si = si;
1575 return;
1576 }
1577 }
1578
1579 /* Update the last node of the list and move to the next one. */
1580 last_loc = loc;
1581 loc = loc->next;
1582 }
1583
1584 /* If we didn't find an assertion already registered for
1585 NAME COMP_CODE VAL, add a new one at the end of the list of
1586 assertions associated with NAME. */
1587 n = XNEW (struct assert_locus);
1588 n->bb = dest_bb;
1589 n->e = e;
1590 n->si = si;
1591 n->comp_code = comp_code;
1592 n->val = val;
1593 n->expr = expr;
1594 n->next = NULL;
1595
1596 if (last_loc)
1597 last_loc->next = n;
1598 else
1599 asserts_for[SSA_NAME_VERSION (name)] = n;
1600
1601 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
1602 }
1603
1604 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
1605 Extract a suitable test code and value and store them into *CODE_P and
1606 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
1607
1608 If no extraction was possible, return FALSE, otherwise return TRUE.
1609
1610 If INVERT is true, then we invert the result stored into *CODE_P. */
1611
1612 static bool
1613 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
1614 tree cond_op0, tree cond_op1,
1615 bool invert, enum tree_code *code_p,
1616 tree *val_p)
1617 {
1618 enum tree_code comp_code;
1619 tree val;
1620
1621 /* We have a comparison of the form NAME COMP VAL
1622 or VAL COMP NAME. */
1623 if (name == cond_op1)
1624 {
1625 /* If the predicate is of the form VAL COMP NAME, flip
1626 COMP around because we need to register NAME as the
1627 first operand in the predicate. */
1628 comp_code = swap_tree_comparison (cond_code);
1629 val = cond_op0;
1630 }
1631 else if (name == cond_op0)
1632 {
1633 /* The comparison is of the form NAME COMP VAL, so the
1634 comparison code remains unchanged. */
1635 comp_code = cond_code;
1636 val = cond_op1;
1637 }
1638 else
1639 gcc_unreachable ();
1640
1641 /* Invert the comparison code as necessary. */
1642 if (invert)
1643 comp_code = invert_tree_comparison (comp_code, 0);
1644
1645 /* VRP only handles integral and pointer types. */
1646 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
1647 && ! POINTER_TYPE_P (TREE_TYPE (val)))
1648 return false;
1649
1650 /* Do not register always-false predicates.
1651 FIXME: this works around a limitation in fold() when dealing with
1652 enumerations. Given 'enum { N1, N2 } x;', fold will not
1653 fold 'if (x > N2)' to 'if (0)'. */
1654 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
1655 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
1656 {
1657 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
1658 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
1659
1660 if (comp_code == GT_EXPR
1661 && (!max
1662 || compare_values (val, max) == 0))
1663 return false;
1664
1665 if (comp_code == LT_EXPR
1666 && (!min
1667 || compare_values (val, min) == 0))
1668 return false;
1669 }
1670 *code_p = comp_code;
1671 *val_p = val;
1672 return true;
1673 }
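
/* For example, for NAME == x_4 and the predicate '10 < x_4' the
   comparison is flipped to 'x_4 > 10'; with INVERT it becomes
   'x_4 <= 10'.  */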
1674
1675 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
1676 (otherwise return VAL). VAL and MASK must be zero-extended for
1677 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
1678 (to transform signed values into unsigned) and at the end xor
1679 SGNBIT back. */
1680
1681 static wide_int
1682 masked_increment (const wide_int &val_in, const wide_int &mask,
1683 const wide_int &sgnbit, unsigned int prec)
1684 {
1685 wide_int bit = wi::one (prec), res;
1686 unsigned int i;
1687
1688 wide_int val = val_in ^ sgnbit;
1689 for (i = 0; i < prec; i++, bit += bit)
1690 {
1691 res = mask;
1692 if ((res & bit) == 0)
1693 continue;
1694 res = bit - 1;
1695 res = wi::bit_and_not (val + bit, res);
1696 res &= mask;
1697 if (wi::gtu_p (res, val))
1698 return res ^ sgnbit;
1699 }
1700 return val ^ sgnbit;
1701 }
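
/* For example, for VAL == 5 and MASK == 0xC (SGNBIT zero) the result
   is 8, the smallest value above 5 whose set bits all lie within the
   mask; for VAL == 12 no such value exists and 12 is returned.  */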
1702
1703 /* Helper for overflow_comparison_p
1704
1705 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1706 OP1's defining statement to see if it ultimately has the form
1707 OP0 CODE (OP0 PLUS INTEGER_CST)
1708
1709 If so, return TRUE indicating this is an overflow test and store into
1710 *NEW_CST an updated constant that can be used in a narrowed range test.
1711
1712 REVERSED indicates if the comparison was originally:
1713
1714 OP1 CODE' OP0.
1715
1716 This affects how we build the updated constant. */
1717
1718 static bool
1719 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
1720 bool follow_assert_exprs, bool reversed, tree *new_cst)
1721 {
1722 /* See if this is a relational operation between two SSA_NAMES with
1723 unsigned, overflow wrapping values. If so, check it more deeply. */
1724 if ((code == LT_EXPR || code == LE_EXPR
1725 || code == GE_EXPR || code == GT_EXPR)
1726 && TREE_CODE (op0) == SSA_NAME
1727 && TREE_CODE (op1) == SSA_NAME
1728 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
1729 && TYPE_UNSIGNED (TREE_TYPE (op0))
1730 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
1731 {
1732 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
1733
1734 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
1735 if (follow_assert_exprs)
1736 {
1737 while (gimple_assign_single_p (op1_def)
1738 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
1739 {
1740 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
1741 if (TREE_CODE (op1) != SSA_NAME)
1742 break;
1743 op1_def = SSA_NAME_DEF_STMT (op1);
1744 }
1745 }
1746
1747 /* Now look at the defining statement of OP1 to see if it adds
1748 or subtracts a nonzero constant from another operand. */
1749 if (op1_def
1750 && is_gimple_assign (op1_def)
1751 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
1752 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
1753 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
1754 {
1755 tree target = gimple_assign_rhs1 (op1_def);
1756
1757 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
1758 for one where TARGET appears on the RHS. */
1759 if (follow_assert_exprs)
1760 {
1761 /* Now see if that "other operand" is op0, following the chain
1762 of ASSERT_EXPRs if necessary. */
1763 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
1764 while (op0 != target
1765 && gimple_assign_single_p (op0_def)
1766 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
1767 {
1768 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
1769 if (TREE_CODE (op0) != SSA_NAME)
1770 break;
1771 op0_def = SSA_NAME_DEF_STMT (op0);
1772 }
1773 }
1774
1775 /* If we did not find our target SSA_NAME, then this is not
1776 an overflow test. */
1777 if (op0 != target)
1778 return false;
1779
1780 tree type = TREE_TYPE (op0);
1781 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
1782 tree inc = gimple_assign_rhs2 (op1_def);
1783 if (reversed)
1784 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
1785 else
1786 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
1787 return true;
1788 }
1789 }
1790 return false;
1791 }
1792
1793 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1794 OP1's defining statement to see if it ultimately has the form
1795 OP0 CODE (OP0 PLUS INTEGER_CST)
1796
1797 If so, return TRUE indicating this is an overflow test and store into
1798 *NEW_CST an updated constant that can be used in a narrowed range test.
1799
1800 These statements are left as-is in the IL to facilitate discovery of
1801 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
1802 the alternate range representation is often useful within VRP. */
1803
1804 bool
1805 overflow_comparison_p (tree_code code, tree name, tree val,
1806 bool use_equiv_p, tree *new_cst)
1807 {
1808 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
1809 return true;
1810 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
1811 use_equiv_p, true, new_cst);
1812 }
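
/* For example, for unsigned x_1 and tmp_2 = x_1 + 10, the test
   'x_1 < tmp_2' fails only when the addition wraps, so it is
   equivalent to 'x_1 <= UINT_MAX - 10' and *NEW_CST is set to
   UINT_MAX - 10.  */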
1813
1814
1815 /* Try to register an edge assertion for SSA name NAME on edge E for
1816 the condition COND_OP0 COND_CODE COND_OP1 contributing to the
1817 conditional jump guarding E. Invert the condition if INVERT is true. */
1818
1819 static void
1820 register_edge_assert_for_2 (tree name, edge e,
1821 enum tree_code cond_code,
1822 tree cond_op0, tree cond_op1, bool invert,
1823 vec<assert_info> &asserts)
1824 {
1825 tree val;
1826 enum tree_code comp_code;
1827
1828 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
1829 cond_op0,
1830 cond_op1,
1831 invert, &comp_code, &val))
1832 return;
1833
1834 /* Queue the assert. */
1835 tree x;
1836 if (overflow_comparison_p (comp_code, name, val, false, &x))
1837 {
1838 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
1839 ? GT_EXPR : LE_EXPR);
1840 add_assert_info (asserts, name, name, new_code, x);
1841 }
1842 add_assert_info (asserts, name, name, comp_code, val);
1843
1844 /* In the case of NAME <= CST and NAME being defined as
1845 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
1846 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
1847 This catches range and anti-range tests. */
1848 if ((comp_code == LE_EXPR
1849 || comp_code == GT_EXPR)
1850 && TREE_CODE (val) == INTEGER_CST
1851 && TYPE_UNSIGNED (TREE_TYPE (val)))
1852 {
1853 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1854 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
1855
1856 /* Extract CST2 from the (optional) addition. */
1857 if (is_gimple_assign (def_stmt)
1858 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
1859 {
1860 name2 = gimple_assign_rhs1 (def_stmt);
1861 cst2 = gimple_assign_rhs2 (def_stmt);
1862 if (TREE_CODE (name2) == SSA_NAME
1863 && TREE_CODE (cst2) == INTEGER_CST)
1864 def_stmt = SSA_NAME_DEF_STMT (name2);
1865 }
1866
1867 /* Extract NAME2 from the (optional) sign-changing cast. */
1868 if (gimple_assign_cast_p (def_stmt))
1869 {
1870 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
1871 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1872 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
1873 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
1874 name3 = gimple_assign_rhs1 (def_stmt);
1875 }
1876
1877 /* If name3 is used later, create an ASSERT_EXPR for it. */
1878 if (name3 != NULL_TREE
1879 && TREE_CODE (name3) == SSA_NAME
1880 && (cst2 == NULL_TREE
1881 || TREE_CODE (cst2) == INTEGER_CST)
1882 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
1883 {
1884 tree tmp;
1885
1886 /* Build an expression for the range test. */
1887 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
1888 if (cst2 != NULL_TREE)
1889 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1890 add_assert_info (asserts, name3, tmp, comp_code, val);
1891 }
1892
1893 /* If name2 is used later, create an ASSERT_EXPR for it. */
1894 if (name2 != NULL_TREE
1895 && TREE_CODE (name2) == SSA_NAME
1896 && TREE_CODE (cst2) == INTEGER_CST
1897 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
1898 {
1899 tree tmp;
1900
1901 /* Build an expression for the range test. */
1902 tmp = name2;
1903 if (TREE_TYPE (name) != TREE_TYPE (name2))
1904 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
1905 if (cst2 != NULL_TREE)
1906 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1907 add_assert_info (asserts, name2, tmp, comp_code, val);
1908 }
1909 }
1910
1911 /* In the case of post-increment/decrement tests like if (i++) ... with
1912 uses of the incremented/decremented value on the edge, the extra name
1913 we want to assert for is not on the def chain of the name compared;
1914 instead it is in the set of use stmts.
1915 Similar cases happen for conversions that were simplified through
1916 fold_{sign_changed,widened}_comparison. */
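/* For instance (sketch), given

     i_3 = i_1 + 1;
     if (i_1 != 0)
       ... use of i_3 ...

   the defining statement of i_3 dominates the edge, so on the edge
   where i_1 != 0 holds we can additionally assert i_3 != 1.  */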
1917 if ((comp_code == NE_EXPR
1918 || comp_code == EQ_EXPR)
1919 && TREE_CODE (val) == INTEGER_CST)
1920 {
1921 imm_use_iterator ui;
1922 gimple *use_stmt;
1923 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
1924 {
1925 if (!is_gimple_assign (use_stmt))
1926 continue;
1927
1928 /* Restrict to use stmts whose block dominates the predecessor E->SRC. */
1929 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
1930 continue;
1931
1932 tree name2 = gimple_assign_lhs (use_stmt);
1933 if (TREE_CODE (name2) != SSA_NAME)
1934 continue;
1935
1936 enum tree_code code = gimple_assign_rhs_code (use_stmt);
1937 tree cst;
1938 if (code == PLUS_EXPR
1939 || code == MINUS_EXPR)
1940 {
1941 cst = gimple_assign_rhs2 (use_stmt);
1942 if (TREE_CODE (cst) != INTEGER_CST)
1943 continue;
1944 cst = int_const_binop (code, val, cst);
1945 }
1946 else if (CONVERT_EXPR_CODE_P (code))
1947 {
1948 /* For truncating conversions we cannot record
1949 an inequality. */
1950 if (comp_code == NE_EXPR
1951 && (TYPE_PRECISION (TREE_TYPE (name2))
1952 < TYPE_PRECISION (TREE_TYPE (name))))
1953 continue;
1954 cst = fold_convert (TREE_TYPE (name2), val);
1955 }
1956 else
1957 continue;
1958
1959 if (TREE_OVERFLOW_P (cst))
1960 cst = drop_tree_overflow (cst);
1961 add_assert_info (asserts, name2, name2, comp_code, cst);
1962 }
1963 }
1964
1965 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
1966 && TREE_CODE (val) == INTEGER_CST)
1967 {
1968 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1969 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
1970 tree val2 = NULL_TREE;
1971 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
1972 wide_int mask = wi::zero (prec);
1973 unsigned int nprec = prec;
1974 enum tree_code rhs_code = ERROR_MARK;
1975
1976 if (is_gimple_assign (def_stmt))
1977 rhs_code = gimple_assign_rhs_code (def_stmt);
1978
1979 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
1980 assert that A != CST1 -+ CST2. */
1981 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
1982 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
1983 {
1984 tree op0 = gimple_assign_rhs1 (def_stmt);
1985 tree op1 = gimple_assign_rhs2 (def_stmt);
1986 if (TREE_CODE (op0) == SSA_NAME
1987 && TREE_CODE (op1) == INTEGER_CST)
1988 {
1989 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
1990 ? MINUS_EXPR : PLUS_EXPR);
1991 op1 = int_const_binop (reverse_op, val, op1);
1992 if (TREE_OVERFLOW (op1))
1993 op1 = drop_tree_overflow (op1);
1994 add_assert_info (asserts, op0, op0, comp_code, op1);
1995 }
1996 }
1997
1998 /* Add asserts for NAME cmp CST and NAME being defined
1999 as NAME = (int) NAME2. */
2000 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2001 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2002 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2003 && gimple_assign_cast_p (def_stmt))
2004 {
2005 name2 = gimple_assign_rhs1 (def_stmt);
2006 if (CONVERT_EXPR_CODE_P (rhs_code)
2007 && TREE_CODE (name2) == SSA_NAME
2008 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2009 && TYPE_UNSIGNED (TREE_TYPE (name2))
2010 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2011 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2012 || !tree_int_cst_equal (val,
2013 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2014 {
2015 tree tmp, cst;
2016 enum tree_code new_comp_code = comp_code;
2017
2018 cst = fold_convert (TREE_TYPE (name2),
2019 TYPE_MIN_VALUE (TREE_TYPE (val)));
2020 /* Build an expression for the range test. */
2021 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2022 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2023 fold_convert (TREE_TYPE (name2), val));
2024 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2025 {
2026 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2027 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2028 build_int_cst (TREE_TYPE (name2), 1));
2029 }
2030 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2031 }
2032 }
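
/* For instance (sketch, 32-bit types), for

     n_1 = (int) u_2;        <-- u_2 unsigned
     if (n_1 < 0)

   this registers the unsigned range test
   u_2 + 0x80000000 <= 0x7fffffff, i.e. u_2 >= 0x80000000.  */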
2033
2034 /* Add asserts for NAME cmp CST and NAME being defined as
2035 NAME = NAME2 >> CST2.
2036
2037 Extract CST2 from the right shift. */
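/* For instance (sketch, x_2 a 32-bit unsigned), from

     y_1 = x_2 >> 4;
     if (y_1 == 3)

   we can conclude x_2 is in [0x30, 0x3f], registered below as
   x_2 - 0x30 <= 0xf.  */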
2038 if (rhs_code == RSHIFT_EXPR)
2039 {
2040 name2 = gimple_assign_rhs1 (def_stmt);
2041 cst2 = gimple_assign_rhs2 (def_stmt);
2042 if (TREE_CODE (name2) == SSA_NAME
2043 && tree_fits_uhwi_p (cst2)
2044 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2045 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2046 && type_has_mode_precision_p (TREE_TYPE (val)))
2047 {
2048 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2049 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2050 }
2051 }
2052 if (val2 != NULL_TREE
2053 && TREE_CODE (val2) == INTEGER_CST
2054 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2055 TREE_TYPE (val),
2056 val2, cst2), val))
2057 {
2058 enum tree_code new_comp_code = comp_code;
2059 tree tmp, new_val;
2060
2061 tmp = name2;
2062 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2063 {
2064 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2065 {
2066 tree type = build_nonstandard_integer_type (prec, 1);
2067 tmp = build1 (NOP_EXPR, type, name2);
2068 val2 = fold_convert (type, val2);
2069 }
2070 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2071 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2072 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2073 }
2074 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2075 {
2076 wide_int minval
2077 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2078 new_val = val2;
2079 if (minval == wi::to_wide (new_val))
2080 new_val = NULL_TREE;
2081 }
2082 else
2083 {
2084 wide_int maxval
2085 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2086 mask |= wi::to_wide (val2);
2087 if (wi::eq_p (mask, maxval))
2088 new_val = NULL_TREE;
2089 else
2090 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2091 }
2092
2093 if (new_val)
2094 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2095 }
2096
2097 /* If we have a conversion that doesn't change the value of the
2098 source, simply register the same assert for it. */
2099 if (CONVERT_EXPR_CODE_P (rhs_code))
2100 {
2101 wide_int rmin, rmax;
2102 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2103 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2104 && TREE_CODE (rhs1) == SSA_NAME
2105 /* Make sure the relation preserves the upper/lower boundary of
2106 the range conservatively. */
2107 && (comp_code == NE_EXPR
2108 || comp_code == EQ_EXPR
2109 || (TYPE_SIGN (TREE_TYPE (name))
2110 == TYPE_SIGN (TREE_TYPE (rhs1)))
2111 || ((comp_code == LE_EXPR
2112 || comp_code == LT_EXPR)
2113 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
2114 || ((comp_code == GE_EXPR
2115 || comp_code == GT_EXPR)
2116 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
2117 /* And the conversion does not alter the value we compare
2118 against, and all values in rhs1 can be represented in
2119 the converted-to type. */
2120 && int_fits_type_p (val, TREE_TYPE (rhs1))
2121 && ((TYPE_PRECISION (TREE_TYPE (name))
2122 > TYPE_PRECISION (TREE_TYPE (rhs1)))
2123 || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
2124 && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
2125 && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
2126 add_assert_info (asserts, rhs1, rhs1,
2127 comp_code, fold_convert (TREE_TYPE (rhs1), val));
2128 }
2129
2130 /* Add asserts for NAME cmp CST and NAME being defined as
2131 NAME = NAME2 & CST2.
2132
2133 Extract CST2 from the and.
2134
2135 Also handle
2136 NAME = (unsigned) NAME2;
2137 casts where NAME's type is unsigned and has smaller precision
2138 than NAME2's type as if it was NAME = NAME2 & MASK. */
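/* For instance (sketch, x_2 unsigned), from

     y_1 = x_2 & 0xf8;
     if (y_1 == 0x10)

   the minimum value of x_2 is 0x10 (VAL & CST2) and the maximum is
   0x17 (VAL | ~CST2), registered below as x_2 - 0x10 <= 7.  */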
2139 names[0] = NULL_TREE;
2140 names[1] = NULL_TREE;
2141 cst2 = NULL_TREE;
2142 if (rhs_code == BIT_AND_EXPR
2143 || (CONVERT_EXPR_CODE_P (rhs_code)
2144 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2145 && TYPE_UNSIGNED (TREE_TYPE (val))
2146 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2147 > prec))
2148 {
2149 name2 = gimple_assign_rhs1 (def_stmt);
2150 if (rhs_code == BIT_AND_EXPR)
2151 cst2 = gimple_assign_rhs2 (def_stmt);
2152 else
2153 {
2154 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2155 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2156 }
2157 if (TREE_CODE (name2) == SSA_NAME
2158 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2159 && TREE_CODE (cst2) == INTEGER_CST
2160 && !integer_zerop (cst2)
2161 && (nprec > 1
2162 || TYPE_UNSIGNED (TREE_TYPE (val))))
2163 {
2164 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2165 if (gimple_assign_cast_p (def_stmt2))
2166 {
2167 names[1] = gimple_assign_rhs1 (def_stmt2);
2168 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2169 || TREE_CODE (names[1]) != SSA_NAME
2170 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2171 || (TYPE_PRECISION (TREE_TYPE (name2))
2172 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2173 names[1] = NULL_TREE;
2174 }
2175 names[0] = name2;
2176 }
2177 }
2178 if (names[0] || names[1])
2179 {
2180 wide_int minv, maxv, valv, cst2v;
2181 wide_int tem, sgnbit;
2182 bool valid_p = false, valn, cst2n;
2183 enum tree_code ccode = comp_code;
2184
2185 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2186 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2187 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2188 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2189 /* If CST2 doesn't have the most significant bit set,
2190 but VAL is negative, we have a comparison like
2191 if ((x & 0x123) > -4) (always true). Just give up. */
2192 if (!cst2n && valn)
2193 ccode = ERROR_MARK;
2194 if (cst2n)
2195 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2196 else
2197 sgnbit = wi::zero (nprec);
2198 minv = valv & cst2v;
2199 switch (ccode)
2200 {
2201 case EQ_EXPR:
2202 /* Minimum unsigned value for equality is VAL & CST2
2203 (should be equal to VAL, otherwise we probably should
2204 have folded the comparison into false) and
2205 maximum unsigned value is VAL | ~CST2. */
2206 maxv = valv | ~cst2v;
2207 valid_p = true;
2208 break;
2209
2210 case NE_EXPR:
2211 tem = valv | ~cst2v;
2212 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2213 if (valv == 0)
2214 {
2215 cst2n = false;
2216 sgnbit = wi::zero (nprec);
2217 goto gt_expr;
2218 }
2219 /* If (VAL | ~CST2) is all ones, handle it as
2220 (X & CST2) < VAL. */
2221 if (tem == -1)
2222 {
2223 cst2n = false;
2224 valn = false;
2225 sgnbit = wi::zero (nprec);
2226 goto lt_expr;
2227 }
2228 if (!cst2n && wi::neg_p (cst2v))
2229 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2230 if (sgnbit != 0)
2231 {
2232 if (valv == sgnbit)
2233 {
2234 cst2n = true;
2235 valn = true;
2236 goto gt_expr;
2237 }
2238 if (tem == wi::mask (nprec - 1, false, nprec))
2239 {
2240 cst2n = true;
2241 goto lt_expr;
2242 }
2243 if (!cst2n)
2244 sgnbit = wi::zero (nprec);
2245 }
2246 break;
2247
2248 case GE_EXPR:
2249 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2250 is VAL and maximum unsigned value is ~0. For signed
2251 comparison, if CST2 doesn't have most significant bit
2252 set, handle it similarly. If CST2 has MSB set,
2253 the minimum is the same, and maximum is ~0U/2. */
2254 if (minv != valv)
2255 {
2256 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2257 VAL. */
2258 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2259 if (minv == valv)
2260 break;
2261 }
2262 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2263 valid_p = true;
2264 break;
2265
2266 case GT_EXPR:
2267 gt_expr:
2268 /* Find the smallest MINV such that MINV > VAL
2269 && (MINV & CST2) == MINV, if any. If VAL is signed and
2270 CST2 has the MSB set, compute it biased by 1 << (nprec - 1). */
2271 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2272 if (minv == valv)
2273 break;
2274 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2275 valid_p = true;
2276 break;
2277
2278 case LE_EXPR:
2279 /* Minimum unsigned value for <= is 0 and maximum
2280 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2281 Otherwise, find smallest VAL2 where VAL2 > VAL
2282 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2283 as maximum.
2284 For signed comparison, if CST2 doesn't have most
2285 significant bit set, handle it similarly. If CST2 has
2286 MSB set, the maximum is the same and minimum is INT_MIN. */
2287 if (minv == valv)
2288 maxv = valv;
2289 else
2290 {
2291 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2292 if (maxv == valv)
2293 break;
2294 maxv -= 1;
2295 }
2296 maxv |= ~cst2v;
2297 minv = sgnbit;
2298 valid_p = true;
2299 break;
2300
2301 case LT_EXPR:
2302 lt_expr:
2303 /* Minimum unsigned value for < is 0 and maximum
2304 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2305 Otherwise, find smallest VAL2 where VAL2 > VAL
2306 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2307 as maximum.
2308 For signed comparison, if CST2 doesn't have most
2309 significant bit set, handle it similarly. If CST2 has
2310 MSB set, the maximum is the same and minimum is INT_MIN. */
2311 if (minv == valv)
2312 {
2313 if (valv == sgnbit)
2314 break;
2315 maxv = valv;
2316 }
2317 else
2318 {
2319 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2320 if (maxv == valv)
2321 break;
2322 }
2323 maxv -= 1;
2324 maxv |= ~cst2v;
2325 minv = sgnbit;
2326 valid_p = true;
2327 break;
2328
2329 default:
2330 break;
2331 }
2332 if (valid_p
2333 && (maxv - minv) != -1)
2334 {
2335 tree tmp, new_val, type;
2336 int i;
2337
2338 for (i = 0; i < 2; i++)
2339 if (names[i])
2340 {
2341 wide_int maxv2 = maxv;
2342 tmp = names[i];
2343 type = TREE_TYPE (names[i]);
2344 if (!TYPE_UNSIGNED (type))
2345 {
2346 type = build_nonstandard_integer_type (nprec, 1);
2347 tmp = build1 (NOP_EXPR, type, names[i]);
2348 }
2349 if (minv != 0)
2350 {
2351 tmp = build2 (PLUS_EXPR, type, tmp,
2352 wide_int_to_tree (type, -minv));
2353 maxv2 = maxv - minv;
2354 }
2355 new_val = wide_int_to_tree (type, maxv2);
2356 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
2357 }
2358 }
2359 }
2360 }
2361 }
2362
2363 /* OP is an operand of a truth value expression which is known to have
2364 a particular value. Register any asserts for OP and for any
2365 operands in OP's defining statement.
2366
2367 If CODE is EQ_EXPR, then we want to register OP is zero (false),
2368 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
2369
2370 static void
2371 register_edge_assert_for_1 (tree op, enum tree_code code,
2372 edge e, vec<assert_info> &asserts)
2373 {
2374 gimple *op_def;
2375 tree val;
2376 enum tree_code rhs_code;
2377
2378 /* We only care about SSA_NAMEs. */
2379 if (TREE_CODE (op) != SSA_NAME)
2380 return;
2381
2382 /* We know that OP will have a zero or nonzero value. */
2383 val = build_int_cst (TREE_TYPE (op), 0);
2384 add_assert_info (asserts, op, op, code, val);
2385
2386 /* Now look at how OP is set. If it's set from a comparison,
2387 a truth operation or some bit operations, then we may be able
2388 to register information about the operands of that assignment. */
2389 op_def = SSA_NAME_DEF_STMT (op);
2390 if (gimple_code (op_def) != GIMPLE_ASSIGN)
2391 return;
2392
2393 rhs_code = gimple_assign_rhs_code (op_def);
2394
2395 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2396 {
2397 bool invert = (code == EQ_EXPR ? true : false);
2398 tree op0 = gimple_assign_rhs1 (op_def);
2399 tree op1 = gimple_assign_rhs2 (op_def);
2400
2401 if (TREE_CODE (op0) == SSA_NAME)
2402 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2403 if (TREE_CODE (op1) == SSA_NAME)
2404 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2405 }
2406 else if ((code == NE_EXPR
2407 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2408 || (code == EQ_EXPR
2409 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2410 {
2411 /* Recurse on each operand. */
2412 tree op0 = gimple_assign_rhs1 (op_def);
2413 tree op1 = gimple_assign_rhs2 (op_def);
2414 if (TREE_CODE (op0) == SSA_NAME
2415 && has_single_use (op0))
2416 register_edge_assert_for_1 (op0, code, e, asserts);
2417 if (TREE_CODE (op1) == SSA_NAME
2418 && has_single_use (op1))
2419 register_edge_assert_for_1 (op1, code, e, asserts);
2420 }
2421 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2422 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2423 {
2424 /* Recurse, flipping CODE. */
2425 code = invert_tree_comparison (code, false);
2426 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2427 }
2428 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2429 {
2430 /* Recurse through the copy. */
2431 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2432 }
2433 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2434 {
2435 /* Recurse through the type conversion, unless it is a narrowing
2436 conversion or a conversion from a non-integral type. */
2437 tree rhs = gimple_assign_rhs1 (op_def);
2438 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2439 && (TYPE_PRECISION (TREE_TYPE (rhs))
2440 <= TYPE_PRECISION (TREE_TYPE (op))))
2441 register_edge_assert_for_1 (rhs, code, e, asserts);
2442 }
2443 }
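
/* As an illustrative sketch of the recursion above, for

     _1 = a_2 < b_3;
     _4 = c_5 != 0;
     _6 = _1 & _4;
     if (_6 != 0)

   registering NE_EXPR for _6 recurses through the BIT_AND_EXPR and
   queues asserts for a_2 < b_3 and c_5 != 0 on the true edge.  */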
2444
2445 /* Check if comparison
2446 NAME COND_OP INTEGER_CST
2447 has a form of
2448 (X & 11...100..0) COND_OP XX...X00...0
2449 Such comparison can yield assertions like
2450 X >= XX...X00...0
2451 X <= XX...X11...1
2452 in case of COND_OP being EQ_EXPR or
2453 X < XX...X00...0
2454 X > XX...X11...1
2455 in case of NE_EXPR. */
2456
2457 static bool
2458 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2459 tree *new_name, tree *low, enum tree_code *low_code,
2460 tree *high, enum tree_code *high_code)
2461 {
2462 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2463
2464 if (!is_gimple_assign (def_stmt)
2465 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2466 return false;
2467
2468 tree t = gimple_assign_rhs1 (def_stmt);
2469 tree maskt = gimple_assign_rhs2 (def_stmt);
2470 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2471 return false;
2472
2473 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2474 wide_int inv_mask = ~mask;
2475 /* A mask of all zeros or all ones must have been folded away by now,
so don't bother optimizing. */
2476 if (mask == 0 || inv_mask == 0)
2477 return false;
2478
2479 /* Assume VALT is INTEGER_CST. */
2480 wi::tree_to_wide_ref val = wi::to_wide (valt);
2481
2482 if ((inv_mask & (inv_mask + 1)) != 0
2483 || (val & mask) != val)
2484 return false;
2485
2486 bool is_range = cond_code == EQ_EXPR;
2487
2488 tree type = TREE_TYPE (t);
2489 wide_int min = wi::min_value (type),
2490 max = wi::max_value (type);
2491
2492 if (is_range)
2493 {
2494 *low_code = val == min ? ERROR_MARK : GE_EXPR;
2495 *high_code = val == max ? ERROR_MARK : LE_EXPR;
2496 }
2497 else
2498 {
2499 /* We can still generate an assertion if one of the
2500 alternatives is known to always be false. */
2501 if (val == min)
2502 {
2503 *low_code = (enum tree_code) 0;
2504 *high_code = GT_EXPR;
2505 }
2506 else if ((val | inv_mask) == max)
2507 {
2508 *low_code = LT_EXPR;
2509 *high_code = (enum tree_code) 0;
2510 }
2511 else
2512 return false;
2513 }
2514
2515 *new_name = t;
2516 *low = wide_int_to_tree (type, val);
2517 *high = wide_int_to_tree (type, val | inv_mask);
2518
2519 return true;
2520 }
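
/* For instance (sketch, x_1 a 32-bit unsigned):

     (x_1 & 0xfffffff0) == 0x1230  yields  x_1 >= 0x1230 and x_1 <= 0x123f
     (x_1 & 0xfffffff0) != 0       yields  x_1 > 0xf

   where in the second case the low-bound alternative is always false.  */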
2521
2522 /* Try to register an edge assertion for SSA name NAME on edge E for
2523 the condition COND contributing to the conditional jump pointed to by
2524 SI. */
2525
2526 void
2527 register_edge_assert_for (tree name, edge e,
2528 enum tree_code cond_code, tree cond_op0,
2529 tree cond_op1, vec<assert_info> &asserts)
2530 {
2531 tree val;
2532 enum tree_code comp_code;
2533 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2534
2535 /* Do not attempt to infer anything in names that flow through
2536 abnormal edges. */
2537 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2538 return;
2539
2540 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2541 cond_op0, cond_op1,
2542 is_else_edge,
2543 &comp_code, &val))
2544 return;
2545
2546 /* Register ASSERT_EXPRs for name. */
2547 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2548 cond_op1, is_else_edge, asserts);
2549
2550
2551 /* If COND is effectively an equality test of an SSA_NAME against
2552 the value zero or one, then we may be able to assert values
2553 for SSA_NAMEs which flow into COND. */
2554
2555 /* In the case of NAME == 1 or NAME != 0, with a BIT_AND_EXPR defining
2556 statement of NAME, we can assert that both operands of the
2557 BIT_AND_EXPR have nonzero value. */
2558 if (((comp_code == EQ_EXPR && integer_onep (val))
2559 || (comp_code == NE_EXPR && integer_zerop (val))))
2560 {
2561 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2562
2563 if (is_gimple_assign (def_stmt)
2564 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2565 {
2566 tree op0 = gimple_assign_rhs1 (def_stmt);
2567 tree op1 = gimple_assign_rhs2 (def_stmt);
2568 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2569 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2570 }
2571 }
2572
2573 /* In the case of NAME == 0 or NAME != 1, with a BIT_IOR_EXPR defining
2574 statement of NAME, we can assert that both operands of the
2575 BIT_IOR_EXPR have zero value. */
2576 if (((comp_code == EQ_EXPR && integer_zerop (val))
2577 || (comp_code == NE_EXPR && integer_onep (val))))
2578 {
2579 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2580
2581 /* For BIT_IOR_EXPR both operands necessarily have zero value
2582 only if NAME == 0, or if the type precision is one. */
2583 if (is_gimple_assign (def_stmt)
2584 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
2585 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
2586 || comp_code == EQ_EXPR)))
2587 {
2588 tree op0 = gimple_assign_rhs1 (def_stmt);
2589 tree op1 = gimple_assign_rhs2 (def_stmt);
2590 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2591 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2592 }
2593 }
2594
2595 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
2596 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2597 && TREE_CODE (val) == INTEGER_CST)
2598 {
2599 enum tree_code low_code, high_code;
2600 tree low, high;
2601 if (is_masked_range_test (name, val, comp_code, &name, &low,
2602 &low_code, &high, &high_code))
2603 {
2604 if (low_code != ERROR_MARK)
2605 register_edge_assert_for_2 (name, e, low_code, name,
2606 low, /*invert*/false, asserts);
2607 if (high_code != ERROR_MARK)
2608 register_edge_assert_for_2 (name, e, high_code, name,
2609 high, /*invert*/false, asserts);
2610 }
2611 }
2612 }
2613
2614 /* Finish found ASSERTS for E and register them at GSI. */
2615
2616 static void
2617 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2618 vec<assert_info> &asserts)
2619 {
2620 for (unsigned i = 0; i < asserts.length (); ++i)
2621 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
2622 reachable from E. */
2623 if (live_on_edge (e, asserts[i].name))
2624 register_new_assert_for (asserts[i].name, asserts[i].expr,
2625 asserts[i].comp_code, asserts[i].val,
2626 NULL, e, gsi);
2627 }
2628
2629
2630
2631 /* Determine whether the outgoing edges of BB should receive an
2632 ASSERT_EXPR for each of the operands of BB's LAST statement.
2633 The last statement of BB must be a COND_EXPR.
2634
2635 If any of the sub-graphs rooted at BB have an interesting use of
2636 the predicate operands, an assert location node is added to the
2637 list of assertions for the corresponding operands. */
2638
2639 static void
2640 find_conditional_asserts (basic_block bb, gcond *last)
2641 {
2642 gimple_stmt_iterator bsi;
2643 tree op;
2644 edge_iterator ei;
2645 edge e;
2646 ssa_op_iter iter;
2647
2648 bsi = gsi_for_stmt (last);
2649
2650 /* Look for uses of the operands in each of the sub-graphs
2651 rooted at BB. We need to check each of the outgoing edges
2652 separately, so that we know what kind of ASSERT_EXPR to
2653 insert. */
2654 FOR_EACH_EDGE (e, ei, bb->succs)
2655 {
2656 if (e->dest == bb)
2657 continue;
2658
2659 /* Register the necessary assertions for each operand in the
2660 conditional predicate. */
2661 auto_vec<assert_info, 8> asserts;
2662 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
2663 register_edge_assert_for (op, e,
2664 gimple_cond_code (last),
2665 gimple_cond_lhs (last),
2666 gimple_cond_rhs (last), asserts);
2667 finish_register_edge_assert_for (e, bsi, asserts);
2668 }
2669 }
2670
2671 struct case_info
2672 {
2673 tree expr;
2674 basic_block bb;
2675 };
2676
2677 /* Compare two case labels sorting first by the destination bb index
2678 and then by the case value. */
2679
2680 static int
2681 compare_case_labels (const void *p1, const void *p2)
2682 {
2683 const struct case_info *ci1 = (const struct case_info *) p1;
2684 const struct case_info *ci2 = (const struct case_info *) p2;
2685 int idx1 = ci1->bb->index;
2686 int idx2 = ci2->bb->index;
2687
2688 if (idx1 < idx2)
2689 return -1;
2690 else if (idx1 == idx2)
2691 {
2692 /* Make sure the default label is first in a group. */
2693 if (!CASE_LOW (ci1->expr))
2694 return -1;
2695 else if (!CASE_LOW (ci2->expr))
2696 return 1;
2697 else
2698 return tree_int_cst_compare (CASE_LOW (ci1->expr),
2699 CASE_LOW (ci2->expr));
2700 }
2701 else
2702 return 1;
2703 }
2704
2705 /* Determine whether the outgoing edges of BB should receive an
2706 ASSERT_EXPR for each of the operands of BB's LAST statement.
2707 The last statement of BB must be a SWITCH_EXPR.
2708
2709 If any of the sub-graphs rooted at BB have an interesting use of
2710 the predicate operands, an assert location node is added to the
2711 list of assertions for the corresponding operands. */
2712
2713 static void
2714 find_switch_asserts (basic_block bb, gswitch *last)
2715 {
2716 gimple_stmt_iterator bsi;
2717 tree op;
2718 edge e;
2719 struct case_info *ci;
2720 size_t n = gimple_switch_num_labels (last);
2721 #if GCC_VERSION >= 4000
2722 unsigned int idx;
2723 #else
2724 /* Work around GCC 3.4 bug (PR 37086). */
2725 volatile unsigned int idx;
2726 #endif
2727
2728 bsi = gsi_for_stmt (last);
2729 op = gimple_switch_index (last);
2730 if (TREE_CODE (op) != SSA_NAME)
2731 return;
2732
2733 /* Build a vector of case labels sorted by destination label. */
2734 ci = XNEWVEC (struct case_info, n);
2735 for (idx = 0; idx < n; ++idx)
2736 {
2737 ci[idx].expr = gimple_switch_label (last, idx);
2738 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
2739 }
2740 edge default_edge = find_edge (bb, ci[0].bb);
2741 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
2742
2743 for (idx = 0; idx < n; ++idx)
2744 {
2745 tree min, max;
2746 tree cl = ci[idx].expr;
2747 basic_block cbb = ci[idx].bb;
2748
2749 min = CASE_LOW (cl);
2750 max = CASE_HIGH (cl);
2751
2752 /* If there are multiple case labels with the same destination
2753 we need to combine them into a single value range for the edge. */
2754 if (idx + 1 < n && cbb == ci[idx + 1].bb)
2755 {
2756 /* Skip labels until the last of the group. */
2757 do {
2758 ++idx;
2759 } while (idx < n && cbb == ci[idx].bb);
2760 --idx;
2761
2762 /* Pick up the maximum of the case label range. */
2763 if (CASE_HIGH (ci[idx].expr))
2764 max = CASE_HIGH (ci[idx].expr);
2765 else
2766 max = CASE_LOW (ci[idx].expr);
2767 }
2768
2769 /* Can't extract a useful assertion out of a range that includes the
2770 default label. */
2771 if (min == NULL_TREE)
2772 continue;
2773
2774 /* Find the edge to register the assert expr on. */
2775 e = find_edge (bb, cbb);
2776
2777 /* Register the necessary assertions for the operand in the
2778 SWITCH_EXPR. */
2779 auto_vec<assert_info, 8> asserts;
2780 register_edge_assert_for (op, e,
2781 max ? GE_EXPR : EQ_EXPR,
2782 op, fold_convert (TREE_TYPE (op), min),
2783 asserts);
2784 if (max)
2785 register_edge_assert_for (op, e, LE_EXPR, op,
2786 fold_convert (TREE_TYPE (op), max),
2787 asserts);
2788 finish_register_edge_assert_for (e, bsi, asserts);
2789 }
2790
2791 XDELETEVEC (ci);
2792
2793 if (!live_on_edge (default_edge, op))
2794 return;
2795
2796 /* Now register along the default edge assertions that correspond
2797 to the anti-range of each label. */
2798 int insertion_limit = param_max_vrp_switch_assertions;
2799 if (insertion_limit == 0)
2800 return;
2801
2802 /* We can't do this if the default case shares a label with another case. */
2803 tree default_cl = gimple_switch_default_label (last);
2804 for (idx = 1; idx < n; idx++)
2805 {
2806 tree min, max;
2807 tree cl = gimple_switch_label (last, idx);
2808 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
2809 continue;
2810
2811 min = CASE_LOW (cl);
2812 max = CASE_HIGH (cl);
2813
2814 /* Combine contiguous case ranges to reduce the number of assertions
2815 to insert. */
2816 for (idx = idx + 1; idx < n; idx++)
2817 {
2818 tree next_min, next_max;
2819 tree next_cl = gimple_switch_label (last, idx);
2820 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
2821 break;
2822
2823 next_min = CASE_LOW (next_cl);
2824 next_max = CASE_HIGH (next_cl);
2825
2826 wide_int difference = (wi::to_wide (next_min)
2827 - wi::to_wide (max ? max : min));
2828 if (wi::eq_p (difference, 1))
2829 max = next_max ? next_max : next_min;
2830 else
2831 break;
2832 }
2833 idx--;
2834
2835 if (max == NULL_TREE)
2836 {
2837 /* Register the assertion OP != MIN. */
2838 auto_vec<assert_info, 8> asserts;
2839 min = fold_convert (TREE_TYPE (op), min);
2840 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
2841 asserts);
2842 finish_register_edge_assert_for (default_edge, bsi, asserts);
2843 }
2844 else
2845 {
2846 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
2847 which will give OP the anti-range ~[MIN,MAX]. */
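/* E.g. (sketch) for a combined case range 5 ... 10 this registers
   (unsigned) op - 5 > 5 on the default edge, i.e. op gets the
   anti-range ~[5, 10].  */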
2848 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
2849 min = fold_convert (TREE_TYPE (uop), min);
2850 max = fold_convert (TREE_TYPE (uop), max);
2851
2852 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
2853 tree rhs = int_const_binop (MINUS_EXPR, max, min);
2854 register_new_assert_for (op, lhs, GT_EXPR, rhs,
2855 NULL, default_edge, bsi);
2856 }
2857
2858 if (--insertion_limit == 0)
2859 break;
2860 }
2861 }
2862
2863
2864 /* Traverse all the statements in block BB looking for statements that
2865 may generate useful assertions for the SSA names in their operands.
2866 If a statement produces a useful assertion A for name N_i, then the
2867 list of assertions already generated for N_i is scanned to
2868 determine if A is actually needed.
2869
2870 If N_i already had the assertion A at a location dominating the
2871 current location, then nothing needs to be done. Otherwise, the
2872 new location for A is recorded instead.
2873
2874 1- For every statement S in BB, all the variables used by S are
2875 added to bitmap FOUND_IN_SUBGRAPH.
2876
2877 2- If statement S uses an operand N in a way that exposes a known
2878 value range for N, then if N was not already generated by an
2879 ASSERT_EXPR, create a new assert location for N. For instance,
2880 if N is a pointer and the statement dereferences it, we can
2881 assume that N is not NULL.
2882
2883 3- COND_EXPRs are a special case of #2. We can derive range
2884 information from the predicate but need to insert different
2885 ASSERT_EXPRs for each of the sub-graphs rooted at the
2886 conditional block. If the last statement of BB is a conditional
2887 expression of the form 'X op Y', then
2888
2889 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
2890
2891 b) If the conditional is the only entry point to the sub-graph
2892 corresponding to the THEN_CLAUSE, recurse into it. On
2893 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
2894 an ASSERT_EXPR is added for the corresponding variable.
2895
2896 c) Repeat step (b) on the ELSE_CLAUSE.
2897
2898 d) Mark X and Y in FOUND_IN_SUBGRAPH.
2899
2900 For instance,
2901
2902 if (a == 9)
2903 b = a;
2904 else
2905 b = c + 1;
2906
2907 In this case, an assertion on the THEN clause is useful to
2908 determine that 'a' is always 9 on that edge. However, an assertion
2909 on the ELSE clause would be unnecessary.
2910
2911 4- If BB does not end in a conditional expression, then we recurse
2912 into BB's dominator children.
2913
2914 At the end of the recursive traversal, every SSA name will have a
2915 list of locations where ASSERT_EXPRs should be added. When a new
2916 location for name N is found, it is registered by calling
2917 register_new_assert_for. That function keeps track of all the
2918 registered assertions to prevent adding unnecessary assertions.
2919 For instance, if a pointer P_4 is dereferenced more than once in a
2920 dominator tree, only the location dominating all the dereferences of
2921 P_4 will receive an ASSERT_EXPR. */
2922
2923 static void
2924 find_assert_locations_1 (basic_block bb, sbitmap live)
2925 {
2926 gimple *last;
2927
2928 last = last_stmt (bb);
2929
2930 /* If BB's last statement is a conditional statement involving integer
2931 operands, determine if we need to add ASSERT_EXPRs. */
2932 if (last
2933 && gimple_code (last) == GIMPLE_COND
2934 && !fp_predicate (last)
2935 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
2936 find_conditional_asserts (bb, as_a <gcond *> (last));
2937
2938 /* If BB's last statement is a switch statement involving integer
2939 operands, determine if we need to add ASSERT_EXPRs. */
2940 if (last
2941 && gimple_code (last) == GIMPLE_SWITCH
2942 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
2943 find_switch_asserts (bb, as_a <gswitch *> (last));
2944
2945 /* Traverse all the statements in BB marking used names and looking
2946 for statements that may infer assertions for their used operands. */
2947 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
2948 gsi_prev (&si))
2949 {
2950 gimple *stmt;
2951 tree op;
2952 ssa_op_iter i;
2953
2954 stmt = gsi_stmt (si);
2955
2956 if (is_gimple_debug (stmt))
2957 continue;
2958
2959 /* See if we can derive an assertion for any of STMT's operands. */
2960 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
2961 {
2962 tree value;
2963 enum tree_code comp_code;
2964
2965 /* If op is not live beyond this stmt, do not bother to insert
2966 asserts for it. */
2967 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
2968 continue;
2969
2970 /* If OP is used in such a way that we can infer a value
2971 range for it, and we don't find a previous assertion for
2972 it, create a new assertion location node for OP. */
2973 if (infer_value_range (stmt, op, &comp_code, &value))
2974 {
2975 /* If we are able to infer a nonzero value range for OP,
2976 then walk backwards through the use-def chain to see if OP
2977 was set via a typecast.
2978
2979 If so, then we can also infer a nonzero value range
2980 for the operand of the NOP_EXPR. */
2981 if (comp_code == NE_EXPR && integer_zerop (value))
2982 {
2983 tree t = op;
2984 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
2985
2986 while (is_gimple_assign (def_stmt)
2987 && CONVERT_EXPR_CODE_P
2988 (gimple_assign_rhs_code (def_stmt))
2989 && TREE_CODE
2990 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
2991 && POINTER_TYPE_P
2992 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
2993 {
2994 t = gimple_assign_rhs1 (def_stmt);
2995 def_stmt = SSA_NAME_DEF_STMT (t);
2996
2997 /* Note we want to register the assert for the
2998 operand of the NOP_EXPR after SI, not after the
2999 conversion. */
3000 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3001 register_new_assert_for (t, t, comp_code, value,
3002 bb, NULL, si);
3003 }
3004 }
3005
3006 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3007 }
3008 }
3009
3010 /* Update live. */
3011 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3012 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3013 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3014 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3015 }
3016
3017 /* Traverse all PHI nodes in BB, updating live. */
3018 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3019 gsi_next (&si))
3020 {
3021 use_operand_p arg_p;
3022 ssa_op_iter i;
3023 gphi *phi = si.phi ();
3024 tree res = gimple_phi_result (phi);
3025
3026 if (virtual_operand_p (res))
3027 continue;
3028
3029 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3030 {
3031 tree arg = USE_FROM_PTR (arg_p);
3032 if (TREE_CODE (arg) == SSA_NAME)
3033 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3034 }
3035
3036 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3037 }
3038 }
3039
3040 /* Do an RPO walk over the function computing SSA name liveness
3041 on-the-fly and deciding on assert expressions to insert. */
3042
3043 static void
3044 find_assert_locations (void)
3045 {
3046 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3047 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3048 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3049 int rpo_cnt, i;
3050
3051 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3052 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3053 for (i = 0; i < rpo_cnt; ++i)
3054 bb_rpo[rpo[i]] = i;
3055
3056 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3057 the order in which we compute liveness and insert asserts, we would
3058 otherwise fail to insert asserts into the loop latch. */
3059 loop_p loop;
3060 FOR_EACH_LOOP (loop, 0)
3061 {
3062 i = loop->latch->index;
3063 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3064 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3065 !gsi_end_p (gsi); gsi_next (&gsi))
3066 {
3067 gphi *phi = gsi.phi ();
3068 if (virtual_operand_p (gimple_phi_result (phi)))
3069 continue;
3070 tree arg = gimple_phi_arg_def (phi, j);
3071 if (TREE_CODE (arg) == SSA_NAME)
3072 {
3073 if (live[i] == NULL)
3074 {
3075 live[i] = sbitmap_alloc (num_ssa_names);
3076 bitmap_clear (live[i]);
3077 }
3078 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3079 }
3080 }
3081 }
3082
3083 for (i = rpo_cnt - 1; i >= 0; --i)
3084 {
3085 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3086 edge e;
3087 edge_iterator ei;
3088
3089 if (!live[rpo[i]])
3090 {
3091 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3092 bitmap_clear (live[rpo[i]]);
3093 }
3094
3095 /* Process BB and update the live information with uses in
3096 this block. */
3097 find_assert_locations_1 (bb, live[rpo[i]]);
3098
3099 /* Merge liveness into the predecessor blocks and free it. */
3100 if (!bitmap_empty_p (live[rpo[i]]))
3101 {
3102 int pred_rpo = i;
3103 FOR_EACH_EDGE (e, ei, bb->preds)
3104 {
3105 int pred = e->src->index;
3106 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3107 continue;
3108
3109 if (!live[pred])
3110 {
3111 live[pred] = sbitmap_alloc (num_ssa_names);
3112 bitmap_clear (live[pred]);
3113 }
3114 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3115
3116 if (bb_rpo[pred] < pred_rpo)
3117 pred_rpo = bb_rpo[pred];
3118 }
3119
3120 /* Record the RPO number of the last visited block that needs
3121 live information from this block. */
3122 last_rpo[rpo[i]] = pred_rpo;
3123 }
3124 else
3125 {
3126 sbitmap_free (live[rpo[i]]);
3127 live[rpo[i]] = NULL;
3128 }
3129
3130 /* We can free all successors' live bitmaps if all of their
3131 predecessors have been visited already. */
3132 FOR_EACH_EDGE (e, ei, bb->succs)
3133 if (last_rpo[e->dest->index] == i
3134 && live[e->dest->index])
3135 {
3136 sbitmap_free (live[e->dest->index]);
3137 live[e->dest->index] = NULL;
3138 }
3139 }
3140
3141 XDELETEVEC (rpo);
3142 XDELETEVEC (bb_rpo);
3143 XDELETEVEC (last_rpo);
3144 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3145 if (live[i])
3146 sbitmap_free (live[i]);
3147 XDELETEVEC (live);
3148 }
3149
3150 /* Create an ASSERT_EXPR for NAME and insert it in the location
3151 indicated by LOC. Return true if we made any edge insertions. */
3152
3153 static bool
3154 process_assert_insertions_for (tree name, assert_locus *loc)
3155 {
3156 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3157 gimple *stmt;
3158 tree cond;
3159 gimple *assert_stmt;
3160 edge_iterator ei;
3161 edge e;
3162
3163 /* If we have X <=> X do not insert an assert expr for that. */
3164 if (loc->expr == loc->val)
3165 return false;
3166
3167 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3168 assert_stmt = build_assert_expr_for (cond, name);
3169 if (loc->e)
3170 {
3171 /* We have been asked to insert the assertion on an edge. This
3172 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3173 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3174 || (gimple_code (gsi_stmt (loc->si))
3175 == GIMPLE_SWITCH));
3176
3177 gsi_insert_on_edge (loc->e, assert_stmt);
3178 return true;
3179 }
3180
3181 /* If the stmt iterator points at the end then this is an insertion
3182 at the beginning of a block. */
3183 if (gsi_end_p (loc->si))
3184 {
3185 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3186 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3187 return false;
3188
3189 }
3190 /* Otherwise, we can insert right after LOC->SI iff the
3191 statement is not required to be the last statement in the block. */
3192 stmt = gsi_stmt (loc->si);
3193 if (!stmt_ends_bb_p (stmt))
3194 {
3195 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3196 return false;
3197 }
3198
3199 /* If STMT must be the last statement in BB, we can only insert new
3200 assertions on the non-abnormal edge out of BB. Note that since
3201 STMT is not control flow, there may only be one non-abnormal/eh edge
3202 out of BB. */
3203 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3204 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3205 {
3206 gsi_insert_on_edge (e, assert_stmt);
3207 return true;
3208 }
3209
3210 gcc_unreachable ();
3211 }
3212
3213 /* Qsort helper for sorting assert locations. If stable is true, don't
3214 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3215 on the other hand, some pointers might then be NULL. */
3216
3217 template <bool stable>
3218 static int
3219 compare_assert_loc (const void *pa, const void *pb)
3220 {
3221 assert_locus * const a = *(assert_locus * const *)pa;
3222 assert_locus * const b = *(assert_locus * const *)pb;
3223
3224 /* If stable, some asserts might be optimized away already, sort
3225 them last. */
3226 if (stable)
3227 {
3228 if (a == NULL)
3229 return b != NULL;
3230 else if (b == NULL)
3231 return -1;
3232 }
3233
3234 if (a->e == NULL && b->e != NULL)
3235 return 1;
3236 else if (a->e != NULL && b->e == NULL)
3237 return -1;
3238
3239 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3240 no need to test both a->e and b->e. */
3241
3242 /* Sort by destination index. */
3243 if (a->e == NULL)
3244 ;
3245 else if (a->e->dest->index > b->e->dest->index)
3246 return 1;
3247 else if (a->e->dest->index < b->e->dest->index)
3248 return -1;
3249
3250 /* Sort by comp_code. */
3251 if (a->comp_code > b->comp_code)
3252 return 1;
3253 else if (a->comp_code < b->comp_code)
3254 return -1;
3255
3256 hashval_t ha, hb;
3257
3258 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3259 uses DECL_UID of the VAR_DECL, so sorting might differ between
3260 -g and -g0. When doing the removal of redundant assert exprs
3261 and commonization to successors, this does not matter, but
3262 the final sort needs to be stable. */
3263 if (stable)
3264 {
3265 ha = 0;
3266 hb = 0;
3267 }
3268 else
3269 {
3270 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3271 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3272 }
3273
3274 /* Break the tie using hashing and source/bb index. */
3275 if (ha == hb)
3276 return (a->e != NULL
3277 ? a->e->src->index - b->e->src->index
3278 : a->bb->index - b->bb->index);
3279 return ha > hb ? 1 : -1;
3280 }
3281
3282 /* Process all the insertions registered for every name N_i registered
3283 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3284 found in ASSERTS_FOR[i]. */
3285
3286 static void
3287 process_assert_insertions (void)
3288 {
3289 unsigned i;
3290 bitmap_iterator bi;
3291 bool update_edges_p = false;
3292 int num_asserts = 0;
3293
3294 if (dump_file && (dump_flags & TDF_DETAILS))
3295 dump_all_asserts (dump_file);
3296
3297 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3298 {
3299 assert_locus *loc = asserts_for[i];
3300 gcc_assert (loc);
3301
3302 auto_vec<assert_locus *, 16> asserts;
3303 for (; loc; loc = loc->next)
3304 asserts.safe_push (loc);
3305 asserts.qsort (compare_assert_loc<false>);
3306
3307 /* Push down common asserts to successors and remove redundant ones. */
3308 unsigned ecnt = 0;
3309 assert_locus *common = NULL;
3310 unsigned commonj = 0;
3311 for (unsigned j = 0; j < asserts.length (); ++j)
3312 {
3313 loc = asserts[j];
3314 if (! loc->e)
3315 common = NULL;
3316 else if (! common
3317 || loc->e->dest != common->e->dest
3318 || loc->comp_code != common->comp_code
3319 || ! operand_equal_p (loc->val, common->val, 0)
3320 || ! operand_equal_p (loc->expr, common->expr, 0))
3321 {
3322 commonj = j;
3323 common = loc;
3324 ecnt = 1;
3325 }
3326 else if (loc->e == asserts[j-1]->e)
3327 {
3328 /* Remove duplicate asserts. */
3329 if (commonj == j - 1)
3330 {
3331 commonj = j;
3332 common = loc;
3333 }
3334 free (asserts[j-1]);
3335 asserts[j-1] = NULL;
3336 }
3337 else
3338 {
3339 ecnt++;
3340 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3341 {
3342 /* We have the same assertion on all incoming edges of a BB.
3343 Insert it at the beginning of that block. */
3344 loc->bb = loc->e->dest;
3345 loc->e = NULL;
3346 loc->si = gsi_none ();
3347 common = NULL;
3348 /* Clear asserts commoned. */
3349 for (; commonj != j; ++commonj)
3350 if (asserts[commonj])
3351 {
3352 free (asserts[commonj]);
3353 asserts[commonj] = NULL;
3354 }
3355 }
3356 }
3357 }
3358
3359 /* The asserts vector sorting above might be unstable for
3360 -fcompare-debug, sort again to ensure a stable sort. */
3361 asserts.qsort (compare_assert_loc<true>);
3362 for (unsigned j = 0; j < asserts.length (); ++j)
3363 {
3364 loc = asserts[j];
3365 if (! loc)
3366 break;
3367 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3368 num_asserts++;
3369 free (loc);
3370 }
3371 }
3372
3373 if (update_edges_p)
3374 gsi_commit_edge_inserts ();
3375
3376 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
3377 num_asserts);
3378 }
3379
3380
3381 /* Traverse the flowgraph looking for conditional jumps to insert range
3382 expressions. These range expressions are meant to provide information
3383 to optimizations that need to reason in terms of value ranges. They
3384 will not be expanded into RTL. For instance, given:
3385
3386 x = ...
3387 y = ...
3388 if (x < y)
3389 y = x - 2;
3390 else
3391 x = y + 3;
3392
3393 this pass will transform the code into:
3394
3395 x = ...
3396 y = ...
3397 if (x < y)
3398 {
3399 x = ASSERT_EXPR <x, x < y>
3400 y = x - 2
3401 }
3402 else
3403 {
3404 y = ASSERT_EXPR <y, x >= y>
3405 x = y + 3
3406 }
3407
3408 The idea is that once copy and constant propagation have run, other
3409 optimizations will be able to determine what ranges of values can 'x'
3410 take in different paths of the code, simply by checking the reaching
3411 definition of 'x'. */
3412
3413 static void
3414 insert_range_assertions (void)
3415 {
3416 need_assert_for = BITMAP_ALLOC (NULL);
3417 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3418
3419 calculate_dominance_info (CDI_DOMINATORS);
3420
3421 find_assert_locations ();
3422 if (!bitmap_empty_p (need_assert_for))
3423 {
3424 process_assert_insertions ();
3425 update_ssa (TODO_update_ssa_no_phi);
3426 }
3427
3428 if (dump_file && (dump_flags & TDF_DETAILS))
3429 {
3430 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3431 dump_function_to_file (current_function_decl, dump_file, dump_flags);
3432 }
3433
3434 free (asserts_for);
3435 BITMAP_FREE (need_assert_for);
3436 }
3437
3438 class vrp_prop : public ssa_propagation_engine
3439 {
3440 public:
3441 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
3442 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
3443
3444 void vrp_initialize (void);
3445 void vrp_finalize (bool);
3446 void check_all_array_refs (void);
3447 bool check_array_ref (location_t, tree, bool);
3448 bool check_mem_ref (location_t, tree, bool);
3449 void search_for_addr_array (tree, location_t);
3450
3451 class vr_values vr_values;
3452 /* Temporary delegator to minimize code churn. */
3453 const value_range_equiv *get_value_range (const_tree op)
3454 { return vr_values.get_value_range (op); }
3455 void set_def_to_varying (const_tree def)
3456 { vr_values.set_def_to_varying (def); }
3457 void set_defs_to_varying (gimple *stmt)
3458 { vr_values.set_defs_to_varying (stmt); }
3459 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
3460 tree *output_p, value_range_equiv *vr)
3461 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
3462 bool update_value_range (const_tree op, value_range_equiv *vr)
3463 { return vr_values.update_value_range (op, vr); }
3464 void extract_range_basic (value_range_equiv *vr, gimple *stmt)
3465 { vr_values.extract_range_basic (vr, stmt); }
3466 void extract_range_from_phi_node (gphi *phi, value_range_equiv *vr)
3467 { vr_values.extract_range_from_phi_node (phi, vr); }
3468 };

3469 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible
3470 arrays and "struct" hacks. If VRP can determine that the
3471 array subscript is a constant, check if it is outside the valid
3472 range. If the array subscript is a RANGE, warn if it is
3473 non-overlapping with the valid range.
3474 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.
3475 Returns true if a warning has been issued. */
3476
3477 bool
3478 vrp_prop::check_array_ref (location_t location, tree ref,
3479 bool ignore_off_by_one)
3480 {
3481 if (TREE_NO_WARNING (ref))
3482 return false;
3483
3484 tree low_sub = TREE_OPERAND (ref, 1);
3485 tree up_sub = low_sub;
3486 tree up_bound = array_ref_up_bound (ref);
3487
3488 /* Referenced decl if one can be determined. */
3489 tree decl = NULL_TREE;
3490
3491 /* Set for accesses to interior zero-length arrays. */
3492 bool interior_zero_len = false;
3493
3494 tree up_bound_p1;
3495
3496 if (!up_bound
3497 || TREE_CODE (up_bound) != INTEGER_CST
3498 || (warn_array_bounds < 2
3499 && array_at_struct_end_p (ref)))
3500 {
3501 /* Accesses to trailing arrays via pointers may access storage
3502 beyond the type's array bounds. For such arrays, or for flexible
3503 array members, as well as for other arrays of an unknown size,
3504 replace the upper bound with a more permissive one that assumes
3505 the size of the largest object is PTRDIFF_MAX. */
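/* For instance (sketch), for a trailing flexible array member

     struct S { int n; char a[]; } *p;
     ... p->a[i] ...

   the upper bound used below is PTRDIFF_MAX / sizeof (char) - 1,
   unless a tighter bound can be determined from an initializer or
   from the size of the base object.  */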
3506 tree eltsize = array_ref_element_size (ref);
3507
3508 if (TREE_CODE (eltsize) != INTEGER_CST
3509 || integer_zerop (eltsize))
3510 {
3511 up_bound = NULL_TREE;
3512 up_bound_p1 = NULL_TREE;
3513 }
3514 else
3515 {
3516 tree ptrdiff_max = TYPE_MAX_VALUE (ptrdiff_type_node);
3517 tree maxbound = ptrdiff_max;
3518 tree arg = TREE_OPERAND (ref, 0);
3519
3520 const bool compref = TREE_CODE (arg) == COMPONENT_REF;
3521 if (compref)
3522 {
3523 /* Try to determine the size of the trailing array from
3524 its initializer (if it has one). */
3525 if (tree refsize = component_ref_size (arg, &interior_zero_len))
3526 if (TREE_CODE (refsize) == INTEGER_CST)
3527 maxbound = refsize;
3528 }
3529
3530 if (maxbound == ptrdiff_max)
3531 {
3532 /* Try to determine the size of the base object. Avoid
3533 COMPONENT_REF, already tried above. Using its DECL_SIZE
3534 wouldn't necessarily be correct if the reference is
3535 to its flexible array member initialized in a different
3536 translation unit. */
3537 poly_int64 off;
3538 if (tree base = get_addr_base_and_unit_offset (arg, &off))
3539 {
3540 if (!compref && DECL_P (base))
3541 if (tree basesize = DECL_SIZE_UNIT (base))
3542 if (TREE_CODE (basesize) == INTEGER_CST)
3543 {
3544 maxbound = basesize;
3545 decl = base;
3546 }
3547
3548 if (known_gt (off, 0))
3549 maxbound = wide_int_to_tree (sizetype,
3550 wi::sub (wi::to_wide (maxbound),
3551 off));
3552 }
3553 }
3554 else
3555 maxbound = fold_convert (sizetype, maxbound);
3556
3557 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
3558
3559 if (up_bound_p1 != NULL_TREE)
3560 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
3561 build_int_cst (ptrdiff_type_node, 1));
3562 else
3563 up_bound = NULL_TREE;
3564 }
3565 }
3566 else
3567 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
3568 build_int_cst (TREE_TYPE (up_bound), 1));
3569
3570 tree low_bound = array_ref_low_bound (ref);
3571
3572 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
3573
3574 bool warned = false;
3575
3576 /* Empty array. */
3577 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
3578 warned = warning_at (location, OPT_Warray_bounds,
3579 "array subscript %E is outside array bounds of %qT",
3580 low_sub, artype);
3581
3582 const value_range_equiv *vr = NULL;
3583 if (TREE_CODE (low_sub) == SSA_NAME)
3584 {
3585 vr = get_value_range (low_sub);
3586 if (!vr->undefined_p () && !vr->varying_p ())
3587 {
3588 low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
3589 up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
3590 }
3591 }
3592
3593 if (warned)
3594 ; /* Do nothing. */
3595 else if (vr && vr->kind () == VR_ANTI_RANGE)
3596 {
3597 if (up_bound
3598 && TREE_CODE (up_sub) == INTEGER_CST
3599 && (ignore_off_by_one
3600 ? tree_int_cst_lt (up_bound, up_sub)
3601 : tree_int_cst_le (up_bound, up_sub))
3602 && TREE_CODE (low_sub) == INTEGER_CST
3603 && tree_int_cst_le (low_sub, low_bound))
3604 warned = warning_at (location, OPT_Warray_bounds,
3605 "array subscript [%E, %E] is outside "
3606 "array bounds of %qT",
3607 low_sub, up_sub, artype);
3608 }
3609 else if (up_bound
3610 && TREE_CODE (up_sub) == INTEGER_CST
3611 && (ignore_off_by_one
3612 ? !tree_int_cst_le (up_sub, up_bound_p1)
3613 : !tree_int_cst_le (up_sub, up_bound)))
3614 warned = warning_at (location, OPT_Warray_bounds,
3615 "array subscript %E is above array bounds of %qT",
3616 up_sub, artype);
3617 else if (TREE_CODE (low_sub) == INTEGER_CST
3618 && tree_int_cst_lt (low_sub, low_bound))
3619 warned = warning_at (location, OPT_Warray_bounds,
3620 "array subscript %E is below array bounds of %qT",
3621 low_sub, artype);
3622
3623 if (!warned && interior_zero_len)
3624 warned = warning_at (location, OPT_Wzero_length_bounds,
3625 (TREE_CODE (low_sub) == INTEGER_CST
3626 ? G_("array subscript %E is outside the bounds "
3627 "of an interior zero-length array %qT")
3628 : G_("array subscript %qE is outside the bounds "
3629 "of an interior zero-length array %qT")),
3630 low_sub, artype);
3631
3632 if (warned)
3633 {
3634 if (dump_file && (dump_flags & TDF_DETAILS))
3635 {
3636 fprintf (dump_file, "Array bound warning for ");
3637 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
3638 fprintf (dump_file, "\n");
3639 }
3640
3641 ref = decl ? decl : TREE_OPERAND (ref, 0);
3642
3643 tree rec = NULL_TREE;
3644 if (TREE_CODE (ref) == COMPONENT_REF)
3645 {
3646 /* For a reference to a member of a struct object also mention
3647 the object if it's known. It may be defined in a different
3648 function than the out-of-bounds access. */
3649 rec = TREE_OPERAND (ref, 0);
3650 if (!VAR_P (rec))
3651 rec = NULL_TREE;
3652 ref = TREE_OPERAND (ref, 1);
3653 }
3654
3655 if (DECL_P (ref))
3656 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
3657 if (rec && DECL_P (rec))
3658 inform (DECL_SOURCE_LOCATION (rec), "defined here %qD", rec);
3659
3660 TREE_NO_WARNING (ref) = 1;
3661 }
3662
3663 return warned;
3664 }
3665
3666 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
3667 references to string constants. If VRP can determine that the array
3668 subscript is a constant, check if it is outside the valid range.
3669 If the array subscript is a RANGE, warn if it is non-overlapping
3670 with the valid range.
3671 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
3672 (used to allow one-past-the-end indices for code that takes
3673 the address of the just-past-the-end element of an array).
3674 Returns true if a warning has been issued. */
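
/* For example (an illustrative sketch, not a test case from GCC),
   given

     char a[4];
     char f (void) { return *(&a[0] + 6); }

   the load is 6 bytes into a 4-byte array, so this function is
   expected to issue a -Warray-bounds diagnostic for it. */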
3675
3676 bool
3677 vrp_prop::check_mem_ref (location_t location, tree ref,
3678 bool ignore_off_by_one)
3679 {
3680 if (TREE_NO_WARNING (ref))
3681 return false;
3682
3683 tree arg = TREE_OPERAND (ref, 0);
3684 /* The constant and variable offset of the reference. */
3685 tree cstoff = TREE_OPERAND (ref, 1);
3686 tree varoff = NULL_TREE;
3687
3688 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
3689
3690 /* The array or string constant bounds in bytes. Initially set
3691 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
3692 determined. */
3693 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
3694
3695 /* The minimum and maximum intermediate offset. For a reference
3696 to be valid, not only must the final offset/subscript be
3697 in bounds but all intermediate offsets must be as well.
3698 GCC may be able to deal gracefully with such out-of-bounds
3699 offsets so the checking is only enabled at -Warray-bounds=2
3700 where it may help detect bugs in uses of the intermediate
3701 offsets that could otherwise not be detectable. */
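/* For instance (an illustrative sketch), in
     p_2 = &a + 16;
     ... = MEM[p_2 + -16];
   the final offset into A is zero but the intermediate offset 16
   may already be out of bounds of A; such cases are diagnosed only
   at -Warray-bounds=2. */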
3702 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
3703 offset_int extrema[2] = { 0, wi::abs (ioff) };
3704
3705 /* The range of the byte offset into the reference. */
3706 offset_int offrange[2] = { 0, 0 };
3707
3708 const value_range_equiv *vr = NULL;
3709
3710 /* Determine the offsets and increment OFFRANGE for the bounds of each.
3711 The loop computes the range of the final offset for expressions such
3712 as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
3713 some range. */
3714 const unsigned limit = param_ssa_name_def_chain_limit;
3715 for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
3716 {
3717 gimple *def = SSA_NAME_DEF_STMT (arg);
3718 if (!is_gimple_assign (def))
3719 break;
3720
3721 tree_code code = gimple_assign_rhs_code (def);
3722 if (code == POINTER_PLUS_EXPR)
3723 {
3724 arg = gimple_assign_rhs1 (def);
3725 varoff = gimple_assign_rhs2 (def);
3726 }
3727 else if (code == ASSERT_EXPR)
3728 {
3729 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
3730 continue;
3731 }
3732 else
3733 return false;
3734
3735 /* VAROFF should always be an SSA_NAME here (and not even
3736 INTEGER_CST) but there's no point in taking chances. */
3737 if (TREE_CODE (varoff) != SSA_NAME)
3738 break;
3739
3740 vr = get_value_range (varoff);
3741 if (!vr || vr->undefined_p () || vr->varying_p ())
3742 break;
3743
3744 if (!vr->constant_p ())
3745 break;
3746
3747 if (vr->kind () == VR_RANGE)
3748 {
3749 offset_int min
3750 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
3751 offset_int max
3752 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
3753 if (min < max)
3754 {
3755 offrange[0] += min;
3756 offrange[1] += max;
3757 }
3758 else
3759 {
3760 /* When MIN >= MAX, the offset is effectively in a union
3761 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
3762 Since there is no way to represent such a range across
3763 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
3764 to OFFRANGE. */
3765 offrange[0] += arrbounds[0];
3766 offrange[1] += arrbounds[1];
3767 }
3768 }
3769 else
3770 {
3771 /* For an anti-range, analogously to the above, conservatively
3772 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
3773 offrange[0] += arrbounds[0];
3774 offrange[1] += arrbounds[1];
3775 }
3776
3777 /* Keep track of the minimum and maximum offset. */
3778 if (offrange[1] < 0 && offrange[1] < extrema[0])
3779 extrema[0] = offrange[1];
3780 if (offrange[0] > 0 && offrange[0] > extrema[1])
3781 extrema[1] = offrange[0];
3782
3783 if (offrange[0] < arrbounds[0])
3784 offrange[0] = arrbounds[0];
3785
3786 if (offrange[1] > arrbounds[1])
3787 offrange[1] = arrbounds[1];
3788 }
3789
3790 if (TREE_CODE (arg) == ADDR_EXPR)
3791 {
3792 arg = TREE_OPERAND (arg, 0);
3793 if (TREE_CODE (arg) != STRING_CST
3794 && TREE_CODE (arg) != PARM_DECL
3795 && TREE_CODE (arg) != VAR_DECL)
3796 return false;
3797 }
3798 else
3799 return false;
3800
3801 /* The type of the object being referred to. It can be an array,
3802 string literal, or a non-array type when the MEM_REF represents
3803 a reference/subscript via a pointer to an object that is not
3804 an element of an array. Incomplete types are excluded as well
3805 because their size is not known. */
3806 tree reftype = TREE_TYPE (arg);
3807 if (POINTER_TYPE_P (reftype)
3808 || !COMPLETE_TYPE_P (reftype)
3809 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST)
3810 return false;
3811
3812 /* Except in declared objects, references to trailing array members
3813 of structs and union objects are excluded because MEM_REF doesn't
3814 make it possible to identify the member where the reference
3815 originated. */
3816 if (RECORD_OR_UNION_TYPE_P (reftype)
3817 && (!VAR_P (arg)
3818 || (DECL_EXTERNAL (arg) && array_at_struct_end_p (ref))))
3819 return false;
3820
3821 arrbounds[0] = 0;
3822
3823 offset_int eltsize;
3824 if (TREE_CODE (reftype) == ARRAY_TYPE)
3825 {
3826 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
3827 if (tree dom = TYPE_DOMAIN (reftype))
3828 {
3829 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
3830 if (TREE_CODE (arg) == COMPONENT_REF)
3831 {
3832 offset_int size = maxobjsize;
3833 if (tree fldsize = component_ref_size (arg))
3834 size = wi::to_offset (fldsize);
3835 arrbounds[1] = wi::lrshift (size, wi::floor_log2 (eltsize));
3836 }
3837 else if (array_at_struct_end_p (arg) || !bnds[0] || !bnds[1])
3838 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
3839 else
3840 arrbounds[1] = (wi::to_offset (bnds[1]) - wi::to_offset (bnds[0])
3841 + 1) * eltsize;
3842 }
3843 else
3844 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
3845
3846 if (TREE_CODE (ref) == MEM_REF)
3847 {
3848 /* For MEM_REF determine a tighter bound of the non-array
3849 element type. */
3850 tree eltype = TREE_TYPE (reftype);
3851 while (TREE_CODE (eltype) == ARRAY_TYPE)
3852 eltype = TREE_TYPE (eltype);
3853 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
3854 }
3855 }
3856 else
3857 {
3858 eltsize = 1;
3859 tree size = TYPE_SIZE_UNIT (reftype);
3860 if (VAR_P (arg))
3861 if (tree initsize = DECL_SIZE_UNIT (arg))
3862 if (tree_int_cst_lt (size, initsize))
3863 size = initsize;
3864
3865 arrbounds[1] = wi::to_offset (size);
3866 }
3867
3868 offrange[0] += ioff;
3869 offrange[1] += ioff;
3870
3871 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
3872 is set (when taking the address of the one-past-last element
3873 of an array) but always use the stricter bound in diagnostics. */
3874 offset_int ubound = arrbounds[1];
3875 if (ignore_off_by_one)
3876 ubound += 1;
3877
3878 if (arrbounds[0] == arrbounds[1]
3879 || offrange[0] >= ubound
3880 || offrange[1] < arrbounds[0])
3881 {
3882 /* Treat a reference to a non-array object as one to an array
3883 of a single element. */
3884 if (TREE_CODE (reftype) != ARRAY_TYPE)
3885 reftype = build_array_type_nelts (reftype, 1);
3886
3887 if (TREE_CODE (ref) == MEM_REF)
3888 {
3889 /* Extract the element type out of MEM_REF and use its size
3890 to compute the index to print in the diagnostic; arrays
3891 in MEM_REF don't mean anything. A type with no size like
3892 void is as good as having a size of 1. */
3893 tree type = TREE_TYPE (ref);
3894 while (TREE_CODE (type) == ARRAY_TYPE)
3895 type = TREE_TYPE (type);
3896 if (tree size = TYPE_SIZE_UNIT (type))
3897 {
3898 offrange[0] = offrange[0] / wi::to_offset (size);
3899 offrange[1] = offrange[1] / wi::to_offset (size);
3900 }
3901 }
3902 else
3903 {
3904 /* For anything other than MEM_REF, compute the index to
3905 print in the diagnostic as the offset over element size. */
3906 offrange[0] = offrange[0] / eltsize;
3907 offrange[1] = offrange[1] / eltsize;
3908 }
3909
3910 bool warned;
3911 if (offrange[0] == offrange[1])
3912 warned = warning_at (location, OPT_Warray_bounds,
3913 "array subscript %wi is outside array bounds "
3914 "of %qT",
3915 offrange[0].to_shwi (), reftype);
3916 else
3917 warned = warning_at (location, OPT_Warray_bounds,
3918 "array subscript [%wi, %wi] is outside "
3919 "array bounds of %qT",
3920 offrange[0].to_shwi (),
3921 offrange[1].to_shwi (), reftype);
3922 if (warned && DECL_P (arg))
3923 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
3924
3925 if (warned)
3926 TREE_NO_WARNING (ref) = 1;
3927 return warned;
3928 }
3929
3930 if (warn_array_bounds < 2)
3931 return false;
3932
3933 /* At level 2 check also intermediate offsets. */
3934 int i = 0;
3935 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
3936 {
3937 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
3938
3939 if (warning_at (location, OPT_Warray_bounds,
3940 "intermediate array offset %wi is outside array bounds "
3941 "of %qT", tmpidx, reftype))
3942 {
3943 TREE_NO_WARNING (ref) = 1;
3944 return true;
3945 }
3946 }
3947
3948 return false;
3949 }
3950
3951 /* Search the expression T, located at LOCATION, for ARRAY_REFs and
3952 MEM_REFs, and call check_array_ref or check_mem_ref on each. */
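
/* E.g. (an illustrative sketch), for 'int a[4];' the expression
   '&a[5]' computes the address of an element past the permitted
   one-past-the-end position and is expected to be diagnosed, while
   '&a[4]' is accepted. */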
3953
3954 void
3955 vrp_prop::search_for_addr_array (tree t, location_t location)
3956 {
3957 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
3958 do
3959 {
3960 bool warned = false;
3961 if (TREE_CODE (t) == ARRAY_REF)
3962 warned = check_array_ref (location, t, true /*ignore_off_by_one*/);
3963 else if (TREE_CODE (t) == MEM_REF)
3964 warned = check_mem_ref (location, t, true /*ignore_off_by_one*/);
3965
3966 if (warned)
3967 TREE_NO_WARNING (t) = true;
3968
3969 t = TREE_OPERAND (t, 0);
3970 }
3971 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
3972
3973 if (TREE_CODE (t) != MEM_REF
3974 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
3975 || TREE_NO_WARNING (t))
3976 return;
3977
3978 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3979 tree low_bound, up_bound, el_sz;
3980 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
3981 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
3982 || !TYPE_DOMAIN (TREE_TYPE (tem)))
3983 return;
3984
3985 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
3986 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
3987 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
3988 if (!low_bound
3989 || TREE_CODE (low_bound) != INTEGER_CST
3990 || !up_bound
3991 || TREE_CODE (up_bound) != INTEGER_CST
3992 || !el_sz
3993 || TREE_CODE (el_sz) != INTEGER_CST)
3994 return;
3995
3996 offset_int idx;
3997 if (!mem_ref_offset (t).is_constant (&idx))
3998 return;
3999
4000 bool warned = false;
4001 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4002 if (idx < 0)
4003 {
4004 if (dump_file && (dump_flags & TDF_DETAILS))
4005 {
4006 fprintf (dump_file, "Array bound warning for ");
4007 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4008 fprintf (dump_file, "\n");
4009 }
4010 warned = warning_at (location, OPT_Warray_bounds,
4011 "array subscript %wi is below "
4012 "array bounds of %qT",
4013 idx.to_shwi (), TREE_TYPE (tem));
4014 }
4015 else if (idx > (wi::to_offset (up_bound)
4016 - wi::to_offset (low_bound) + 1))
4017 {
4018 if (dump_file && (dump_flags & TDF_DETAILS))
4019 {
4020 fprintf (dump_file, "Array bound warning for ");
4021 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4022 fprintf (dump_file, "\n");
4023 }
4024 warned = warning_at (location, OPT_Warray_bounds,
4025 "array subscript %wu is above "
4026 "array bounds of %qT",
4027 idx.to_uhwi (), TREE_TYPE (tem));
4028 }
4029
4030 if (warned)
4031 {
4032 if (DECL_P (t))
4033 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4034
4035 TREE_NO_WARNING (t) = 1;
4036 }
4037 }
4038
4039 /* walk_tree() callback that checks if *TP is an ARRAY_REF, a MEM_REF,
4040 or an ADDR_EXPR (inside which an array subscript one past the
4041 valid range is allowed). Call check_array_ref for each ARRAY_REF
4042 and check_mem_ref for each MEM_REF found. The location is
4043 passed in DATA. */
4044
4045 static tree
4046 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4047 {
4048 tree t = *tp;
4049 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4050 location_t location;
4051
4052 if (EXPR_HAS_LOCATION (t))
4053 location = EXPR_LOCATION (t);
4054 else
4055 location = gimple_location (wi->stmt);
4056
4057 *walk_subtree = TRUE;
4058
4059 bool warned = false;
4060 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4061 if (TREE_CODE (t) == ARRAY_REF)
4062 warned = vrp_prop->check_array_ref (location, t, false/*ignore_off_by_one*/);
4063 else if (TREE_CODE (t) == MEM_REF)
4064 warned = vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4065 else if (TREE_CODE (t) == ADDR_EXPR)
4066 {
4067 vrp_prop->search_for_addr_array (t, location);
4068 *walk_subtree = FALSE;
4069 }
4070 /* Propagate the no-warning bit to the outer expression. */
4071 if (warned)
4072 TREE_NO_WARNING (t) = true;
4073
4074 return NULL_TREE;
4075 }
4076
4077 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4078 to walk over all statements of all reachable BBs and call
4079 check_array_bounds on them. */
4080
4081 class check_array_bounds_dom_walker : public dom_walker
4082 {
4083 public:
4084 check_array_bounds_dom_walker (vrp_prop *prop)
4085 : dom_walker (CDI_DOMINATORS,
4086 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4087 flags, so that we can merge in information on
4088 non-executable edges from vrp_folder. */
4089 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4090 m_prop (prop) {}
4091 ~check_array_bounds_dom_walker () {}
4092
4093 edge before_dom_children (basic_block) FINAL OVERRIDE;
4094
4095 private:
4096 vrp_prop *m_prop;
4097 };
4098
4099 /* Implementation of dom_walker::before_dom_children.
4100
4101 Walk over all statements of BB and call check_array_bounds on them,
4102 and determine if there's a unique successor edge. */
4103
4104 edge
4105 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4106 {
4107 gimple_stmt_iterator si;
4108 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4109 {
4110 gimple *stmt = gsi_stmt (si);
4111 struct walk_stmt_info wi;
4112 if (!gimple_has_location (stmt)
4113 || is_gimple_debug (stmt))
4114 continue;
4115
4116 memset (&wi, 0, sizeof (wi));
4117
4118 wi.info = m_prop;
4119
4120 walk_gimple_op (stmt, check_array_bounds, &wi);
4121 }
4122
4123 /* Determine if there's a unique successor edge, and if so, return
4124 that back to dom_walker, ensuring that we don't visit blocks that
4125 became unreachable during the VRP propagation
4126 (PR tree-optimization/83312). */
4127 return find_taken_edge (bb, NULL_TREE);
4128 }
4129
4130 /* Walk over all statements of all reachable BBs and call check_array_bounds
4131 on them. */
4132
4133 void
4134 vrp_prop::check_all_array_refs ()
4135 {
4136 check_array_bounds_dom_walker w (this);
4137 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4138 }
4139
4140 /* Return true if all imm uses of VAR are either in STMT, or
4141 feed (optionally through a chain of single imm uses) GIMPLE_COND
4142 in basic block COND_BB. */
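
/* For instance (hypothetical GIMPLE), given
     _2 = x_1 & 3;
     if (_2 != 0) ...
   every use of x_1 other than in STMT reaches the GIMPLE_COND
   through the chain of single immediate uses ending in _2. */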
4143
4144 static bool
4145 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4146 {
4147 use_operand_p use_p, use2_p;
4148 imm_use_iterator iter;
4149
4150 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4151 if (USE_STMT (use_p) != stmt)
4152 {
4153 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4154 if (is_gimple_debug (use_stmt))
4155 continue;
4156 while (is_gimple_assign (use_stmt)
4157 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4158 && single_imm_use (gimple_assign_lhs (use_stmt),
4159 &use2_p, &use_stmt2))
4160 use_stmt = use_stmt2;
4161 if (gimple_code (use_stmt) != GIMPLE_COND
4162 || gimple_bb (use_stmt) != cond_bb)
4163 return false;
4164 }
4165 return true;
4166 }
4167
4168 /* Handle
4169 _4 = x_3 & 31;
4170 if (_4 != 0)
4171 goto <bb 6>;
4172 else
4173 goto <bb 7>;
4174 <bb 6>:
4175 __builtin_unreachable ();
4176 <bb 7>:
4177 x_5 = ASSERT_EXPR <x_3, ...>;
4178 If x_3 has no other immediate uses (checked by caller),
4179 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
4180 from the non-zero bitmask. */
4181
4182 void
4183 maybe_set_nonzero_bits (edge e, tree var)
4184 {
4185 basic_block cond_bb = e->src;
4186 gimple *stmt = last_stmt (cond_bb);
4187 tree cst;
4188
4189 if (stmt == NULL
4190 || gimple_code (stmt) != GIMPLE_COND
4191 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4192 ? EQ_EXPR : NE_EXPR)
4193 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4194 || !integer_zerop (gimple_cond_rhs (stmt)))
4195 return;
4196
4197 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4198 if (!is_gimple_assign (stmt)
4199 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4200 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4201 return;
4202 if (gimple_assign_rhs1 (stmt) != var)
4203 {
4204 gimple *stmt2;
4205
4206 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4207 return;
4208 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4209 if (!gimple_assign_cast_p (stmt2)
4210 || gimple_assign_rhs1 (stmt2) != var
4211 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4212 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4213 != TYPE_PRECISION (TREE_TYPE (var))))
4214 return;
4215 }
4216 cst = gimple_assign_rhs2 (stmt);
4217 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4218 wi::to_wide (cst)));
4219 }
4220
4221 /* Convert range assertion expressions into the implied copies and
4222 copy propagate away the copies. Doing the trivial copy propagation
4223 here avoids the need to run the full copy propagation pass after
4224 VRP.
4225
4226 FIXME, this will eventually lead to copy propagation removing the
4227 names that had useful range information attached to them. For
4228 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4229 then N_i will have the range [3, +INF].
4230
4231 However, by converting the assertion into the implied copy
4232 operation N_i = N_j, we will then copy-propagate N_j into the uses
4233 of N_i and lose the range information. We may want to hold on to
4234 ASSERT_EXPRs a little while longer as the ranges could be used in
4235 things like jump threading.
4236
4237 The problem with keeping ASSERT_EXPRs around is that passes after
4238 VRP need to handle them appropriately.
4239
4240 Another approach would be to make the range information a first
4241 class property of the SSA_NAME so that it can be queried from
4242 any pass. This is made somewhat more complex by the need for
4243 multiple ranges to be associated with one SSA_NAME. */
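
/* For instance (an illustrative sketch), the assertion

     x_5 = ASSERT_EXPR <x_3, x_3 > 4>;

   is treated as the implied copy x_5 = x_3: x_3 is propagated into
   all uses of x_5 and the statement is then removed. */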
4244
4245 static void
4246 remove_range_assertions (void)
4247 {
4248 basic_block bb;
4249 gimple_stmt_iterator si;
4250 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4251 a basic block preceded by GIMPLE_COND branching to it and
4252 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4253 int is_unreachable;
4254
4255 /* Note that the BSI iterator bump happens at the bottom of the
4256 loop and no bump is necessary if we're removing the statement
4257 referenced by the current BSI. */
4258 FOR_EACH_BB_FN (bb, cfun)
4259 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4260 {
4261 gimple *stmt = gsi_stmt (si);
4262
4263 if (is_gimple_assign (stmt)
4264 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4265 {
4266 tree lhs = gimple_assign_lhs (stmt);
4267 tree rhs = gimple_assign_rhs1 (stmt);
4268 tree var;
4269
4270 var = ASSERT_EXPR_VAR (rhs);
4271
4272 if (TREE_CODE (var) == SSA_NAME
4273 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4274 && SSA_NAME_RANGE_INFO (lhs))
4275 {
4276 if (is_unreachable == -1)
4277 {
4278 is_unreachable = 0;
4279 if (single_pred_p (bb)
4280 && assert_unreachable_fallthru_edge_p
4281 (single_pred_edge (bb)))
4282 is_unreachable = 1;
4283 }
4284 /* Handle
4285 if (x_7 >= 10 && x_7 < 20)
4286 __builtin_unreachable ();
4287 x_8 = ASSERT_EXPR <x_7, ...>;
4288 if the only uses of x_7 are in the ASSERT_EXPR and
4289 in the condition. In that case, we can copy the
4290 range info from x_8 computed in this pass also
4291 for x_7. */
4292 if (is_unreachable
4293 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4294 single_pred (bb)))
4295 {
4296 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4297 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4298 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4299 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4300 }
4301 }
4302
4303 /* Propagate the RHS into every use of the LHS. For SSA names
4304 also propagate abnormals as it merely restores the original
4305 IL in this case (a replace_uses_by would assert). */
4306 if (TREE_CODE (var) == SSA_NAME)
4307 {
4308 imm_use_iterator iter;
4309 use_operand_p use_p;
4310 gimple *use_stmt;
4311 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4312 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4313 SET_USE (use_p, var);
4314 }
4315 else
4316 replace_uses_by (lhs, var);
4317
4318 /* And finally, remove the copy, it is not needed. */
4319 gsi_remove (&si, true);
4320 release_defs (stmt);
4321 }
4322 else
4323 {
4324 if (!is_gimple_debug (gsi_stmt (si)))
4325 is_unreachable = 0;
4326 gsi_next (&si);
4327 }
4328 }
4329 }
4330
4331 /* Return true if STMT is interesting for VRP. */
4332
4333 bool
4334 stmt_interesting_for_vrp (gimple *stmt)
4335 {
4336 if (gimple_code (stmt) == GIMPLE_PHI)
4337 {
4338 tree res = gimple_phi_result (stmt);
4339 return (!virtual_operand_p (res)
4340 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4341 || POINTER_TYPE_P (TREE_TYPE (res))));
4342 }
4343 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4344 {
4345 tree lhs = gimple_get_lhs (stmt);
4346
4347 /* In general, assignments with virtual operands are not useful
4348 for deriving ranges, with the obvious exception of calls to
4349 builtin functions. */
4350 if (lhs && TREE_CODE (lhs) == SSA_NAME
4351 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4352 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4353 && (is_gimple_call (stmt)
4354 || !gimple_vuse (stmt)))
4355 return true;
4356 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4357 switch (gimple_call_internal_fn (stmt))
4358 {
4359 case IFN_ADD_OVERFLOW:
4360 case IFN_SUB_OVERFLOW:
4361 case IFN_MUL_OVERFLOW:
4362 case IFN_ATOMIC_COMPARE_EXCHANGE:
4363 /* These internal calls return _Complex integer type,
4364 but are interesting to VRP nevertheless. */
4365 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4366 return true;
4367 break;
4368 default:
4369 break;
4370 }
4371 }
4372 else if (gimple_code (stmt) == GIMPLE_COND
4373 || gimple_code (stmt) == GIMPLE_SWITCH)
4374 return true;
4375
4376 return false;
4377 }
4378
4379 /* Initialization required by ssa_propagate engine. */
4380
4381 void
4382 vrp_prop::vrp_initialize ()
4383 {
4384 basic_block bb;
4385
4386 FOR_EACH_BB_FN (bb, cfun)
4387 {
4388 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4389 gsi_next (&si))
4390 {
4391 gphi *phi = si.phi ();
4392 if (!stmt_interesting_for_vrp (phi))
4393 {
4394 tree lhs = PHI_RESULT (phi);
4395 set_def_to_varying (lhs);
4396 prop_set_simulate_again (phi, false);
4397 }
4398 else
4399 prop_set_simulate_again (phi, true);
4400 }
4401
4402 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
4403 gsi_next (&si))
4404 {
4405 gimple *stmt = gsi_stmt (si);
4406
4407 /* If the statement is a control insn, make sure it is
4408 simulated at least once; failing to do so means its
4409 outgoing edges will never get added. */
4410 if (stmt_ends_bb_p (stmt))
4411 prop_set_simulate_again (stmt, true);
4412 else if (!stmt_interesting_for_vrp (stmt))
4413 {
4414 set_defs_to_varying (stmt);
4415 prop_set_simulate_again (stmt, false);
4416 }
4417 else
4418 prop_set_simulate_again (stmt, true);
4419 }
4420 }
4421 }
4422
4423 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
4424 that includes the value VAL. The search is restricted to the range
4425 [START_IDX, n - 1] where n is the size of VEC.
4426
4427 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
4428 returned.
4429
4430 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
4431 it is placed in IDX and false is returned.
4432
4433 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
4434 returned. */
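
/* For example (an illustrative sketch), for case labels with CASE_LOW
   values {1, 5, 10} at indices 1 through 3 and VAL == 7, *IDX is set
   to 3 (the label for 10) and false is returned. */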
4435
4436 bool
4437 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
4438 {
4439 size_t n = gimple_switch_num_labels (stmt);
4440 size_t low, high;
4441
4442 /* Find case label for minimum of the value range or the next one.
4443 At each iteration we are searching in [low, high - 1]. */
4444
4445 for (low = start_idx, high = n; high != low; )
4446 {
4447 tree t;
4448 int cmp;
4449 /* Note that i != high, so we never ask for n. */
4450 size_t i = (high + low) / 2;
4451 t = gimple_switch_label (stmt, i);
4452
4453 /* Cache the result of comparing CASE_LOW and val. */
4454 cmp = tree_int_cst_compare (CASE_LOW (t), val);
4455
4456 if (cmp == 0)
4457 {
4458 /* Ranges cannot be empty. */
4459 *idx = i;
4460 return true;
4461 }
4462 else if (cmp > 0)
4463 high = i;
4464 else
4465 {
4466 low = i + 1;
4467 if (CASE_HIGH (t) != NULL
4468 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
4469 {
4470 *idx = i;
4471 return true;
4472 }
4473 }
4474 }
4475
4476 *idx = high;
4477 return false;
4478 }
4479
4480 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
4481 for values between MIN and MAX. The first index is placed in MIN_IDX. The
4482 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
4483 then MAX_IDX < MIN_IDX.
4484 Returns true if the default label is not needed. */
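
/* For example (an illustrative sketch), with case labels {1, 2, 3, 8}
   and [MIN, MAX] == [2, 3], [*MIN_IDX, *MAX_IDX] covers the labels
   for 2 and 3, and true is returned because that subrange of labels
   is contiguous. */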
4485
4486 bool
4487 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
4488 size_t *max_idx)
4489 {
4490 size_t i, j;
4491 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
4492 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
4493
4494 if (i == j
4495 && min_take_default
4496 && max_take_default)
4497 {
4498 /* Only the default case label reached.
4499 Return an empty range. */
4500 *min_idx = 1;
4501 *max_idx = 0;
4502 return false;
4503 }
4504 else
4505 {
4506 bool take_default = min_take_default || max_take_default;
4507 tree low, high;
4508 size_t k;
4509
4510 if (max_take_default)
4511 j--;
4512
4513 /* If the case label range is continuous, we do not need
4514 the default case label. Verify that. */
4515 high = CASE_LOW (gimple_switch_label (stmt, i));
4516 if (CASE_HIGH (gimple_switch_label (stmt, i)))
4517 high = CASE_HIGH (gimple_switch_label (stmt, i));
4518 for (k = i + 1; k <= j; ++k)
4519 {
4520 low = CASE_LOW (gimple_switch_label (stmt, k));
4521 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
4522 {
4523 take_default = true;
4524 break;
4525 }
4526 high = low;
4527 if (CASE_HIGH (gimple_switch_label (stmt, k)))
4528 high = CASE_HIGH (gimple_switch_label (stmt, k));
4529 }
4530
4531 *min_idx = i;
4532 *max_idx = j;
4533 return !take_default;
4534 }
4535 }
4536
4537 /* Evaluate statement STMT. If the statement produces a useful range,
4538 return SSA_PROP_INTERESTING and record the SSA name with the
4539 interesting range into *OUTPUT_P.
4540
4541 If STMT is a conditional branch and we can determine its truth
4542 value, the taken edge is recorded in *TAKEN_EDGE_P.
4543
4544 If STMT produces a varying value, return SSA_PROP_VARYING. */
4545
4546 enum ssa_prop_result
4547 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
4548 {
4549 tree lhs = gimple_get_lhs (stmt);
4550 value_range_equiv vr;
4551 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
4552
4553 if (*output_p)
4554 {
4555 if (update_value_range (*output_p, &vr))
4556 {
4557 if (dump_file && (dump_flags & TDF_DETAILS))
4558 {
4559 fprintf (dump_file, "Found new range for ");
4560 print_generic_expr (dump_file, *output_p);
4561 fprintf (dump_file, ": ");
4562 dump_value_range (dump_file, &vr);
4563 fprintf (dump_file, "\n");
4564 }
4565
4566 if (vr.varying_p ())
4567 return SSA_PROP_VARYING;
4568
4569 return SSA_PROP_INTERESTING;
4570 }
4571 return SSA_PROP_NOT_INTERESTING;
4572 }
4573
4574 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4575 switch (gimple_call_internal_fn (stmt))
4576 {
4577 case IFN_ADD_OVERFLOW:
4578 case IFN_SUB_OVERFLOW:
4579 case IFN_MUL_OVERFLOW:
4580 case IFN_ATOMIC_COMPARE_EXCHANGE:
4581 /* These internal calls return _Complex integer type,
4582 which VRP does not track, but the immediate uses
4583 thereof might be interesting. */
4584 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4585 {
4586 imm_use_iterator iter;
4587 use_operand_p use_p;
4588 enum ssa_prop_result res = SSA_PROP_VARYING;
4589
4590 set_def_to_varying (lhs);
4591
4592 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
4593 {
4594 gimple *use_stmt = USE_STMT (use_p);
4595 if (!is_gimple_assign (use_stmt))
4596 continue;
4597 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
4598 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
4599 continue;
4600 tree rhs1 = gimple_assign_rhs1 (use_stmt);
4601 tree use_lhs = gimple_assign_lhs (use_stmt);
4602 if (TREE_CODE (rhs1) != rhs_code
4603 || TREE_OPERAND (rhs1, 0) != lhs
4604 || TREE_CODE (use_lhs) != SSA_NAME
4605 || !stmt_interesting_for_vrp (use_stmt)
4606 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
4607 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
4608 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
4609 continue;
4610
4611 /* If there is a change in the value range for any of the
4612 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
4613 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
4614 or IMAGPART_EXPR immediate uses, but none of them have
4615 a change in their value ranges, return
4616 SSA_PROP_NOT_INTERESTING. If there are no
4617 {REAL,IMAG}PART_EXPR uses at all,
4618 return SSA_PROP_VARYING. */
4619 value_range_equiv new_vr;
4620 extract_range_basic (&new_vr, use_stmt);
4621 const value_range_equiv *old_vr = get_value_range (use_lhs);
4622 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
4623 res = SSA_PROP_INTERESTING;
4624 else
4625 res = SSA_PROP_NOT_INTERESTING;
4626 new_vr.equiv_clear ();
4627 if (res == SSA_PROP_INTERESTING)
4628 {
4629 *output_p = lhs;
4630 return res;
4631 }
4632 }
4633
4634 return res;
4635 }
4636 break;
4637 default:
4638 break;
4639 }
4640
4641 /* All other statements produce nothing of interest for VRP, so mark
4642 their outputs varying and prevent further simulation. */
4643 set_defs_to_varying (stmt);
4644
4645 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
4646 }
4647
4648 void
4649 value_range_equiv::intersect (const value_range_equiv *other)
4650 {
4651 if (dump_file && (dump_flags & TDF_DETAILS))
4652 {
4653 fprintf (dump_file, "Intersecting\n ");
4654 dump_value_range (dump_file, this);
4655 fprintf (dump_file, "\nand\n ");
4656 dump_value_range (dump_file, other);
4657 fprintf (dump_file, "\n");
4658 }
4659
4660 /* If THIS is varying we want to pick up equivalences from OTHER.
4661 Just special-case this here rather than trying to fixup after the
4662 fact. */
4663 if (this->varying_p ())
4664 this->deep_copy (other);
4665 else
4666 {
4667 value_range tem = intersect_helper (this, other);
4668 this->update (tem.min (), tem.max (), tem.kind ());
4669
4670 /* If the result is VR_UNDEFINED there is no need to mess with
4671 equivalencies. */
4672 if (!undefined_p ())
4673 {
4674 /* The resulting set of equivalences for range intersection
4675 is the union of the two sets. */
4676 if (m_equiv && other->m_equiv && m_equiv != other->m_equiv)
4677 bitmap_ior_into (m_equiv, other->m_equiv);
4678 else if (other->m_equiv && !m_equiv)
4679 {
4680 /* All equivalence bitmaps are allocated from the same
4681 obstack. So we can use the obstack associated with
4682 VR to allocate this->m_equiv. */
4683 m_equiv = BITMAP_ALLOC (other->m_equiv->obstack);
4684 bitmap_copy (m_equiv, other->m_equiv);
4685 }
4686 }
4687 }
4688
4689 if (dump_file && (dump_flags & TDF_DETAILS))
4690 {
4691 fprintf (dump_file, "to\n ");
4692 dump_value_range (dump_file, this);
4693 fprintf (dump_file, "\n");
4694 }
4695 }
4696
4697 void
4698 value_range_equiv::union_ (const value_range_equiv *other)
4699 {
4700 if (dump_file && (dump_flags & TDF_DETAILS))
4701 {
4702 fprintf (dump_file, "Meeting\n ");
4703 dump_value_range (dump_file, this);
4704 fprintf (dump_file, "\nand\n ");
4705 dump_value_range (dump_file, other);
4706 fprintf (dump_file, "\n");
4707 }
4708
4709 /* If THIS is undefined we want to pick up equivalences from OTHER.
4710 Just special-case this here rather than trying to fixup after the fact. */
4711 if (this->undefined_p ())
4712 this->deep_copy (other);
4713 else
4714 {
4715 value_range tem = union_helper (this, other);
4716 this->update (tem.min (), tem.max (), tem.kind ());
4717
4718 /* The resulting set of equivalences is always the intersection of
4719 the two sets. */
4720 if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
4721 bitmap_and_into (this->m_equiv, other->m_equiv);
4722 else if (this->m_equiv && !other->m_equiv)
4723 bitmap_clear (this->m_equiv);
4724 }
4725
4726 if (dump_file && (dump_flags & TDF_DETAILS))
4727 {
4728 fprintf (dump_file, "to\n ");
4729 dump_value_range (dump_file, this);
4730 fprintf (dump_file, "\n");
4731 }
4732 }
4733
4734 /* Visit all arguments for PHI node PHI that flow through executable
4735 edges. If a valid value range can be derived from all the incoming
4736 value ranges, set a new range for the LHS of PHI. */
4737
4738 enum ssa_prop_result
4739 vrp_prop::visit_phi (gphi *phi)
4740 {
4741 tree lhs = PHI_RESULT (phi);
4742 value_range_equiv vr_result;
4743 extract_range_from_phi_node (phi, &vr_result);
4744 if (update_value_range (lhs, &vr_result))
4745 {
4746 if (dump_file && (dump_flags & TDF_DETAILS))
4747 {
4748 fprintf (dump_file, "Found new range for ");
4749 print_generic_expr (dump_file, lhs);
4750 fprintf (dump_file, ": ");
4751 dump_value_range (dump_file, &vr_result);
4752 fprintf (dump_file, "\n");
4753 }
4754
4755 if (vr_result.varying_p ())
4756 return SSA_PROP_VARYING;
4757
4758 return SSA_PROP_INTERESTING;
4759 }
4760
4761 /* Nothing changed, don't add outgoing edges. */
4762 return SSA_PROP_NOT_INTERESTING;
4763 }
4764
4765 class vrp_folder : public substitute_and_fold_engine
4766 {
4767 public:
4768 vrp_folder () : substitute_and_fold_engine (/* Fold all stmts. */ true) { }
4769 tree get_value (tree) FINAL OVERRIDE;
4770 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
4771 bool fold_predicate_in (gimple_stmt_iterator *);
4772
4773 class vr_values *vr_values;
4774
4775 /* Delegators. */
4776 tree vrp_evaluate_conditional (tree_code code, tree op0,
4777 tree op1, gimple *stmt)
4778 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
4779 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
4780 { return vr_values->simplify_stmt_using_ranges (gsi); }
4781 tree op_with_constant_singleton_value_range (tree op)
4782 { return vr_values->op_with_constant_singleton_value_range (op); }
4783 };
4784
4785 /* If the statement pointed by SI has a predicate whose value can be
4786 computed using the value range information computed by VRP, compute
4787 its value and return true. Otherwise, return false. */
4788
4789 bool
4790 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4791 {
4792 bool assignment_p = false;
4793 tree val;
4794 gimple *stmt = gsi_stmt (*si);
4795
4796 if (is_gimple_assign (stmt)
4797 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4798 {
4799 assignment_p = true;
4800 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4801 gimple_assign_rhs1 (stmt),
4802 gimple_assign_rhs2 (stmt),
4803 stmt);
4804 }
4805 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4806 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4807 gimple_cond_lhs (cond_stmt),
4808 gimple_cond_rhs (cond_stmt),
4809 stmt);
4810 else
4811 return false;
4812
4813 if (val)
4814 {
4815 if (assignment_p)
4816 val = fold_convert (gimple_expr_type (stmt), val);
4817
4818 if (dump_file)
4819 {
4820 fprintf (dump_file, "Folding predicate ");
4821 print_gimple_expr (dump_file, stmt, 0);
4822 fprintf (dump_file, " to ");
4823 print_generic_expr (dump_file, val);
4824 fprintf (dump_file, "\n");
4825 }
4826
4827 if (is_gimple_assign (stmt))
4828 gimple_assign_set_rhs_from_tree (si, val);
4829 else
4830 {
4831 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4832 gcond *cond_stmt = as_a <gcond *> (stmt);
4833 if (integer_zerop (val))
4834 gimple_cond_make_false (cond_stmt);
4835 else if (integer_onep (val))
4836 gimple_cond_make_true (cond_stmt);
4837 else
4838 gcc_unreachable ();
4839 }
4840
4841 return true;
4842 }
4843
4844 return false;
4845 }
4846
4847 /* Callback for substitute_and_fold folding the stmt at *SI. */
4848
4849 bool
4850 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4851 {
4852 if (fold_predicate_in (si))
4853 return true;
4854
4855 return simplify_stmt_using_ranges (si);
4856 }
4857
4858 /* If OP has a value range with a single constant value return that,
4859 otherwise return NULL_TREE. This returns OP itself if OP is a
4860 constant.
4861
4862 Implemented as a pure wrapper right now, but this will change. */
4863
4864 tree
4865 vrp_folder::get_value (tree op)
4866 {
4867 return op_with_constant_singleton_value_range (op);
4868 }
4869
4870 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
4871 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
4872 BB. If no such ASSERT_EXPR is found, return OP. */
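
/* For instance (an illustrative sketch), given

     x_5 = ASSERT_EXPR <x_3, x_3 != 0>;

   in a block dominating BB, a use of x_3 in BB may be replaced by
   x_5 to pick up the sharper range recorded for x_5. */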
4873
4874 static tree
4875 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
4876 {
4877 imm_use_iterator imm_iter;
4878 gimple *use_stmt;
4879 use_operand_p use_p;
4880
4881 if (TREE_CODE (op) == SSA_NAME)
4882 {
4883 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
4884 {
4885 use_stmt = USE_STMT (use_p);
4886 if (use_stmt != stmt
4887 && gimple_assign_single_p (use_stmt)
4888 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
4889 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
4890 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
4891 return gimple_assign_lhs (use_stmt);
4892 }
4893 }
4894 return op;
4895 }
4896
4897 /* A hack. */
4898 static class vr_values *x_vr_values;
4899
4900 /* A trivial wrapper so that we can present the generic jump threading
4901 code with a simple API for simplifying statements. STMT is the
4902 statement we want to simplify, WITHIN_STMT provides the location
4903 for any overflow warnings. */
4904
4905 static tree
4906 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
4907 class avail_exprs_stack *avail_exprs_stack,
4908 basic_block bb)
4909 {
4910 /* First see if the conditional is in the hash table. */
4911 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
4912 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
4913 return cached_lhs;
4914
4915 vr_values *vr_values = x_vr_values;
4916 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4917 {
4918 tree op0 = gimple_cond_lhs (cond_stmt);
4919 op0 = lhs_of_dominating_assert (op0, bb, stmt);
4920
4921 tree op1 = gimple_cond_rhs (cond_stmt);
4922 op1 = lhs_of_dominating_assert (op1, bb, stmt);
4923
4924 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4925 op0, op1, within_stmt);
4926 }
4927
4928 /* We simplify a switch statement by trying to determine which case label
4929 will be taken. If we are successful then we return the corresponding
4930 CASE_LABEL_EXPR. */
4931 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
4932 {
4933 tree op = gimple_switch_index (switch_stmt);
4934 if (TREE_CODE (op) != SSA_NAME)
4935 return NULL_TREE;
4936
4937 op = lhs_of_dominating_assert (op, bb, stmt);
4938
4939 const value_range_equiv *vr = vr_values->get_value_range (op);
4940 if (vr->undefined_p ()
4941 || vr->varying_p ()
4942 || vr->symbolic_p ())
4943 return NULL_TREE;
4944
4945 if (vr->kind () == VR_RANGE)
4946 {
4947 size_t i, j;
4948 /* Get the range of labels that contain a part of the operand's
4949 value range. */
4950 find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
4951
4952 /* Is there only one such label? */
4953 if (i == j)
4954 {
4955 tree label = gimple_switch_label (switch_stmt, i);
4956
4957 /* The i'th label will be taken only if the value range of the
4958 operand is entirely within the bounds of this label. */
4959 if (CASE_HIGH (label) != NULL_TREE
4960 ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
4961 && tree_int_cst_compare (CASE_HIGH (label),
4962 vr->max ()) >= 0)
4963 : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
4964 && tree_int_cst_equal (vr->min (), vr->max ())))
4965 return label;
4966 }
4967
4968 /* If there are no such labels then the default label will be
4969 taken. */
4970 if (i > j)
4971 return gimple_switch_label (switch_stmt, 0);
4972 }
4973
4974 if (vr->kind () == VR_ANTI_RANGE)
4975 {
4976 unsigned n = gimple_switch_num_labels (switch_stmt);
4977 tree min_label = gimple_switch_label (switch_stmt, 1);
4978 tree max_label = gimple_switch_label (switch_stmt, n - 1);
4979
4980 /* The default label will be taken only if the anti-range of the
4981 operand is entirely outside the bounds of all the (non-default)
4982 case labels. */
4983 if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
4984 && (CASE_HIGH (max_label) != NULL_TREE
4985 ? tree_int_cst_compare (vr->max (),
4986 CASE_HIGH (max_label)) >= 0
4987 : tree_int_cst_compare (vr->max (),
4988 CASE_LOW (max_label)) >= 0))
4989 return gimple_switch_label (switch_stmt, 0);
4990 }
4991
4992 return NULL_TREE;
4993 }
4994
4995 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
4996 {
4997 tree lhs = gimple_assign_lhs (assign_stmt);
4998 if (TREE_CODE (lhs) == SSA_NAME
4999 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5000 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5001 && stmt_interesting_for_vrp (stmt))
5002 {
5003 edge dummy_e;
5004 tree dummy_tree;
5005 value_range_equiv new_vr;
5006 vr_values->extract_range_from_stmt (stmt, &dummy_e,
5007 &dummy_tree, &new_vr);
5008 tree singleton;
5009 if (new_vr.singleton_p (&singleton))
5010 return singleton;
5011 }
5012 }
5013
5014 return NULL_TREE;
5015 }
5016
5017 class vrp_dom_walker : public dom_walker
5018 {
5019 public:
5020 vrp_dom_walker (cdi_direction direction,
5021 class const_and_copies *const_and_copies,
5022 class avail_exprs_stack *avail_exprs_stack)
5023 : dom_walker (direction, REACHABLE_BLOCKS),
5024 m_const_and_copies (const_and_copies),
5025 m_avail_exprs_stack (avail_exprs_stack),
5026 m_dummy_cond (NULL) {}
5027
5028 virtual edge before_dom_children (basic_block);
5029 virtual void after_dom_children (basic_block);
5030
5031 class vr_values *vr_values;
5032
5033 private:
5034 class const_and_copies *m_const_and_copies;
5035 class avail_exprs_stack *m_avail_exprs_stack;
5036
5037 gcond *m_dummy_cond;
5038
5039 };
5040
5041 /* Called before processing dominator children of BB. We want to look
5042 at ASSERT_EXPRs and record information from them in the appropriate
5043 tables.
5044
5045 We could look at other statements here. It's not seen as likely
5046 to significantly increase the jump threads we discover. */
5047
5048 edge
5049 vrp_dom_walker::before_dom_children (basic_block bb)
5050 {
5051 gimple_stmt_iterator gsi;
5052
5053 m_avail_exprs_stack->push_marker ();
5054 m_const_and_copies->push_marker ();
5055 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5056 {
5057 gimple *stmt = gsi_stmt (gsi);
5058 if (gimple_assign_single_p (stmt)
5059 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
5060 {
5061 tree rhs1 = gimple_assign_rhs1 (stmt);
5062 tree cond = TREE_OPERAND (rhs1, 1);
5063 tree inverted = invert_truthvalue (cond);
5064 vec<cond_equivalence> p;
5065 p.create (3);
5066 record_conditions (&p, cond, inverted);
5067 for (unsigned int i = 0; i < p.length (); i++)
5068 m_avail_exprs_stack->record_cond (&p[i]);
5069
5070 tree lhs = gimple_assign_lhs (stmt);
5071 m_const_and_copies->record_const_or_copy (lhs,
5072 TREE_OPERAND (rhs1, 0));
5073 p.release ();
5074 continue;
5075 }
5076 break;
5077 }
5078 return NULL;
5079 }
5080
5081 /* Called after processing dominator children of BB. This is where we
5082 actually call into the threader. */
5083 void
5084 vrp_dom_walker::after_dom_children (basic_block bb)
5085 {
5086 if (!m_dummy_cond)
5087 m_dummy_cond = gimple_build_cond (NE_EXPR,
5088 integer_zero_node, integer_zero_node,
5089 NULL, NULL);
5090
5091 x_vr_values = vr_values;
5092 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
5093 m_avail_exprs_stack, NULL,
5094 simplify_stmt_for_jump_threading);
5095 x_vr_values = NULL;
5096
5097 m_avail_exprs_stack->pop_to_marker ();
5098 m_const_and_copies->pop_to_marker ();
5099 }
5100
5101 /* Blocks which have more than one predecessor and more than
5102 one successor present jump threading opportunities, i.e.,
5103 when the block is reached from a specific predecessor, we
5104 may be able to determine which of the outgoing edges will
5105 be traversed. When this optimization applies, we are able
5106 to avoid conditionals at runtime and we may expose secondary
5107 optimization opportunities.
5108
5109 This routine is effectively a driver for the generic jump
5110 threading code. It basically just presents the generic code
5111 with edges that may be suitable for jump threading.
5112
5113 Unlike DOM, we do not iterate VRP if jump threading was successful.
5114 While iterating may expose new opportunities for VRP, it is expected
5115 those opportunities would be very limited and the compile time cost
5116 to expose those opportunities would be significant.
5117
5118 As jump threading opportunities are discovered, they are registered
5119 for later realization. */
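
/* A classic opportunity (an illustrative sketch):

     if (x_1 > 10)   <-- block A
       ...
     if (x_1 > 20)   <-- block B, reached via A's false edge

   On A's false edge x_1 <= 10 holds, so the second condition is
   known to be false and paths from A's false edge can be threaded
   directly to B's false successor. */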
5120
5121 static void
5122 identify_jump_threads (class vr_values *vr_values)
5123 {
5124 /* Ugh. When substituting values earlier in this pass we can
5125 wipe the dominance information. So rebuild the dominator
5126 information as we need it within the jump threading code. */
5127 calculate_dominance_info (CDI_DOMINATORS);
5128
5129 /* We do not allow VRP information to be used for jump threading
5130 across a back edge in the CFG. Otherwise it becomes too
5131 difficult to avoid eliminating loop exit tests. Of course
5132 EDGE_DFS_BACK is not accurate at this time so we have to
5133 recompute it. */
5134 mark_dfs_back_edges ();
5135
5136 /* Allocate our unwinder stack to unwind any temporary equivalences
5137 that might be recorded. */
5138 const_and_copies *equiv_stack = new const_and_copies ();
5139
5140 hash_table<expr_elt_hasher> *avail_exprs
5141 = new hash_table<expr_elt_hasher> (1024);
5142 avail_exprs_stack *avail_exprs_stack
5143 = new class avail_exprs_stack (avail_exprs);
5144
5145 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
5146 walker.vr_values = vr_values;
5147 walker.walk (cfun->cfg->x_entry_block_ptr);
5148
5149 /* We do not actually update the CFG or SSA graphs at this point as
5150 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
5151 handle ASSERT_EXPRs gracefully. */
5152 delete equiv_stack;
5153 delete avail_exprs;
5154 delete avail_exprs_stack;
5155 }
5156
5157 /* Traverse all the blocks folding conditionals with known ranges. */
5158
5159 void
5160 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
5161 {
5162 size_t i;
5163
5164 /* We have completed propagating through the lattice. */
5165 vr_values.set_lattice_propagation_complete ();
5166
5167 if (dump_file)
5168 {
5169 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
5170 vr_values.dump_all_value_ranges (dump_file);
5171 fprintf (dump_file, "\n");
5172 }
5173
5174 /* Set range info on non-pointer SSA_NAMEs and nonnull on pointers. */
5175 for (i = 0; i < num_ssa_names; i++)
5176 {
5177 tree name = ssa_name (i);
5178 if (!name)
5179 continue;
5180
5181 const value_range_equiv *vr = get_value_range (name);
5182 if (!vr->constant_p ())
5183 continue;
5184
5185 if (POINTER_TYPE_P (TREE_TYPE (name))
5186 && range_includes_zero_p (vr) == 0)
5187 set_ptr_nonnull (name);
5188 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
5189 set_range_info (name, *vr);
5190 }
5191
5192 /* If we're checking array refs, we want to merge information on
5193 the executability of each edge between vrp_folder and the
5194 check_array_bounds_dom_walker: each can clear the
5195 EDGE_EXECUTABLE flag on edges, in different ways.
5196
5197 Hence, if we're going to call check_all_array_refs, set
5198 the flag on every edge now, rather than in
5199 check_array_bounds_dom_walker's ctor; vrp_folder may clear
5200 it from some edges. */
5201 if (warn_array_bounds && warn_array_bounds_p)
5202 set_all_edges_as_executable (cfun);
5203
5204 class vrp_folder vrp_folder;
5205 vrp_folder.vr_values = &vr_values;
5206 vrp_folder.substitute_and_fold ();
5207
5208 if (warn_array_bounds && warn_array_bounds_p)
5209 check_all_array_refs ();
5210 }
5211
5212 /* Main entry point to VRP (Value Range Propagation). This pass is
5213 loosely based on J. R. C. Patterson, ``Accurate Static Branch
5214 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
5215 Programming Language Design and Implementation, pp. 67-78, 1995.
5216 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
5217
5218 This is essentially an SSA-CCP pass modified to deal with ranges
5219 instead of constants.
5220
5221 While propagating ranges, we may find that two or more SSA name
5222 have equivalent, though distinct ranges. For instance,
5223
5224 1 x_9 = p_3->a;
5225 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
5226 3 if (p_4 == q_2)
5227 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
5228 5 endif
5229 6 if (q_2)
5230
5231 In the code above, pointer p_5 has range [q_2, q_2], but from the
5232 code we can also determine that p_5 cannot be NULL and, if q_2 had
5233 a non-varying range, p_5's range should also be compatible with it.
5234
5235 These equivalences are created by two expressions: ASSERT_EXPR and
5236 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
5237 result of another assertion, then we can use the fact that p_5 and
5238 p_4 are equivalent when evaluating p_5's range.
5239
5240 Together with value ranges, we also propagate these equivalences
5241 between names so that we can take advantage of information from
5242 multiple ranges when doing final replacement. Note that this
5243 equivalency relation is transitive but not symmetric.
5244
5245 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
5246 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
5247 in contexts where that assertion does not hold (e.g., in line 6).
5248
5249 TODO, the main difference between this pass and Patterson's is that
5250 we do not propagate edge probabilities. We only compute whether
5251 edges can be taken or not. That is, instead of having a spectrum
5252 of jump probabilities between 0 and 1, we only deal with 0, 1 and
5253 DON'T KNOW. In the future, it may be worthwhile to propagate
5254 probabilities to aid branch prediction. */
5255
5256 static unsigned int
5257 execute_vrp (bool warn_array_bounds_p)
5258 {
5259
5260 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
5261 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
5262 scev_initialize ();
5263
5264 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
5265 Inserting assertions may split edges which will invalidate
5266 EDGE_DFS_BACK. */
5267 insert_range_assertions ();
5268
5269 threadedge_initialize_values ();
5270
5271 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
5272 mark_dfs_back_edges ();
5273
5274 class vrp_prop vrp_prop;
5275 vrp_prop.vrp_initialize ();
5276 vrp_prop.ssa_propagate ();
5277 vrp_prop.vrp_finalize (warn_array_bounds_p);
5278
5279 /* We must identify jump threading opportunities before we release
5280 the datastructures built by VRP. */
5281 identify_jump_threads (&vrp_prop.vr_values);
5282
5283 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
5284 was set by a type conversion can often be rewritten to use the
5285 RHS of the type conversion.
5286
5287 However, doing so inhibits jump threading through the comparison.
5288 So that transformation is not performed until after jump threading
5289 is complete. */
5290 basic_block bb;
5291 FOR_EACH_BB_FN (bb, cfun)
5292 {
5293 gimple *last = last_stmt (bb);
5294 if (last && gimple_code (last) == GIMPLE_COND)
5295 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
5296 }
5297
5298 free_numbers_of_iterations_estimates (cfun);
5299
5300 /* ASSERT_EXPRs must be removed before finalizing jump threads
5301 as finalizing jump threads calls the CFG cleanup code which
5302 does not properly handle ASSERT_EXPRs. */
5303 remove_range_assertions ();
5304
5305 /* If we exposed any new variables, go ahead and put them into
5306 SSA form now, before we handle jump threading. This simplifies
5307 interactions between rewriting of _DECL nodes into SSA form
5308 and rewriting SSA_NAME nodes into SSA form after block
5309 duplication and CFG manipulation. */
5310 update_ssa (TODO_update_ssa);
5311
5312 /* We identified all the jump threading opportunities earlier, but could
5313 not transform the CFG at that time. This routine transforms the
5314 CFG and arranges for the dominator tree to be rebuilt if necessary.
5315
5316 Note the SSA graph update will occur during the normal TODO
5317 processing by the pass manager. */
5318 thread_through_all_blocks (false);
5319
5320 vrp_prop.vr_values.cleanup_edges_and_switches ();
5321 threadedge_finalize_values ();
5322
5323 scev_finalize ();
5324 loop_optimizer_finalize ();
5325 return 0;
5326 }
5327
5328 namespace {
5329
5330 const pass_data pass_data_vrp =
5331 {
5332 GIMPLE_PASS, /* type */
5333 "vrp", /* name */
5334 OPTGROUP_NONE, /* optinfo_flags */
5335 TV_TREE_VRP, /* tv_id */
5336 PROP_ssa, /* properties_required */
5337 0, /* properties_provided */
5338 0, /* properties_destroyed */
5339 0, /* todo_flags_start */
5340 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
5341 };
5342
5343 class pass_vrp : public gimple_opt_pass
5344 {
5345 public:
5346 pass_vrp (gcc::context *ctxt)
5347 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
5348 {}
5349
5350 /* opt_pass methods: */
5351 opt_pass * clone () { return new pass_vrp (m_ctxt); }
5352 void set_pass_param (unsigned int n, bool param)
5353 {
5354 gcc_assert (n == 0);
5355 warn_array_bounds_p = param;
5356 }
5357 virtual bool gate (function *) { return flag_tree_vrp != 0; }
5358 virtual unsigned int execute (function *)
5359 { return execute_vrp (warn_array_bounds_p); }
5360
5361 private:
5362 bool warn_array_bounds_p;
5363 }; // class pass_vrp
5364
5365 } // anon namespace
5366
5367 gimple_opt_pass *
5368 make_pass_vrp (gcc::context *ctxt)
5369 {
5370 return new pass_vrp (ctxt);
5371 }
5372
5373
5374 /* Worker for determine_value_range. */
5375
5376 static void
5377 determine_value_range_1 (value_range *vr, tree expr)
5378 {
5379 if (BINARY_CLASS_P (expr))
5380 {
5381 value_range vr0, vr1;
5382 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
5383 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
5384 range_fold_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
5385 &vr0, &vr1);
5386 }
5387 else if (UNARY_CLASS_P (expr))
5388 {
5389 value_range vr0;
5390 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
5391 range_fold_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
5392 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
5393 }
5394 else if (TREE_CODE (expr) == INTEGER_CST)
5395 vr->set (expr);
5396 else
5397 {
5398 value_range_kind kind;
5399 wide_int min, max;
5400 /* For SSA names try to extract range info computed by VRP. Otherwise
5401 fall back to varying. */
5402 if (TREE_CODE (expr) == SSA_NAME
5403 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
5404 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
5405 vr->set (wide_int_to_tree (TREE_TYPE (expr), min),
5406 wide_int_to_tree (TREE_TYPE (expr), max),
5407 kind);
5408 else
5409 vr->set_varying (TREE_TYPE (expr));
5410 }
5411 }
5412
5413 /* Compute a value-range for EXPR and set it in *MIN and *MAX. Return
5414 the determined range type. */
5415
5416 value_range_kind
5417 determine_value_range (tree expr, wide_int *min, wide_int *max)
5418 {
5419 value_range vr;
5420 determine_value_range_1 (&vr, expr);
5421 if (vr.constant_p ())
5422 {
5423 *min = wi::to_wide (vr.min ());
5424 *max = wi::to_wide (vr.max ());
5425 return vr.kind ();
5426 }
5427
5428 return VR_VARYING;
5429 }