Deal with incoming POLY_INT_CST ranges (PR92033)
[gcc.git] / gcc / tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2019 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-dfa.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-ssa-loop-niter.h"
48 #include "tree-ssa-loop.h"
49 #include "tree-into-ssa.h"
50 #include "tree-ssa.h"
51 #include "intl.h"
52 #include "cfgloop.h"
53 #include "tree-scalar-evolution.h"
54 #include "tree-ssa-propagate.h"
55 #include "tree-chrec.h"
56 #include "tree-ssa-threadupdate.h"
57 #include "tree-ssa-scopedtables.h"
58 #include "tree-ssa-threadedge.h"
59 #include "omp-general.h"
60 #include "target.h"
61 #include "case-cfn-macros.h"
62 #include "params.h"
63 #include "alloc-pool.h"
64 #include "domwalk.h"
65 #include "tree-cfgcleanup.h"
66 #include "stringpool.h"
67 #include "attribs.h"
68 #include "vr-values.h"
69 #include "builtins.h"
70 #include "range-op.h"
71
72 static bool
73 ranges_from_anti_range (const value_range_base *ar,
74 value_range_base *vr0, value_range_base *vr1,
75 bool handle_pointers = false);
76
77 /* Set of SSA names found live during the RPO traversal of the function
78 for still active basic-blocks. */
79 static sbitmap *live;
80
81 void
82 value_range::set_equiv (bitmap equiv)
83 {
84 if (undefined_p () || varying_p ())
85 equiv = NULL;
86 /* Since updating the equivalence set involves deep copying the
87 bitmaps, only do it if absolutely necessary.
88
89 All equivalence bitmaps are allocated from the same obstack. So
90 we can use the obstack associated with EQUIV to allocate vr->equiv. */
91 if (m_equiv == NULL
92 && equiv != NULL)
93 m_equiv = BITMAP_ALLOC (equiv->obstack);
94
95 if (equiv != m_equiv)
96 {
97 if (equiv && !bitmap_empty_p (equiv))
98 bitmap_copy (m_equiv, equiv);
99 else
100 bitmap_clear (m_equiv);
101 }
102 }
103
104 /* Initialize value_range. */
105
106 void
107 value_range::set (enum value_range_kind kind, tree min, tree max,
108 bitmap equiv)
109 {
110 value_range_base::set (kind, min, max);
111 set_equiv (equiv);
112 if (flag_checking)
113 check ();
114 }
115
116 value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
117 {
118 set (kind, min, max);
119 }
120
121 value_range::value_range (value_range_kind kind, tree min, tree max,
122 bitmap equiv)
123 {
124 m_equiv = NULL;
125 set (kind, min, max, equiv);
126 }
127
128 value_range::value_range (const value_range_base &other)
129 {
130 m_equiv = NULL;
131 set (other.kind (), other.min(), other.max (), NULL);
132 }
133
134 value_range_base::value_range_base (tree type)
135 {
136 set_varying (type);
137 }
138
139 value_range_base::value_range_base (enum value_range_kind kind,
140 tree type,
141 const wide_int &wmin,
142 const wide_int &wmax)
143 {
144 tree min = wide_int_to_tree (type, wmin);
145 tree max = wide_int_to_tree (type, wmax);
146 gcc_checking_assert (kind == VR_RANGE || kind == VR_ANTI_RANGE);
147 set (kind, min, max);
148 }
149
150 value_range_base::value_range_base (tree type,
151 const wide_int &wmin,
152 const wide_int &wmax)
153 {
154 tree min = wide_int_to_tree (type, wmin);
155 tree max = wide_int_to_tree (type, wmax);
156 set (VR_RANGE, min, max);
157 }
158
159 value_range_base::value_range_base (tree min, tree max)
160 {
161 set (VR_RANGE, min, max);
162 }
163
164 /* Like set, but keep the equivalences in place. */
165
166 void
167 value_range::update (value_range_kind kind, tree min, tree max)
168 {
169 set (kind, min, max,
170 (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
171 }
172
173 /* Copy value_range in FROM into THIS while avoiding bitmap sharing.
174
175 Note: The code that avoids the bitmap sharing looks at the existing
176 this->m_equiv, so this function cannot be used to initalize an
177 object. Use the constructors for initialization. */
178
179 void
180 value_range::deep_copy (const value_range *from)
181 {
182 set (from->m_kind, from->min (), from->max (), from->m_equiv);
183 }
184
185 void
186 value_range::move (value_range *from)
187 {
188 set (from->m_kind, from->min (), from->max ());
189 m_equiv = from->m_equiv;
190 from->m_equiv = NULL;
191 }
192
193 /* Check the validity of the range. */
194
195 void
196 value_range_base::check ()
197 {
198 switch (m_kind)
199 {
200 case VR_RANGE:
201 case VR_ANTI_RANGE:
202 {
203 int cmp;
204
205 gcc_assert (m_min && m_max);
206
207 gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));
208
209 /* Creating ~[-MIN, +MAX] is stupid because that would be
210 the empty set. */
211 if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
212 gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));
213
214 cmp = compare_values (m_min, m_max);
215 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
216 break;
217 }
218 case VR_UNDEFINED:
219 gcc_assert (!min () && !max ());
220 break;
221 case VR_VARYING:
222 gcc_assert (m_min && m_max);
223 break;
224 default:
225 gcc_unreachable ();
226 }
227 }
228
229 void
230 value_range::check ()
231 {
232 value_range_base::check ();
233 switch (m_kind)
234 {
235 case VR_UNDEFINED:
236 case VR_VARYING:
237 gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
238 default:;
239 }
240 }
241
242 /* Equality operator. We purposely do not overload ==, to avoid
243 confusion with the equality bitmap in the derived value_range
244 class. */
245
246 bool
247 value_range_base::equal_p (const value_range_base &other) const
248 {
249 /* Ignore types for undefined. All undefines are equal. */
250 if (undefined_p ())
251 return m_kind == other.m_kind;
252
253 return (m_kind == other.m_kind
254 && vrp_operand_equal_p (m_min, other.m_min)
255 && vrp_operand_equal_p (m_max, other.m_max));
256 }
257
258 /* Returns TRUE if THIS == OTHER. Ignores the equivalence bitmap if
259 IGNORE_EQUIVS is TRUE. */
260
261 bool
262 value_range::equal_p (const value_range &other, bool ignore_equivs) const
263 {
264 return (value_range_base::equal_p (other)
265 && (ignore_equivs
266 || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
267 }
268
269 /* Return TRUE if this is a symbolic range. */
270
271 bool
272 value_range_base::symbolic_p () const
273 {
274 return (!varying_p ()
275 && !undefined_p ()
276 && (!is_gimple_min_invariant (m_min)
277 || !is_gimple_min_invariant (m_max)));
278 }
279
280 /* NOTE: This is not the inverse of symbolic_p because the range
281 could also be varying or undefined. Ideally they should be inverse
282 of each other, with varying only applying to symbolics. Varying of
283 constants would be represented as [-MIN, +MAX]. */
284
285 bool
286 value_range_base::constant_p () const
287 {
288 return (!varying_p ()
289 && !undefined_p ()
290 && TREE_CODE (m_min) == INTEGER_CST
291 && TREE_CODE (m_max) == INTEGER_CST);
292 }
293
294 void
295 value_range_base::set_undefined ()
296 {
297 m_kind = VR_UNDEFINED;
298 m_min = m_max = NULL;
299 }
300
301 void
302 value_range::set_undefined ()
303 {
304 set (VR_UNDEFINED, NULL, NULL, NULL);
305 }
306
307 void
308 value_range_base::set_varying (tree type)
309 {
310 m_kind = VR_VARYING;
311 if (supports_type_p (type))
312 {
313 m_min = vrp_val_min (type, true);
314 m_max = vrp_val_max (type, true);
315 }
316 else
317 /* We can't do anything range-wise with these types. */
318 m_min = m_max = error_mark_node;
319 }
320
321 void
322 value_range::set_varying (tree type)
323 {
324 value_range_base::set_varying (type);
325 equiv_clear ();
326 }
327
328 /* Return TRUE if it is possible that range contains VAL. */
329
330 bool
331 value_range_base::may_contain_p (tree val) const
332 {
333 return value_inside_range (val) != 0;
334 }
335
336 void
337 value_range::equiv_clear ()
338 {
339 if (m_equiv)
340 bitmap_clear (m_equiv);
341 }
342
343 /* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
344 bitmap. If no equivalence table has been created, OBSTACK is the
345 obstack to use (NULL for the default obstack).
346
347 This is the central point where equivalence processing can be
348 turned on/off. */
349
350 void
351 value_range::equiv_add (const_tree var,
352 const value_range *var_vr,
353 bitmap_obstack *obstack)
354 {
355 if (!m_equiv)
356 m_equiv = BITMAP_ALLOC (obstack);
357 unsigned ver = SSA_NAME_VERSION (var);
358 bitmap_set_bit (m_equiv, ver);
359 if (var_vr && var_vr->m_equiv)
360 bitmap_ior_into (m_equiv, var_vr->m_equiv);
361 }
362
363 /* If range is a singleton, place it in RESULT and return TRUE.
364 Note: A singleton can be any gimple invariant, not just constants.
365 So, [&x, &x] counts as a singleton. */
366
367 bool
368 value_range_base::singleton_p (tree *result) const
369 {
370 if (m_kind == VR_ANTI_RANGE)
371 {
372 if (nonzero_p ())
373 {
374 if (TYPE_PRECISION (type ()) == 1)
375 {
376 if (result)
377 *result = m_max;
378 return true;
379 }
380 return false;
381 }
382 if (num_pairs () == 1)
383 {
384 value_range_base vr0, vr1;
385 ranges_from_anti_range (this, &vr0, &vr1, true);
386 return vr0.singleton_p (result);
387 }
388 }
389 if (m_kind == VR_RANGE
390 && vrp_operand_equal_p (min (), max ())
391 && is_gimple_min_invariant (min ()))
392 {
393 if (result)
394 *result = min ();
395 return true;
396 }
397 return false;
398 }
399
400 tree
401 value_range_base::type () const
402 {
403 gcc_checking_assert (m_min);
404 return TREE_TYPE (min ());
405 }
406
407 void
408 value_range_base::dump (FILE *file) const
409 {
410 if (undefined_p ())
411 fprintf (file, "UNDEFINED");
412 else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
413 {
414 tree ttype = type ();
415
416 print_generic_expr (file, ttype);
417 fprintf (file, " ");
418
419 fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");
420
421 if (INTEGRAL_TYPE_P (ttype)
422 && !TYPE_UNSIGNED (ttype)
423 && vrp_val_is_min (min ())
424 && TYPE_PRECISION (ttype) != 1)
425 fprintf (file, "-INF");
426 else
427 print_generic_expr (file, min ());
428
429 fprintf (file, ", ");
430
431 if (INTEGRAL_TYPE_P (ttype)
432 && vrp_val_is_max (max ())
433 && TYPE_PRECISION (ttype) != 1)
434 fprintf (file, "+INF");
435 else
436 print_generic_expr (file, max ());
437
438 fprintf (file, "]");
439 }
440 else if (varying_p ())
441 {
442 print_generic_expr (file, type ());
443 fprintf (file, " VARYING");
444 }
445 else
446 gcc_unreachable ();
447 }
448
449 void
450 value_range_base::dump () const
451 {
452 dump (stderr);
453 }
454
455 void
456 value_range::dump (FILE *file) const
457 {
458 value_range_base::dump (file);
459 if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
460 && m_equiv)
461 {
462 bitmap_iterator bi;
463 unsigned i, c = 0;
464
465 fprintf (file, " EQUIVALENCES: { ");
466
467 EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
468 {
469 print_generic_expr (file, ssa_name (i));
470 fprintf (file, " ");
471 c++;
472 }
473
474 fprintf (file, "} (%u elements)", c);
475 }
476 }
477
478 void
479 value_range::dump () const
480 {
481 dump (stderr);
482 }
483
484 void
485 dump_value_range (FILE *file, const value_range *vr)
486 {
487 if (!vr)
488 fprintf (file, "[]");
489 else
490 vr->dump (file);
491 }
492
493 void
494 dump_value_range (FILE *file, const value_range_base *vr)
495 {
496 if (!vr)
497 fprintf (file, "[]");
498 else
499 vr->dump (file);
500 }
501
502 DEBUG_FUNCTION void
503 debug (const value_range_base *vr)
504 {
505 dump_value_range (stderr, vr);
506 }
507
508 DEBUG_FUNCTION void
509 debug (const value_range_base &vr)
510 {
511 dump_value_range (stderr, &vr);
512 }
513
514 DEBUG_FUNCTION void
515 debug (const value_range *vr)
516 {
517 dump_value_range (stderr, vr);
518 }
519
520 DEBUG_FUNCTION void
521 debug (const value_range &vr)
522 {
523 dump_value_range (stderr, &vr);
524 }
525
526 /* Return true if the SSA name NAME is live on the edge E. */
527
528 static bool
529 live_on_edge (edge e, tree name)
530 {
531 return (live[e->dest->index]
532 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
533 }
534
535 /* Location information for ASSERT_EXPRs. Each instance of this
536 structure describes an ASSERT_EXPR for an SSA name. Since a single
537 SSA name may have more than one assertion associated with it, these
538 locations are kept in a linked list attached to the corresponding
539 SSA name. */
540 struct assert_locus
541 {
542 /* Basic block where the assertion would be inserted. */
543 basic_block bb;
544
545 /* Some assertions need to be inserted on an edge (e.g., assertions
546 generated by COND_EXPRs). In those cases, BB will be NULL. */
547 edge e;
548
549 /* Pointer to the statement that generated this assertion. */
550 gimple_stmt_iterator si;
551
552 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
553 enum tree_code comp_code;
554
555 /* Value being compared against. */
556 tree val;
557
558 /* Expression to compare. */
559 tree expr;
560
561 /* Next node in the linked list. */
562 assert_locus *next;
563 };
564
565 /* If bit I is present, it means that SSA name N_i has a list of
566 assertions that should be inserted in the IL. */
567 static bitmap need_assert_for;
568
569 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
570 holds a list of ASSERT_LOCUS_T nodes that describe where
571 ASSERT_EXPRs for SSA name N_I should be inserted. */
572 static assert_locus **asserts_for;
573
574 /* Return the maximum value for TYPE. */
575
576 tree
577 vrp_val_max (const_tree type, bool handle_pointers)
578 {
579 if (INTEGRAL_TYPE_P (type))
580 return TYPE_MAX_VALUE (type);
581 if (POINTER_TYPE_P (type) && handle_pointers)
582 {
583 wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
584 return wide_int_to_tree (const_cast<tree> (type), max);
585 }
586 return NULL_TREE;
587 }
588
589 /* Return the minimum value for TYPE. */
590
591 tree
592 vrp_val_min (const_tree type, bool handle_pointers)
593 {
594 if (INTEGRAL_TYPE_P (type))
595 return TYPE_MIN_VALUE (type);
596 if (POINTER_TYPE_P (type) && handle_pointers)
597 return build_zero_cst (const_cast<tree> (type));
598 return NULL_TREE;
599 }
600
601 /* Return whether VAL is equal to the maximum value of its type.
602 We can't do a simple equality comparison with TYPE_MAX_VALUE because
603 C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
604 is not == to the integer constant with the same value in the type. */
605
606 bool
607 vrp_val_is_max (const_tree val, bool handle_pointers)
608 {
609 tree type_max = vrp_val_max (TREE_TYPE (val), handle_pointers);
610 return (val == type_max
611 || (type_max != NULL_TREE
612 && operand_equal_p (val, type_max, 0)));
613 }
614
615 /* Return whether VAL is equal to the minimum value of its type. */
616
617 bool
618 vrp_val_is_min (const_tree val, bool handle_pointers)
619 {
620 tree type_min = vrp_val_min (TREE_TYPE (val), handle_pointers);
621 return (val == type_min
622 || (type_min != NULL_TREE
623 && operand_equal_p (val, type_min, 0)));
624 }
625
626 /* VR_TYPE describes a range with mininum value *MIN and maximum
627 value *MAX. Restrict the range to the set of values that have
628 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
629 return the new range type.
630
631 SGN gives the sign of the values described by the range. */
632
633 enum value_range_kind
634 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
635 wide_int *min, wide_int *max,
636 const wide_int &nonzero_bits,
637 signop sgn)
638 {
639 if (vr_type == VR_ANTI_RANGE)
640 {
641 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
642 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
643 to create an inclusive upper bound for A and an inclusive lower
644 bound for B. */
645 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
646 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
647
648 /* If the calculation of A_MAX wrapped, A is effectively empty
649 and A_MAX is the highest value that satisfies NONZERO_BITS.
650 Likewise if the calculation of B_MIN wrapped, B is effectively
651 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
652 bool a_empty = wi::ge_p (a_max, *min, sgn);
653 bool b_empty = wi::le_p (b_min, *max, sgn);
654
655 /* If both A and B are empty, there are no valid values. */
656 if (a_empty && b_empty)
657 return VR_UNDEFINED;
658
659 /* If exactly one of A or B is empty, return a VR_RANGE for the
660 other one. */
661 if (a_empty || b_empty)
662 {
663 *min = b_min;
664 *max = a_max;
665 gcc_checking_assert (wi::le_p (*min, *max, sgn));
666 return VR_RANGE;
667 }
668
669 /* Update the VR_ANTI_RANGE bounds. */
670 *min = a_max + 1;
671 *max = b_min - 1;
672 gcc_checking_assert (wi::le_p (*min, *max, sgn));
673
674 /* Now check whether the excluded range includes any values that
675 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
676 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
677 {
678 unsigned int precision = min->get_precision ();
679 *min = wi::min_value (precision, sgn);
680 *max = wi::max_value (precision, sgn);
681 vr_type = VR_RANGE;
682 }
683 }
684 if (vr_type == VR_RANGE)
685 {
686 *max = wi::round_down_for_mask (*max, nonzero_bits);
687
688 /* Check that the range contains at least one valid value. */
689 if (wi::gt_p (*min, *max, sgn))
690 return VR_UNDEFINED;
691
692 *min = wi::round_up_for_mask (*min, nonzero_bits);
693 gcc_checking_assert (wi::le_p (*min, *max, sgn));
694 }
695 return vr_type;
696 }
697
698
699 /* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
700 This means adjusting VRTYPE, MIN and MAX representing the case of a
701 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
702 as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
703 In corner cases where MAX+1 or MIN-1 wraps this will fall back
704 to varying.
705 This routine exists to ease canonicalization in the case where we
706 extract ranges from var + CST op limit. */
707
708 void
709 value_range_base::set (enum value_range_kind kind, tree min, tree max)
710 {
711 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
712 if (kind == VR_UNDEFINED)
713 {
714 set_undefined ();
715 return;
716 }
717 else if (kind == VR_VARYING)
718 {
719 gcc_assert (TREE_TYPE (min) == TREE_TYPE (max));
720 tree typ = TREE_TYPE (min);
721 if (supports_type_p (typ))
722 {
723 gcc_assert (vrp_val_min (typ, true));
724 gcc_assert (vrp_val_max (typ, true));
725 }
726 set_varying (typ);
727 return;
728 }
729
730 /* Convert POLY_INT_CST bounds into worst-case INTEGER_CST bounds. */
731 if (POLY_INT_CST_P (min))
732 {
733 tree type_min = vrp_val_min (TREE_TYPE (min), true);
734 widest_int lb
735 = constant_lower_bound_with_limit (wi::to_poly_widest (min),
736 wi::to_widest (type_min));
737 min = wide_int_to_tree (TREE_TYPE (min), lb);
738 }
739 if (POLY_INT_CST_P (max))
740 {
741 tree type_max = vrp_val_max (TREE_TYPE (max), true);
742 widest_int ub
743 = constant_upper_bound_with_limit (wi::to_poly_widest (max),
744 wi::to_widest (type_max));
745 max = wide_int_to_tree (TREE_TYPE (max), ub);
746 }
747
748 /* Nothing to canonicalize for symbolic ranges. */
749 if (TREE_CODE (min) != INTEGER_CST
750 || TREE_CODE (max) != INTEGER_CST)
751 {
752 m_kind = kind;
753 m_min = min;
754 m_max = max;
755 return;
756 }
757
758 /* Wrong order for min and max, to swap them and the VR type we need
759 to adjust them. */
760 if (tree_int_cst_lt (max, min))
761 {
762 tree one, tmp;
763
764 /* For one bit precision if max < min, then the swapped
765 range covers all values, so for VR_RANGE it is varying and
766 for VR_ANTI_RANGE empty range, so drop to varying as well. */
767 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
768 {
769 set_varying (TREE_TYPE (min));
770 return;
771 }
772
773 one = build_int_cst (TREE_TYPE (min), 1);
774 tmp = int_const_binop (PLUS_EXPR, max, one);
775 max = int_const_binop (MINUS_EXPR, min, one);
776 min = tmp;
777
778 /* There's one corner case, if we had [C+1, C] before we now have
779 that again. But this represents an empty value range, so drop
780 to varying in this case. */
781 if (tree_int_cst_lt (max, min))
782 {
783 set_varying (TREE_TYPE (min));
784 return;
785 }
786
787 kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
788 }
789
790 tree type = TREE_TYPE (min);
791
792 /* Anti-ranges that can be represented as ranges should be so. */
793 if (kind == VR_ANTI_RANGE)
794 {
795 /* For -fstrict-enums we may receive out-of-range ranges so consider
796 values < -INF and values > INF as -INF/INF as well. */
797 bool is_min = (INTEGRAL_TYPE_P (type)
798 && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
799 bool is_max = (INTEGRAL_TYPE_P (type)
800 && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);
801
802 if (is_min && is_max)
803 {
804 /* We cannot deal with empty ranges, drop to varying.
805 ??? This could be VR_UNDEFINED instead. */
806 set_varying (type);
807 return;
808 }
809 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
810 && (is_min || is_max))
811 {
812 /* Non-empty boolean ranges can always be represented
813 as a singleton range. */
814 if (is_min)
815 min = max = vrp_val_max (TREE_TYPE (min));
816 else
817 min = max = vrp_val_min (TREE_TYPE (min));
818 kind = VR_RANGE;
819 }
820 else if (is_min
821 /* Allow non-zero pointers to be normalized to [1,MAX]. */
822 || (POINTER_TYPE_P (TREE_TYPE (min))
823 && integer_zerop (min)))
824 {
825 tree one = build_int_cst (TREE_TYPE (max), 1);
826 min = int_const_binop (PLUS_EXPR, max, one);
827 max = vrp_val_max (TREE_TYPE (max), true);
828 kind = VR_RANGE;
829 }
830 else if (is_max)
831 {
832 tree one = build_int_cst (TREE_TYPE (min), 1);
833 max = int_const_binop (MINUS_EXPR, min, one);
834 min = vrp_val_min (TREE_TYPE (min));
835 kind = VR_RANGE;
836 }
837 }
838
839 /* Normalize [MIN, MAX] into VARYING and ~[MIN, MAX] into UNDEFINED.
840
841 Avoid using TYPE_{MIN,MAX}_VALUE because -fstrict-enums can
842 restrict those to a subset of what actually fits in the type.
843 Instead use the extremes of the type precision which will allow
844 compare_range_with_value() to check if a value is inside a range,
845 whereas if we used TYPE_*_VAL, said function would just punt
846 upon seeing a VARYING. */
847 unsigned prec = TYPE_PRECISION (type);
848 signop sign = TYPE_SIGN (type);
849 if (wi::eq_p (wi::to_wide (min), wi::min_value (prec, sign))
850 && wi::eq_p (wi::to_wide (max), wi::max_value (prec, sign)))
851 {
852 if (kind == VR_RANGE)
853 set_varying (type);
854 else if (kind == VR_ANTI_RANGE)
855 set_undefined ();
856 else
857 gcc_unreachable ();
858 return;
859 }
860
861 /* Do not drop [-INF(OVF), +INF(OVF)] to varying. (OVF) has to be sticky
862 to make sure VRP iteration terminates, otherwise we can get into
863 oscillations. */
864
865 m_kind = kind;
866 m_min = min;
867 m_max = max;
868 if (flag_checking)
869 check ();
870 }
871
872 void
873 value_range_base::set (tree val)
874 {
875 gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
876 if (TREE_OVERFLOW_P (val))
877 val = drop_tree_overflow (val);
878 set (VR_RANGE, val, val);
879 }
880
881 void
882 value_range::set (tree val)
883 {
884 gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
885 if (TREE_OVERFLOW_P (val))
886 val = drop_tree_overflow (val);
887 set (VR_RANGE, val, val, NULL);
888 }
889
890 /* Set value range VR to a nonzero range of type TYPE. */
891
892 void
893 value_range_base::set_nonzero (tree type)
894 {
895 tree zero = build_int_cst (type, 0);
896 set (VR_ANTI_RANGE, zero, zero);
897 }
898
899 /* Set value range VR to a ZERO range of type TYPE. */
900
901 void
902 value_range_base::set_zero (tree type)
903 {
904 set (build_int_cst (type, 0));
905 }
906
907 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
908
909 bool
910 vrp_operand_equal_p (const_tree val1, const_tree val2)
911 {
912 if (val1 == val2)
913 return true;
914 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
915 return false;
916 return true;
917 }
918
919 /* Return true, if the bitmaps B1 and B2 are equal. */
920
921 bool
922 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
923 {
924 return (b1 == b2
925 || ((!b1 || bitmap_empty_p (b1))
926 && (!b2 || bitmap_empty_p (b2)))
927 || (b1 && b2
928 && bitmap_equal_p (b1, b2)));
929 }
930
931 static bool
932 range_has_numeric_bounds_p (const value_range_base *vr)
933 {
934 return (vr->min ()
935 && TREE_CODE (vr->min ()) == INTEGER_CST
936 && TREE_CODE (vr->max ()) == INTEGER_CST);
937 }
938
939 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
940 a singleton. */
941
942 bool
943 range_int_cst_p (const value_range_base *vr)
944 {
945 return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
946 }
947
948 /* Return true if VR is a INTEGER_CST singleton. */
949
950 bool
951 range_int_cst_singleton_p (const value_range_base *vr)
952 {
953 return (range_int_cst_p (vr)
954 && tree_int_cst_equal (vr->min (), vr->max ()));
955 }
956
957 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
958 otherwise. We only handle additive operations and set NEG to true if the
959 symbol is negated and INV to the invariant part, if any. */
960
961 tree
962 get_single_symbol (tree t, bool *neg, tree *inv)
963 {
964 bool neg_;
965 tree inv_;
966
967 *inv = NULL_TREE;
968 *neg = false;
969
970 if (TREE_CODE (t) == PLUS_EXPR
971 || TREE_CODE (t) == POINTER_PLUS_EXPR
972 || TREE_CODE (t) == MINUS_EXPR)
973 {
974 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
975 {
976 neg_ = (TREE_CODE (t) == MINUS_EXPR);
977 inv_ = TREE_OPERAND (t, 0);
978 t = TREE_OPERAND (t, 1);
979 }
980 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
981 {
982 neg_ = false;
983 inv_ = TREE_OPERAND (t, 1);
984 t = TREE_OPERAND (t, 0);
985 }
986 else
987 return NULL_TREE;
988 }
989 else
990 {
991 neg_ = false;
992 inv_ = NULL_TREE;
993 }
994
995 if (TREE_CODE (t) == NEGATE_EXPR)
996 {
997 t = TREE_OPERAND (t, 0);
998 neg_ = !neg_;
999 }
1000
1001 if (TREE_CODE (t) != SSA_NAME)
1002 return NULL_TREE;
1003
1004 if (inv_ && TREE_OVERFLOW_P (inv_))
1005 inv_ = drop_tree_overflow (inv_);
1006
1007 *neg = neg_;
1008 *inv = inv_;
1009 return t;
1010 }
1011
1012 /* The reverse operation: build a symbolic expression with TYPE
1013 from symbol SYM, negated according to NEG, and invariant INV. */
1014
1015 static tree
1016 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
1017 {
1018 const bool pointer_p = POINTER_TYPE_P (type);
1019 tree t = sym;
1020
1021 if (neg)
1022 t = build1 (NEGATE_EXPR, type, t);
1023
1024 if (integer_zerop (inv))
1025 return t;
1026
1027 return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
1028 }
1029
1030 /* Return
1031 1 if VAL < VAL2
1032 0 if !(VAL < VAL2)
1033 -2 if those are incomparable. */
1034 int
1035 operand_less_p (tree val, tree val2)
1036 {
1037 /* LT is folded faster than GE and others. Inline the common case. */
1038 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1039 return tree_int_cst_lt (val, val2);
1040 else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
1041 return val == val2 ? 0 : -2;
1042 else
1043 {
1044 int cmp = compare_values (val, val2);
1045 if (cmp == -1)
1046 return 1;
1047 else if (cmp == 0 || cmp == 1)
1048 return 0;
1049 else
1050 return -2;
1051 }
1052
1053 return 0;
1054 }
1055
1056 /* Compare two values VAL1 and VAL2. Return
1057
1058 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1059 -1 if VAL1 < VAL2,
1060 0 if VAL1 == VAL2,
1061 +1 if VAL1 > VAL2, and
1062 +2 if VAL1 != VAL2
1063
1064 This is similar to tree_int_cst_compare but supports pointer values
1065 and values that cannot be compared at compile time.
1066
1067 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1068 true if the return value is only valid if we assume that signed
1069 overflow is undefined. */
1070
1071 int
1072 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1073 {
1074 if (val1 == val2)
1075 return 0;
1076
1077 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1078 both integers. */
1079 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1080 == POINTER_TYPE_P (TREE_TYPE (val2)));
1081
1082 /* Convert the two values into the same type. This is needed because
1083 sizetype causes sign extension even for unsigned types. */
1084 if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
1085 val2 = fold_convert (TREE_TYPE (val1), val2);
1086
1087 const bool overflow_undefined
1088 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
1089 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
1090 tree inv1, inv2;
1091 bool neg1, neg2;
1092 tree sym1 = get_single_symbol (val1, &neg1, &inv1);
1093 tree sym2 = get_single_symbol (val2, &neg2, &inv2);
1094
1095 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
1096 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
1097 if (sym1 && sym2)
1098 {
1099 /* Both values must use the same name with the same sign. */
1100 if (sym1 != sym2 || neg1 != neg2)
1101 return -2;
1102
1103 /* [-]NAME + CST == [-]NAME + CST. */
1104 if (inv1 == inv2)
1105 return 0;
1106
1107 /* If overflow is defined we cannot simplify more. */
1108 if (!overflow_undefined)
1109 return -2;
1110
1111 if (strict_overflow_p != NULL
1112 /* Symbolic range building sets TREE_NO_WARNING to declare
1113 that overflow doesn't happen. */
1114 && (!inv1 || !TREE_NO_WARNING (val1))
1115 && (!inv2 || !TREE_NO_WARNING (val2)))
1116 *strict_overflow_p = true;
1117
1118 if (!inv1)
1119 inv1 = build_int_cst (TREE_TYPE (val1), 0);
1120 if (!inv2)
1121 inv2 = build_int_cst (TREE_TYPE (val2), 0);
1122
1123 return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
1124 TYPE_SIGN (TREE_TYPE (val1)));
1125 }
1126
1127 const bool cst1 = is_gimple_min_invariant (val1);
1128 const bool cst2 = is_gimple_min_invariant (val2);
1129
1130 /* If one is of the form '[-]NAME + CST' and the other is constant, then
1131 it might be possible to say something depending on the constants. */
1132 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
1133 {
1134 if (!overflow_undefined)
1135 return -2;
1136
1137 if (strict_overflow_p != NULL
1138 /* Symbolic range building sets TREE_NO_WARNING to declare
1139 that overflow doesn't happen. */
1140 && (!sym1 || !TREE_NO_WARNING (val1))
1141 && (!sym2 || !TREE_NO_WARNING (val2)))
1142 *strict_overflow_p = true;
1143
1144 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
1145 tree cst = cst1 ? val1 : val2;
1146 tree inv = cst1 ? inv2 : inv1;
1147
1148 /* Compute the difference between the constants. If it overflows or
1149 underflows, this means that we can trivially compare the NAME with
1150 it and, consequently, the two values with each other. */
1151 wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
1152 if (wi::cmp (0, wi::to_wide (inv), sgn)
1153 != wi::cmp (diff, wi::to_wide (cst), sgn))
1154 {
1155 const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
1156 return cst1 ? res : -res;
1157 }
1158
1159 return -2;
1160 }
1161
1162 /* We cannot say anything more for non-constants. */
1163 if (!cst1 || !cst2)
1164 return -2;
1165
1166 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1167 {
1168 /* We cannot compare overflowed values. */
1169 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1170 return -2;
1171
1172 if (TREE_CODE (val1) == INTEGER_CST
1173 && TREE_CODE (val2) == INTEGER_CST)
1174 return tree_int_cst_compare (val1, val2);
1175
1176 if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
1177 {
1178 if (known_eq (wi::to_poly_widest (val1),
1179 wi::to_poly_widest (val2)))
1180 return 0;
1181 if (known_lt (wi::to_poly_widest (val1),
1182 wi::to_poly_widest (val2)))
1183 return -1;
1184 if (known_gt (wi::to_poly_widest (val1),
1185 wi::to_poly_widest (val2)))
1186 return 1;
1187 }
1188
1189 return -2;
1190 }
1191 else
1192 {
1193 if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1194 {
1195 /* We cannot compare overflowed values. */
1196 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1197 return -2;
1198
1199 return tree_int_cst_compare (val1, val2);
1200 }
1201
1202 /* First see if VAL1 and VAL2 are not the same. */
1203 if (operand_equal_p (val1, val2, 0))
1204 return 0;
1205
1206 fold_defer_overflow_warnings ();
1207
1208 /* If VAL1 is a lower address than VAL2, return -1. */
1209 tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
1210 if (t && integer_onep (t))
1211 {
1212 fold_undefer_and_ignore_overflow_warnings ();
1213 return -1;
1214 }
1215
1216 /* If VAL1 is a higher address than VAL2, return +1. */
1217 t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
1218 if (t && integer_onep (t))
1219 {
1220 fold_undefer_and_ignore_overflow_warnings ();
1221 return 1;
1222 }
1223
1224 /* If VAL1 is different than VAL2, return +2. */
1225 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1226 fold_undefer_and_ignore_overflow_warnings ();
1227 if (t && integer_onep (t))
1228 return 2;
1229
1230 return -2;
1231 }
1232 }
1233
1234 /* Compare values like compare_values_warnv. */
1235
1236 int
1237 compare_values (tree val1, tree val2)
1238 {
1239 bool sop;
1240 return compare_values_warnv (val1, val2, &sop);
1241 }
1242
1243
1244 /* Return 1 if VAL is inside value range.
1245 0 if VAL is not inside value range.
1246 -2 if we cannot tell either way.
1247
1248 Benchmark compile/20001226-1.c compilation time after changing this
1249 function. */
1250
1251 int
1252 value_range_base::value_inside_range (tree val) const
1253 {
1254 int cmp1, cmp2;
1255
1256 if (varying_p ())
1257 return 1;
1258
1259 if (undefined_p ())
1260 return 0;
1261
1262 cmp1 = operand_less_p (val, m_min);
1263 if (cmp1 == -2)
1264 return -2;
1265 if (cmp1 == 1)
1266 return m_kind != VR_RANGE;
1267
1268 cmp2 = operand_less_p (m_max, val);
1269 if (cmp2 == -2)
1270 return -2;
1271
1272 if (m_kind == VR_RANGE)
1273 return !cmp2;
1274 else
1275 return !!cmp2;
1276 }
1277
1278 /* For range [LB, UB] compute two wide_int bit masks.
1279
1280 In the MAY_BE_NONZERO bit mask, if some bit is unset, it means that
1281 for all numbers in the range the bit is 0, otherwise it might be 0
1282 or 1.
1283
1284 In the MUST_BE_NONZERO bit mask, if some bit is set, it means that
1285 for all numbers in the range the bit is 1, otherwise it might be 0
1286 or 1. */
1287
1288 static inline void
1289 wide_int_range_set_zero_nonzero_bits (signop sign,
1290 const wide_int &lb, const wide_int &ub,
1291 wide_int &may_be_nonzero,
1292 wide_int &must_be_nonzero)
1293 {
1294 may_be_nonzero = wi::minus_one (lb.get_precision ());
1295 must_be_nonzero = wi::zero (lb.get_precision ());
1296
1297 if (wi::eq_p (lb, ub))
1298 {
1299 may_be_nonzero = lb;
1300 must_be_nonzero = may_be_nonzero;
1301 }
1302 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
1303 {
1304 wide_int xor_mask = lb ^ ub;
1305 may_be_nonzero = lb | ub;
1306 must_be_nonzero = lb & ub;
1307 if (xor_mask != 0)
1308 {
1309 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
1310 may_be_nonzero.get_precision ());
1311 may_be_nonzero = may_be_nonzero | mask;
1312 must_be_nonzero = wi::bit_and_not (must_be_nonzero, mask);
1313 }
1314 }
1315 }
1316
1317 /* value_range wrapper for wide_int_range_set_zero_nonzero_bits above.
1318
1319 Return TRUE if VR was a constant range and we were able to compute
1320 the bit masks. */
1321
1322 bool
1323 vrp_set_zero_nonzero_bits (const tree expr_type,
1324 const value_range_base *vr,
1325 wide_int *may_be_nonzero,
1326 wide_int *must_be_nonzero)
1327 {
1328 if (!range_int_cst_p (vr))
1329 {
1330 *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1331 *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1332 return false;
1333 }
1334 wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
1335 wi::to_wide (vr->min ()),
1336 wi::to_wide (vr->max ()),
1337 *may_be_nonzero, *must_be_nonzero);
1338 return true;
1339 }
1340
1341 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
1342 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
1343 false otherwise. If *AR can be represented with a single range
1344 *VR1 will be VR_UNDEFINED. */
1345
1346 static bool
1347 ranges_from_anti_range (const value_range_base *ar,
1348 value_range_base *vr0, value_range_base *vr1,
1349 bool handle_pointers)
1350 {
1351 tree type = ar->type ();
1352
1353 vr0->set_undefined ();
1354 vr1->set_undefined ();
1355
1356 /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
1357 [A+1, +INF]. Not sure if this helps in practice, though. */
1358
1359 if (ar->kind () != VR_ANTI_RANGE
1360 || TREE_CODE (ar->min ()) != INTEGER_CST
1361 || TREE_CODE (ar->max ()) != INTEGER_CST
1362 || !vrp_val_min (type, handle_pointers)
1363 || !vrp_val_max (type, handle_pointers))
1364 return false;
1365
1366 if (tree_int_cst_lt (vrp_val_min (type, handle_pointers), ar->min ()))
1367 vr0->set (VR_RANGE,
1368 vrp_val_min (type, handle_pointers),
1369 wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
1370 if (tree_int_cst_lt (ar->max (), vrp_val_max (type, handle_pointers)))
1371 vr1->set (VR_RANGE,
1372 wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
1373 vrp_val_max (type, handle_pointers));
1374 if (vr0->undefined_p ())
1375 {
1376 *vr0 = *vr1;
1377 vr1->set_undefined ();
1378 }
1379
1380 return !vr0->undefined_p ();
1381 }
1382
1383 /* If BOUND will include a symbolic bound, adjust it accordingly,
1384 otherwise leave it as is.
1385
1386 CODE is the original operation that combined the bounds (PLUS_EXPR
1387 or MINUS_EXPR).
1388
1389 TYPE is the type of the original operation.
1390
1391 SYM_OPn is the symbolic for OPn if it has a symbolic.
1392
1393 NEG_OPn is TRUE if the OPn was negated. */
1394
1395 static void
1396 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
1397 tree sym_op0, tree sym_op1,
1398 bool neg_op0, bool neg_op1)
1399 {
1400 bool minus_p = (code == MINUS_EXPR);
1401 /* If the result bound is constant, we're done; otherwise, build the
1402 symbolic lower bound. */
1403 if (sym_op0 == sym_op1)
1404 ;
1405 else if (sym_op0)
1406 bound = build_symbolic_expr (type, sym_op0,
1407 neg_op0, bound);
1408 else if (sym_op1)
1409 {
1410 /* We may not negate if that might introduce
1411 undefined overflow. */
1412 if (!minus_p
1413 || neg_op1
1414 || TYPE_OVERFLOW_WRAPS (type))
1415 bound = build_symbolic_expr (type, sym_op1,
1416 neg_op1 ^ minus_p, bound);
1417 else
1418 bound = NULL_TREE;
1419 }
1420 }
1421
1422 /* Combine OP1 and OP1, which are two parts of a bound, into one wide
1423 int bound according to CODE. CODE is the operation combining the
1424 bound (either a PLUS_EXPR or a MINUS_EXPR).
1425
1426 TYPE is the type of the combine operation.
1427
1428 WI is the wide int to store the result.
1429
1430 OVF is -1 if an underflow occurred, +1 if an overflow occurred or 0
1431 if over/underflow occurred. */
1432
1433 static void
1434 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
1435 tree type, tree op0, tree op1)
1436 {
1437 bool minus_p = (code == MINUS_EXPR);
1438 const signop sgn = TYPE_SIGN (type);
1439 const unsigned int prec = TYPE_PRECISION (type);
1440
1441 /* Combine the bounds, if any. */
1442 if (op0 && op1)
1443 {
1444 if (minus_p)
1445 wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1446 else
1447 wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
1448 }
1449 else if (op0)
1450 wi = wi::to_wide (op0);
1451 else if (op1)
1452 {
1453 if (minus_p)
1454 wi = wi::neg (wi::to_wide (op1), &ovf);
1455 else
1456 wi = wi::to_wide (op1);
1457 }
1458 else
1459 wi = wi::shwi (0, prec);
1460 }
1461
1462 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
1463 put the result in VR.
1464
1465 TYPE is the type of the range.
1466
1467 MIN_OVF and MAX_OVF indicate what type of overflow, if any,
1468 occurred while originally calculating WMIN or WMAX. -1 indicates
1469 underflow. +1 indicates overflow. 0 indicates neither. */
1470
1471 static void
1472 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
1473 tree type,
1474 const wide_int &wmin, const wide_int &wmax,
1475 wi::overflow_type min_ovf,
1476 wi::overflow_type max_ovf)
1477 {
1478 const signop sgn = TYPE_SIGN (type);
1479 const unsigned int prec = TYPE_PRECISION (type);
1480
1481 /* For one bit precision if max < min, then the swapped
1482 range covers all values. */
1483 if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
1484 {
1485 kind = VR_VARYING;
1486 return;
1487 }
1488
1489 if (TYPE_OVERFLOW_WRAPS (type))
1490 {
1491 /* If overflow wraps, truncate the values and adjust the
1492 range kind and bounds appropriately. */
1493 wide_int tmin = wide_int::from (wmin, prec, sgn);
1494 wide_int tmax = wide_int::from (wmax, prec, sgn);
1495 if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
1496 {
1497 /* If the limits are swapped, we wrapped around and cover
1498 the entire range. */
1499 if (wi::gt_p (tmin, tmax, sgn))
1500 kind = VR_VARYING;
1501 else
1502 {
1503 kind = VR_RANGE;
1504 /* No overflow or both overflow or underflow. The
1505 range kind stays VR_RANGE. */
1506 min = wide_int_to_tree (type, tmin);
1507 max = wide_int_to_tree (type, tmax);
1508 }
1509 return;
1510 }
1511 else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
1512 || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
1513 {
1514 /* Min underflow or max overflow. The range kind
1515 changes to VR_ANTI_RANGE. */
1516 bool covers = false;
1517 wide_int tem = tmin;
1518 tmin = tmax + 1;
1519 if (wi::cmp (tmin, tmax, sgn) < 0)
1520 covers = true;
1521 tmax = tem - 1;
1522 if (wi::cmp (tmax, tem, sgn) > 0)
1523 covers = true;
1524 /* If the anti-range would cover nothing, drop to varying.
1525 Likewise if the anti-range bounds are outside of the
1526 types values. */
1527 if (covers || wi::cmp (tmin, tmax, sgn) > 0)
1528 {
1529 kind = VR_VARYING;
1530 return;
1531 }
1532 kind = VR_ANTI_RANGE;
1533 min = wide_int_to_tree (type, tmin);
1534 max = wide_int_to_tree (type, tmax);
1535 return;
1536 }
1537 else
1538 {
1539 /* Other underflow and/or overflow, drop to VR_VARYING. */
1540 kind = VR_VARYING;
1541 return;
1542 }
1543 }
1544 else
1545 {
1546 /* If overflow does not wrap, saturate to the types min/max
1547 value. */
1548 wide_int type_min = wi::min_value (prec, sgn);
1549 wide_int type_max = wi::max_value (prec, sgn);
1550 kind = VR_RANGE;
1551 if (min_ovf == wi::OVF_UNDERFLOW)
1552 min = wide_int_to_tree (type, type_min);
1553 else if (min_ovf == wi::OVF_OVERFLOW)
1554 min = wide_int_to_tree (type, type_max);
1555 else
1556 min = wide_int_to_tree (type, wmin);
1557
1558 if (max_ovf == wi::OVF_UNDERFLOW)
1559 max = wide_int_to_tree (type, type_min);
1560 else if (max_ovf == wi::OVF_OVERFLOW)
1561 max = wide_int_to_tree (type, type_max);
1562 else
1563 max = wide_int_to_tree (type, wmax);
1564 }
1565 }
1566
1567 /* Fold two value range's of a POINTER_PLUS_EXPR into VR. */
1568
1569 static void
1570 extract_range_from_pointer_plus_expr (value_range_base *vr,
1571 enum tree_code code,
1572 tree expr_type,
1573 const value_range_base *vr0,
1574 const value_range_base *vr1)
1575 {
1576 gcc_checking_assert (POINTER_TYPE_P (expr_type)
1577 && code == POINTER_PLUS_EXPR);
1578 /* For pointer types, we are really only interested in asserting
1579 whether the expression evaluates to non-NULL.
1580 With -fno-delete-null-pointer-checks we need to be more
1581 conservative. As some object might reside at address 0,
1582 then some offset could be added to it and the same offset
1583 subtracted again and the result would be NULL.
1584 E.g.
1585 static int a[12]; where &a[0] is NULL and
1586 ptr = &a[6];
1587 ptr -= 6;
1588 ptr will be NULL here, even when there is POINTER_PLUS_EXPR
1589 where the first range doesn't include zero and the second one
1590 doesn't either. As the second operand is sizetype (unsigned),
1591 consider all ranges where the MSB could be set as possible
1592 subtractions where the result might be NULL. */
1593 if ((!range_includes_zero_p (vr0)
1594 || !range_includes_zero_p (vr1))
1595 && !TYPE_OVERFLOW_WRAPS (expr_type)
1596 && (flag_delete_null_pointer_checks
1597 || (range_int_cst_p (vr1)
1598 && !tree_int_cst_sign_bit (vr1->max ()))))
1599 vr->set_nonzero (expr_type);
1600 else if (vr0->zero_p () && vr1->zero_p ())
1601 vr->set_zero (expr_type);
1602 else
1603 vr->set_varying (expr_type);
1604 }
1605
1606 /* Extract range information from a PLUS/MINUS_EXPR and store the
1607 result in *VR. */
1608
1609 static void
1610 extract_range_from_plus_minus_expr (value_range_base *vr,
1611 enum tree_code code,
1612 tree expr_type,
1613 const value_range_base *vr0_,
1614 const value_range_base *vr1_)
1615 {
1616 gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
1617
1618 value_range_base vr0 = *vr0_, vr1 = *vr1_;
1619 value_range_base vrtem0, vrtem1;
1620
1621 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1622 and express ~[] op X as ([]' op X) U ([]'' op X). */
1623 if (vr0.kind () == VR_ANTI_RANGE
1624 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1625 {
1626 extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
1627 if (!vrtem1.undefined_p ())
1628 {
1629 value_range_base vrres;
1630 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
1631 &vrtem1, vr1_);
1632 vr->union_ (&vrres);
1633 }
1634 return;
1635 }
1636 /* Likewise for X op ~[]. */
1637 if (vr1.kind () == VR_ANTI_RANGE
1638 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
1639 {
1640 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
1641 if (!vrtem1.undefined_p ())
1642 {
1643 value_range_base vrres;
1644 extract_range_from_plus_minus_expr (&vrres, code, expr_type,
1645 vr0_, &vrtem1);
1646 vr->union_ (&vrres);
1647 }
1648 return;
1649 }
1650
1651 value_range_kind kind;
1652 value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
1653 tree vr0_min = vr0.min (), vr0_max = vr0.max ();
1654 tree vr1_min = vr1.min (), vr1_max = vr1.max ();
1655 tree min = NULL, max = NULL;
1656
1657 /* This will normalize things such that calculating
1658 [0,0] - VR_VARYING is not dropped to varying, but is
1659 calculated as [MIN+1, MAX]. */
1660 if (vr0.varying_p ())
1661 {
1662 vr0_kind = VR_RANGE;
1663 vr0_min = vrp_val_min (expr_type);
1664 vr0_max = vrp_val_max (expr_type);
1665 }
1666 if (vr1.varying_p ())
1667 {
1668 vr1_kind = VR_RANGE;
1669 vr1_min = vrp_val_min (expr_type);
1670 vr1_max = vrp_val_max (expr_type);
1671 }
1672
1673 const bool minus_p = (code == MINUS_EXPR);
1674 tree min_op0 = vr0_min;
1675 tree min_op1 = minus_p ? vr1_max : vr1_min;
1676 tree max_op0 = vr0_max;
1677 tree max_op1 = minus_p ? vr1_min : vr1_max;
1678 tree sym_min_op0 = NULL_TREE;
1679 tree sym_min_op1 = NULL_TREE;
1680 tree sym_max_op0 = NULL_TREE;
1681 tree sym_max_op1 = NULL_TREE;
1682 bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1683
1684 neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1685
1686 /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1687 single-symbolic ranges, try to compute the precise resulting range,
1688 but only if we know that this resulting range will also be constant
1689 or single-symbolic. */
1690 if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
1691 && (TREE_CODE (min_op0) == INTEGER_CST
1692 || (sym_min_op0
1693 = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1694 && (TREE_CODE (min_op1) == INTEGER_CST
1695 || (sym_min_op1
1696 = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1697 && (!(sym_min_op0 && sym_min_op1)
1698 || (sym_min_op0 == sym_min_op1
1699 && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1700 && (TREE_CODE (max_op0) == INTEGER_CST
1701 || (sym_max_op0
1702 = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1703 && (TREE_CODE (max_op1) == INTEGER_CST
1704 || (sym_max_op1
1705 = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1706 && (!(sym_max_op0 && sym_max_op1)
1707 || (sym_max_op0 == sym_max_op1
1708 && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1709 {
1710 wide_int wmin, wmax;
1711 wi::overflow_type min_ovf = wi::OVF_NONE;
1712 wi::overflow_type max_ovf = wi::OVF_NONE;
1713
1714 /* Build the bounds. */
1715 combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1716 combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1717
1718 /* If we have overflow for the constant part and the resulting
1719 range will be symbolic, drop to VR_VARYING. */
1720 if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
1721 || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
1722 {
1723 vr->set_varying (expr_type);
1724 return;
1725 }
1726
1727 /* Adjust the range for possible overflow. */
1728 min = NULL_TREE;
1729 max = NULL_TREE;
1730 set_value_range_with_overflow (kind, min, max, expr_type,
1731 wmin, wmax, min_ovf, max_ovf);
1732 if (kind == VR_VARYING)
1733 {
1734 vr->set_varying (expr_type);
1735 return;
1736 }
1737
1738 /* Build the symbolic bounds if needed. */
1739 adjust_symbolic_bound (min, code, expr_type,
1740 sym_min_op0, sym_min_op1,
1741 neg_min_op0, neg_min_op1);
1742 adjust_symbolic_bound (max, code, expr_type,
1743 sym_max_op0, sym_max_op1,
1744 neg_max_op0, neg_max_op1);
1745 }
1746 else
1747 {
1748 /* For other cases, for example if we have a PLUS_EXPR with two
1749 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
1750 to compute a precise range for such a case.
1751 ??? General even mixed range kind operations can be expressed
1752 by for example transforming ~[3, 5] + [1, 2] to range-only
1753 operations and a union primitive:
1754 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
1755 [-INF+1, 4] U [6, +INF(OVF)]
1756 though usually the union is not exactly representable with
1757 a single range or anti-range as the above is
1758 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1759 but one could use a scheme similar to equivalences for this. */
1760 vr->set_varying (expr_type);
1761 return;
1762 }
1763
1764 /* If either MIN or MAX overflowed, then set the resulting range to
1765 VARYING. */
1766 if (min == NULL_TREE
1767 || TREE_OVERFLOW_P (min)
1768 || max == NULL_TREE
1769 || TREE_OVERFLOW_P (max))
1770 {
1771 vr->set_varying (expr_type);
1772 return;
1773 }
1774
1775 int cmp = compare_values (min, max);
1776 if (cmp == -2 || cmp == 1)
1777 {
1778 /* If the new range has its limits swapped around (MIN > MAX),
1779 then the operation caused one of them to wrap around, mark
1780 the new range VARYING. */
1781 vr->set_varying (expr_type);
1782 }
1783 else
1784 vr->set (kind, min, max);
1785 }
1786
1787 /* Return the range-ops handler for CODE and EXPR_TYPE. If no
1788 suitable operator is found, return NULL and set VR to VARYING. */
1789
1790 static const range_operator *
1791 get_range_op_handler (value_range_base *vr,
1792 enum tree_code code,
1793 tree expr_type)
1794 {
1795 const range_operator *op = range_op_handler (code, expr_type);
1796 if (!op)
1797 vr->set_varying (expr_type);
1798 return op;
1799 }
1800
1801 /* If the types passed are supported, return TRUE, otherwise set VR to
1802 VARYING and return FALSE. */
1803
1804 static bool
1805 supported_types_p (value_range_base *vr,
1806 tree type0,
1807 tree type1 = NULL)
1808 {
1809 if (!value_range_base::supports_type_p (type0)
1810 || (type1 && !value_range_base::supports_type_p (type1)))
1811 {
1812 vr->set_varying (type0);
1813 return false;
1814 }
1815 return true;
1816 }
1817
1818 /* If any of the ranges passed are defined, return TRUE, otherwise set
1819 VR to UNDEFINED and return FALSE. */
1820
1821 static bool
1822 defined_ranges_p (value_range_base *vr,
1823 const value_range_base *vr0,
1824 const value_range_base *vr1 = NULL)
1825 {
1826 if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
1827 {
1828 vr->set_undefined ();
1829 return false;
1830 }
1831 return true;
1832 }
1833
1834 static value_range_base
1835 drop_undefines_to_varying (const value_range_base *vr, tree expr_type)
1836 {
1837 if (vr->undefined_p ())
1838 return value_range_base (expr_type);
1839 else
1840 return *vr;
1841 }
1842
1843 /* If any operand is symbolic, perform a binary operation on them and
1844 return TRUE, otherwise return FALSE. */
1845
1846 static bool
1847 range_fold_binary_symbolics_p (value_range_base *vr,
1848 tree_code code,
1849 tree expr_type,
1850 const value_range_base *vr0,
1851 const value_range_base *vr1)
1852 {
1853 if (vr0->symbolic_p () || vr1->symbolic_p ())
1854 {
1855 if ((code == PLUS_EXPR || code == MINUS_EXPR))
1856 {
1857 extract_range_from_plus_minus_expr (vr, code, expr_type, vr0, vr1);
1858 return true;
1859 }
1860 if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
1861 {
1862 extract_range_from_pointer_plus_expr (vr, code, expr_type, vr0, vr1);
1863 return true;
1864 }
1865 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1866 *vr = op->fold_range (expr_type,
1867 vr0->normalize_symbolics (),
1868 vr1->normalize_symbolics ());
1869 return true;
1870 }
1871 return false;
1872 }
1873
1874 /* If operand is symbolic, perform a unary operation on it and return
1875 TRUE, otherwise return FALSE. */
1876
1877 static bool
1878 range_fold_unary_symbolics_p (value_range_base *vr,
1879 tree_code code,
1880 tree expr_type,
1881 const value_range_base *vr0)
1882 {
1883 if (vr0->symbolic_p ())
1884 {
1885 if (code == NEGATE_EXPR)
1886 {
1887 /* -X is simply 0 - X. */
1888 value_range_base zero;
1889 zero.set_zero (vr0->type ());
1890 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1891 return true;
1892 }
1893 if (code == BIT_NOT_EXPR)
1894 {
1895 /* ~X is simply -1 - X. */
1896 value_range_base minusone;
1897 minusone.set (build_int_cst (vr0->type (), -1));
1898 range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1899 return true;
1900 }
1901 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1902 *vr = op->fold_range (expr_type,
1903 vr0->normalize_symbolics (),
1904 value_range_base (expr_type));
1905 return true;
1906 }
1907 return false;
1908 }
1909
1910 /* Perform a binary operation on a pair of ranges. */
1911
1912 void
1913 range_fold_binary_expr (value_range_base *vr,
1914 enum tree_code code,
1915 tree expr_type,
1916 const value_range_base *vr0_,
1917 const value_range_base *vr1_)
1918 {
1919 if (!supported_types_p (vr, expr_type)
1920 || !defined_ranges_p (vr, vr0_, vr1_))
1921 return;
1922 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1923 if (!op)
1924 return;
1925
1926 value_range_base vr0 = drop_undefines_to_varying (vr0_, expr_type);
1927 value_range_base vr1 = drop_undefines_to_varying (vr1_, expr_type);
1928 if (range_fold_binary_symbolics_p (vr, code, expr_type, &vr0, &vr1))
1929 return;
1930
1931 *vr = op->fold_range (expr_type,
1932 vr0.normalize_addresses (),
1933 vr1.normalize_addresses ());
1934 }
1935
1936 /* Perform a unary operation on a range. */
1937
1938 void
1939 range_fold_unary_expr (value_range_base *vr,
1940 enum tree_code code, tree expr_type,
1941 const value_range_base *vr0,
1942 tree vr0_type)
1943 {
1944 if (!supported_types_p (vr, expr_type, vr0_type)
1945 || !defined_ranges_p (vr, vr0))
1946 return;
1947 const range_operator *op = get_range_op_handler (vr, code, expr_type);
1948 if (!op)
1949 return;
1950
1951 if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1952 return;
1953
1954 *vr = op->fold_range (expr_type,
1955 vr0->normalize_addresses (),
1956 value_range_base (expr_type));
1957 }
1958
1959 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
1960 create a new SSA name N and return the assertion assignment
1961 'N = ASSERT_EXPR <V, V OP W>'. */
1962
1963 static gimple *
1964 build_assert_expr_for (tree cond, tree v)
1965 {
1966 tree a;
1967 gassign *assertion;
1968
1969 gcc_assert (TREE_CODE (v) == SSA_NAME
1970 && COMPARISON_CLASS_P (cond));
1971
1972 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
1973 assertion = gimple_build_assign (NULL_TREE, a);
1974
1975 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
1976 operand of the ASSERT_EXPR. Create it so the new name and the old one
1977 are registered in the replacement table so that we can fix the SSA web
1978 after adding all the ASSERT_EXPRs. */
1979 tree new_def = create_new_def_for (v, assertion, NULL);
1980 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
1981 given we have to be able to fully propagate those out to re-create
1982 valid SSA when removing the asserts. */
1983 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
1984 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
1985
1986 return assertion;
1987 }
1988
1989
1990 /* Return false if EXPR is a predicate expression involving floating
1991 point values. */
1992
1993 static inline bool
1994 fp_predicate (gimple *stmt)
1995 {
1996 GIMPLE_CHECK (stmt, GIMPLE_COND);
1997
1998 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
1999 }
2000
2001 /* If the range of values taken by OP can be inferred after STMT executes,
2002 return the comparison code (COMP_CODE_P) and value (VAL_P) that
2003 describes the inferred range. Return true if a range could be
2004 inferred. */
2005
2006 bool
2007 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2008 {
2009 *val_p = NULL_TREE;
2010 *comp_code_p = ERROR_MARK;
2011
2012 /* Do not attempt to infer anything in names that flow through
2013 abnormal edges. */
2014 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2015 return false;
2016
2017 /* If STMT is the last statement of a basic block with no normal
2018 successors, there is no point inferring anything about any of its
2019 operands. We would not be able to find a proper insertion point
2020 for the assertion, anyway. */
2021 if (stmt_ends_bb_p (stmt))
2022 {
2023 edge_iterator ei;
2024 edge e;
2025
2026 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2027 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2028 break;
2029 if (e == NULL)
2030 return false;
2031 }
2032
2033 if (infer_nonnull_range (stmt, op))
2034 {
2035 *val_p = build_int_cst (TREE_TYPE (op), 0);
2036 *comp_code_p = NE_EXPR;
2037 return true;
2038 }
2039
2040 return false;
2041 }
2042
2043
2044 void dump_asserts_for (FILE *, tree);
2045 void debug_asserts_for (tree);
2046 void dump_all_asserts (FILE *);
2047 void debug_all_asserts (void);
2048
2049 /* Dump all the registered assertions for NAME to FILE. */
2050
2051 void
2052 dump_asserts_for (FILE *file, tree name)
2053 {
2054 assert_locus *loc;
2055
2056 fprintf (file, "Assertions to be inserted for ");
2057 print_generic_expr (file, name);
2058 fprintf (file, "\n");
2059
2060 loc = asserts_for[SSA_NAME_VERSION (name)];
2061 while (loc)
2062 {
2063 fprintf (file, "\t");
2064 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2065 fprintf (file, "\n\tBB #%d", loc->bb->index);
2066 if (loc->e)
2067 {
2068 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2069 loc->e->dest->index);
2070 dump_edge_info (file, loc->e, dump_flags, 0);
2071 }
2072 fprintf (file, "\n\tPREDICATE: ");
2073 print_generic_expr (file, loc->expr);
2074 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2075 print_generic_expr (file, loc->val);
2076 fprintf (file, "\n\n");
2077 loc = loc->next;
2078 }
2079
2080 fprintf (file, "\n");
2081 }
2082
2083
2084 /* Dump all the registered assertions for NAME to stderr. */
2085
2086 DEBUG_FUNCTION void
2087 debug_asserts_for (tree name)
2088 {
2089 dump_asserts_for (stderr, name);
2090 }
2091
2092
2093 /* Dump all the registered assertions for all the names to FILE. */
2094
2095 void
2096 dump_all_asserts (FILE *file)
2097 {
2098 unsigned i;
2099 bitmap_iterator bi;
2100
2101 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2102 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2103 dump_asserts_for (file, ssa_name (i));
2104 fprintf (file, "\n");
2105 }
2106
2107
2108 /* Dump all the registered assertions for all the names to stderr. */
2109
2110 DEBUG_FUNCTION void
2111 debug_all_asserts (void)
2112 {
2113 dump_all_asserts (stderr);
2114 }
2115
2116 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2117
2118 static void
2119 add_assert_info (vec<assert_info> &asserts,
2120 tree name, tree expr, enum tree_code comp_code, tree val)
2121 {
2122 assert_info info;
2123 info.comp_code = comp_code;
2124 info.name = name;
2125 if (TREE_OVERFLOW_P (val))
2126 val = drop_tree_overflow (val);
2127 info.val = val;
2128 info.expr = expr;
2129 asserts.safe_push (info);
2130 if (dump_enabled_p ())
2131 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
2132 "Adding assert for %T from %T %s %T\n",
2133 name, expr, op_symbol_code (comp_code), val);
2134 }
2135
2136 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2137 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2138 E->DEST, then register this location as a possible insertion point
2139 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2140
2141 BB, E and SI provide the exact insertion point for the new
2142 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2143 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2144 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2145 must not be NULL. */
2146
2147 static void
2148 register_new_assert_for (tree name, tree expr,
2149 enum tree_code comp_code,
2150 tree val,
2151 basic_block bb,
2152 edge e,
2153 gimple_stmt_iterator si)
2154 {
2155 assert_locus *n, *loc, *last_loc;
2156 basic_block dest_bb;
2157
2158 gcc_checking_assert (bb == NULL || e == NULL);
2159
2160 if (e == NULL)
2161 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2162 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2163
2164 /* Never build an assert comparing against an integer constant with
2165 TREE_OVERFLOW set. This confuses our undefined overflow warning
2166 machinery. */
2167 if (TREE_OVERFLOW_P (val))
2168 val = drop_tree_overflow (val);
2169
2170 /* The new assertion A will be inserted at BB or E. We need to
2171 determine if the new location is dominated by a previously
2172 registered location for A. If we are doing an edge insertion,
2173 assume that A will be inserted at E->DEST. Note that this is not
2174 necessarily true.
2175
2176 If E is a critical edge, it will be split. But even if E is
2177 split, the new block will dominate the same set of blocks that
2178 E->DEST dominates.
2179
2180 The reverse, however, is not true, blocks dominated by E->DEST
2181 will not be dominated by the new block created to split E. So,
2182 if the insertion location is on a critical edge, we will not use
2183 the new location to move another assertion previously registered
2184 at a block dominated by E->DEST. */
2185 dest_bb = (bb) ? bb : e->dest;
2186
2187 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2188 VAL at a block dominating DEST_BB, then we don't need to insert a new
2189 one. Similarly, if the same assertion already exists at a block
2190 dominated by DEST_BB and the new location is not on a critical
2191 edge, then update the existing location for the assertion (i.e.,
2192 move the assertion up in the dominance tree).
2193
2194 Note, this is implemented as a simple linked list because there
2195 should not be more than a handful of assertions registered per
2196 name. If this becomes a performance problem, a table hashed by
2197 COMP_CODE and VAL could be implemented. */
2198 loc = asserts_for[SSA_NAME_VERSION (name)];
2199 last_loc = loc;
2200 while (loc)
2201 {
2202 if (loc->comp_code == comp_code
2203 && (loc->val == val
2204 || operand_equal_p (loc->val, val, 0))
2205 && (loc->expr == expr
2206 || operand_equal_p (loc->expr, expr, 0)))
2207 {
2208 /* If E is not a critical edge and DEST_BB
2209 dominates the existing location for the assertion, move
2210 the assertion up in the dominance tree by updating its
2211 location information. */
2212 if ((e == NULL || !EDGE_CRITICAL_P (e))
2213 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2214 {
2215 loc->bb = dest_bb;
2216 loc->e = e;
2217 loc->si = si;
2218 return;
2219 }
2220 }
2221
2222 /* Update the last node of the list and move to the next one. */
2223 last_loc = loc;
2224 loc = loc->next;
2225 }
2226
2227 /* If we didn't find an assertion already registered for
2228 NAME COMP_CODE VAL, add a new one at the end of the list of
2229 assertions associated with NAME. */
2230 n = XNEW (struct assert_locus);
2231 n->bb = dest_bb;
2232 n->e = e;
2233 n->si = si;
2234 n->comp_code = comp_code;
2235 n->val = val;
2236 n->expr = expr;
2237 n->next = NULL;
2238
2239 if (last_loc)
2240 last_loc->next = n;
2241 else
2242 asserts_for[SSA_NAME_VERSION (name)] = n;
2243
2244 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2245 }
2246
2247 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2248 Extract a suitable test code and value and store them into *CODE_P and
2249 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2250
2251 If no extraction was possible, return FALSE, otherwise return TRUE.
2252
2253 If INVERT is true, then we invert the result stored into *CODE_P. */
2254
2255 static bool
2256 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2257 tree cond_op0, tree cond_op1,
2258 bool invert, enum tree_code *code_p,
2259 tree *val_p)
2260 {
2261 enum tree_code comp_code;
2262 tree val;
2263
2264 /* Otherwise, we have a comparison of the form NAME COMP VAL
2265 or VAL COMP NAME. */
2266 if (name == cond_op1)
2267 {
2268 /* If the predicate is of the form VAL COMP NAME, flip
2269 COMP around because we need to register NAME as the
2270 first operand in the predicate. */
2271 comp_code = swap_tree_comparison (cond_code);
2272 val = cond_op0;
2273 }
2274 else if (name == cond_op0)
2275 {
2276 /* The comparison is of the form NAME COMP VAL, so the
2277 comparison code remains unchanged. */
2278 comp_code = cond_code;
2279 val = cond_op1;
2280 }
2281 else
2282 gcc_unreachable ();
2283
2284 /* Invert the comparison code as necessary. */
2285 if (invert)
2286 comp_code = invert_tree_comparison (comp_code, 0);
2287
2288 /* VRP only handles integral and pointer types. */
2289 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2290 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2291 return false;
2292
2293 /* Do not register always-false predicates.
2294 FIXME: this works around a limitation in fold() when dealing with
2295 enumerations. Given 'enum { N1, N2 } x;', fold will not
2296 fold 'if (x > N2)' to 'if (0)'. */
2297 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2298 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2299 {
2300 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2301 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2302
2303 if (comp_code == GT_EXPR
2304 && (!max
2305 || compare_values (val, max) == 0))
2306 return false;
2307
2308 if (comp_code == LT_EXPR
2309 && (!min
2310 || compare_values (val, min) == 0))
2311 return false;
2312 }
2313 *code_p = comp_code;
2314 *val_p = val;
2315 return true;
2316 }
2317
2318 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
2319 (otherwise return VAL). VAL and MASK must be zero-extended for
2320 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2321 (to transform signed values into unsigned) and at the end xor
2322 SGNBIT back. */
2323
2324 static wide_int
2325 masked_increment (const wide_int &val_in, const wide_int &mask,
2326 const wide_int &sgnbit, unsigned int prec)
2327 {
2328 wide_int bit = wi::one (prec), res;
2329 unsigned int i;
2330
2331 wide_int val = val_in ^ sgnbit;
2332 for (i = 0; i < prec; i++, bit += bit)
2333 {
2334 res = mask;
2335 if ((res & bit) == 0)
2336 continue;
2337 res = bit - 1;
2338 res = wi::bit_and_not (val + bit, res);
2339 res &= mask;
2340 if (wi::gtu_p (res, val))
2341 return res ^ sgnbit;
2342 }
2343 return val ^ sgnbit;
2344 }
2345
2346 /* Helper for overflow_comparison_p
2347
2348 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2349 OP1's defining statement to see if it ultimately has the form
2350 OP0 CODE (OP0 PLUS INTEGER_CST)
2351
2352 If so, return TRUE indicating this is an overflow test and store into
2353 *NEW_CST an updated constant that can be used in a narrowed range test.
2354
2355 REVERSED indicates if the comparison was originally:
2356
2357 OP1 CODE' OP0.
2358
2359 This affects how we build the updated constant. */
2360
2361 static bool
2362 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2363 bool follow_assert_exprs, bool reversed, tree *new_cst)
2364 {
2365 /* See if this is a relational operation between two SSA_NAMES with
2366 unsigned, overflow wrapping values. If so, check it more deeply. */
2367 if ((code == LT_EXPR || code == LE_EXPR
2368 || code == GE_EXPR || code == GT_EXPR)
2369 && TREE_CODE (op0) == SSA_NAME
2370 && TREE_CODE (op1) == SSA_NAME
2371 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2372 && TYPE_UNSIGNED (TREE_TYPE (op0))
2373 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2374 {
2375 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2376
2377 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2378 if (follow_assert_exprs)
2379 {
2380 while (gimple_assign_single_p (op1_def)
2381 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2382 {
2383 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2384 if (TREE_CODE (op1) != SSA_NAME)
2385 break;
2386 op1_def = SSA_NAME_DEF_STMT (op1);
2387 }
2388 }
2389
2390 /* Now look at the defining statement of OP1 to see if it adds
2391 or subtracts a nonzero constant from another operand. */
2392 if (op1_def
2393 && is_gimple_assign (op1_def)
2394 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2395 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2396 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2397 {
2398 tree target = gimple_assign_rhs1 (op1_def);
2399
2400 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2401 for one where TARGET appears on the RHS. */
2402 if (follow_assert_exprs)
2403 {
2404 /* Now see if that "other operand" is op0, following the chain
2405 of ASSERT_EXPRs if necessary. */
2406 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2407 while (op0 != target
2408 && gimple_assign_single_p (op0_def)
2409 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2410 {
2411 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2412 if (TREE_CODE (op0) != SSA_NAME)
2413 break;
2414 op0_def = SSA_NAME_DEF_STMT (op0);
2415 }
2416 }
2417
2418 /* If we did not find our target SSA_NAME, then this is not
2419 an overflow test. */
2420 if (op0 != target)
2421 return false;
2422
2423 tree type = TREE_TYPE (op0);
2424 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2425 tree inc = gimple_assign_rhs2 (op1_def);
2426 if (reversed)
2427 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2428 else
2429 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2430 return true;
2431 }
2432 }
2433 return false;
2434 }
2435
2436 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2437 OP1's defining statement to see if it ultimately has the form
2438 OP0 CODE (OP0 PLUS INTEGER_CST)
2439
2440 If so, return TRUE indicating this is an overflow test and store into
2441 *NEW_CST an updated constant that can be used in a narrowed range test.
2442
2443 These statements are left as-is in the IL to facilitate discovery of
2444 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2445 the alternate range representation is often useful within VRP. */
2446
2447 bool
2448 overflow_comparison_p (tree_code code, tree name, tree val,
2449 bool use_equiv_p, tree *new_cst)
2450 {
2451 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2452 return true;
2453 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2454 use_equiv_p, true, new_cst);
2455 }
2456
2457
2458 /* Try to register an edge assertion for SSA name NAME on edge E for
2459 the condition COND contributing to the conditional jump pointed to by BSI.
2460 Invert the condition COND if INVERT is true. */
2461
2462 static void
2463 register_edge_assert_for_2 (tree name, edge e,
2464 enum tree_code cond_code,
2465 tree cond_op0, tree cond_op1, bool invert,
2466 vec<assert_info> &asserts)
2467 {
2468 tree val;
2469 enum tree_code comp_code;
2470
2471 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2472 cond_op0,
2473 cond_op1,
2474 invert, &comp_code, &val))
2475 return;
2476
2477 /* Queue the assert. */
2478 tree x;
2479 if (overflow_comparison_p (comp_code, name, val, false, &x))
2480 {
2481 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2482 ? GT_EXPR : LE_EXPR);
2483 add_assert_info (asserts, name, name, new_code, x);
2484 }
2485 add_assert_info (asserts, name, name, comp_code, val);
2486
2487 /* In the case of NAME <= CST and NAME being defined as
2488 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2489 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2490 This catches range and anti-range tests. */
2491 if ((comp_code == LE_EXPR
2492 || comp_code == GT_EXPR)
2493 && TREE_CODE (val) == INTEGER_CST
2494 && TYPE_UNSIGNED (TREE_TYPE (val)))
2495 {
2496 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2497 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2498
2499 /* Extract CST2 from the (optional) addition. */
2500 if (is_gimple_assign (def_stmt)
2501 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2502 {
2503 name2 = gimple_assign_rhs1 (def_stmt);
2504 cst2 = gimple_assign_rhs2 (def_stmt);
2505 if (TREE_CODE (name2) == SSA_NAME
2506 && TREE_CODE (cst2) == INTEGER_CST)
2507 def_stmt = SSA_NAME_DEF_STMT (name2);
2508 }
2509
2510 /* Extract NAME2 from the (optional) sign-changing cast. */
2511 if (gimple_assign_cast_p (def_stmt))
2512 {
2513 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2514 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2515 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2516 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2517 name3 = gimple_assign_rhs1 (def_stmt);
2518 }
2519
2520 /* If name3 is used later, create an ASSERT_EXPR for it. */
2521 if (name3 != NULL_TREE
2522 && TREE_CODE (name3) == SSA_NAME
2523 && (cst2 == NULL_TREE
2524 || TREE_CODE (cst2) == INTEGER_CST)
2525 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2526 {
2527 tree tmp;
2528
2529 /* Build an expression for the range test. */
2530 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2531 if (cst2 != NULL_TREE)
2532 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2533 add_assert_info (asserts, name3, tmp, comp_code, val);
2534 }
2535
2536 /* If name2 is used later, create an ASSERT_EXPR for it. */
2537 if (name2 != NULL_TREE
2538 && TREE_CODE (name2) == SSA_NAME
2539 && TREE_CODE (cst2) == INTEGER_CST
2540 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2541 {
2542 tree tmp;
2543
2544 /* Build an expression for the range test. */
2545 tmp = name2;
2546 if (TREE_TYPE (name) != TREE_TYPE (name2))
2547 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2548 if (cst2 != NULL_TREE)
2549 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2550 add_assert_info (asserts, name2, tmp, comp_code, val);
2551 }
2552 }
2553
2554 /* In the case of post-in/decrement tests like if (i++) ... and uses
2555 of the in/decremented value on the edge the extra name we want to
2556 assert for is not on the def chain of the name compared. Instead
2557 it is in the set of use stmts.
2558 Similar cases happen for conversions that were simplified through
2559 fold_{sign_changed,widened}_comparison. */
2560 if ((comp_code == NE_EXPR
2561 || comp_code == EQ_EXPR)
2562 && TREE_CODE (val) == INTEGER_CST)
2563 {
2564 imm_use_iterator ui;
2565 gimple *use_stmt;
2566 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2567 {
2568 if (!is_gimple_assign (use_stmt))
2569 continue;
2570
2571 /* Cut off to use-stmts that are dominating the predecessor. */
2572 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2573 continue;
2574
2575 tree name2 = gimple_assign_lhs (use_stmt);
2576 if (TREE_CODE (name2) != SSA_NAME)
2577 continue;
2578
2579 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2580 tree cst;
2581 if (code == PLUS_EXPR
2582 || code == MINUS_EXPR)
2583 {
2584 cst = gimple_assign_rhs2 (use_stmt);
2585 if (TREE_CODE (cst) != INTEGER_CST)
2586 continue;
2587 cst = int_const_binop (code, val, cst);
2588 }
2589 else if (CONVERT_EXPR_CODE_P (code))
2590 {
2591 /* For truncating conversions we cannot record
2592 an inequality. */
2593 if (comp_code == NE_EXPR
2594 && (TYPE_PRECISION (TREE_TYPE (name2))
2595 < TYPE_PRECISION (TREE_TYPE (name))))
2596 continue;
2597 cst = fold_convert (TREE_TYPE (name2), val);
2598 }
2599 else
2600 continue;
2601
2602 if (TREE_OVERFLOW_P (cst))
2603 cst = drop_tree_overflow (cst);
2604 add_assert_info (asserts, name2, name2, comp_code, cst);
2605 }
2606 }
2607
2608 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2609 && TREE_CODE (val) == INTEGER_CST)
2610 {
2611 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2612 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2613 tree val2 = NULL_TREE;
2614 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2615 wide_int mask = wi::zero (prec);
2616 unsigned int nprec = prec;
2617 enum tree_code rhs_code = ERROR_MARK;
2618
2619 if (is_gimple_assign (def_stmt))
2620 rhs_code = gimple_assign_rhs_code (def_stmt);
2621
2622 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2623 assert that A != CST1 -+ CST2. */
2624 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2625 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2626 {
2627 tree op0 = gimple_assign_rhs1 (def_stmt);
2628 tree op1 = gimple_assign_rhs2 (def_stmt);
2629 if (TREE_CODE (op0) == SSA_NAME
2630 && TREE_CODE (op1) == INTEGER_CST)
2631 {
2632 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2633 ? MINUS_EXPR : PLUS_EXPR);
2634 op1 = int_const_binop (reverse_op, val, op1);
2635 if (TREE_OVERFLOW (op1))
2636 op1 = drop_tree_overflow (op1);
2637 add_assert_info (asserts, op0, op0, comp_code, op1);
2638 }
2639 }
2640
2641 /* Add asserts for NAME cmp CST and NAME being defined
2642 as NAME = (int) NAME2. */
2643 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2644 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2645 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2646 && gimple_assign_cast_p (def_stmt))
2647 {
2648 name2 = gimple_assign_rhs1 (def_stmt);
2649 if (CONVERT_EXPR_CODE_P (rhs_code)
2650 && TREE_CODE (name2) == SSA_NAME
2651 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2652 && TYPE_UNSIGNED (TREE_TYPE (name2))
2653 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2654 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2655 || !tree_int_cst_equal (val,
2656 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2657 {
2658 tree tmp, cst;
2659 enum tree_code new_comp_code = comp_code;
2660
2661 cst = fold_convert (TREE_TYPE (name2),
2662 TYPE_MIN_VALUE (TREE_TYPE (val)));
2663 /* Build an expression for the range test. */
2664 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2665 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2666 fold_convert (TREE_TYPE (name2), val));
2667 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2668 {
2669 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2670 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2671 build_int_cst (TREE_TYPE (name2), 1));
2672 }
2673 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2674 }
2675 }
2676
2677 /* Add asserts for NAME cmp CST and NAME being defined as
2678 NAME = NAME2 >> CST2.
2679
2680 Extract CST2 from the right shift. */
2681 if (rhs_code == RSHIFT_EXPR)
2682 {
2683 name2 = gimple_assign_rhs1 (def_stmt);
2684 cst2 = gimple_assign_rhs2 (def_stmt);
2685 if (TREE_CODE (name2) == SSA_NAME
2686 && tree_fits_uhwi_p (cst2)
2687 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2688 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2689 && type_has_mode_precision_p (TREE_TYPE (val)))
2690 {
2691 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2692 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2693 }
2694 }
2695 if (val2 != NULL_TREE
2696 && TREE_CODE (val2) == INTEGER_CST
2697 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2698 TREE_TYPE (val),
2699 val2, cst2), val))
2700 {
2701 enum tree_code new_comp_code = comp_code;
2702 tree tmp, new_val;
2703
2704 tmp = name2;
2705 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2706 {
2707 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2708 {
2709 tree type = build_nonstandard_integer_type (prec, 1);
2710 tmp = build1 (NOP_EXPR, type, name2);
2711 val2 = fold_convert (type, val2);
2712 }
2713 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2714 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2715 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2716 }
2717 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2718 {
2719 wide_int minval
2720 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2721 new_val = val2;
2722 if (minval == wi::to_wide (new_val))
2723 new_val = NULL_TREE;
2724 }
2725 else
2726 {
2727 wide_int maxval
2728 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2729 mask |= wi::to_wide (val2);
2730 if (wi::eq_p (mask, maxval))
2731 new_val = NULL_TREE;
2732 else
2733 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2734 }
2735
2736 if (new_val)
2737 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2738 }
2739
2740 /* If we have a conversion that doesn't change the value of the source
2741 simply register the same assert for it. */
2742 if (CONVERT_EXPR_CODE_P (rhs_code))
2743 {
2744 wide_int rmin, rmax;
2745 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2746 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2747 && TREE_CODE (rhs1) == SSA_NAME
2748 /* Make sure the relation preserves the upper/lower boundary of
2749 the range conservatively. */
2750 && (comp_code == NE_EXPR
2751 || comp_code == EQ_EXPR
2752 || (TYPE_SIGN (TREE_TYPE (name))
2753 == TYPE_SIGN (TREE_TYPE (rhs1)))
2754 || ((comp_code == LE_EXPR
2755 || comp_code == LT_EXPR)
2756 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
2757 || ((comp_code == GE_EXPR
2758 || comp_code == GT_EXPR)
2759 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
2760 /* And the conversion does not alter the value we compare
2761 against and all values in rhs1 can be represented in
2762 the converted to type. */
2763 && int_fits_type_p (val, TREE_TYPE (rhs1))
2764 && ((TYPE_PRECISION (TREE_TYPE (name))
2765 > TYPE_PRECISION (TREE_TYPE (rhs1)))
2766 || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
2767 && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
2768 && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
2769 add_assert_info (asserts, rhs1, rhs1,
2770 comp_code, fold_convert (TREE_TYPE (rhs1), val));
2771 }
2772
2773 /* Add asserts for NAME cmp CST and NAME being defined as
2774 NAME = NAME2 & CST2.
2775
2776 Extract CST2 from the and.
2777
2778 Also handle
2779 NAME = (unsigned) NAME2;
2780 casts where NAME's type is unsigned and has smaller precision
2781 than NAME2's type as if it was NAME = NAME2 & MASK. */
2782 names[0] = NULL_TREE;
2783 names[1] = NULL_TREE;
2784 cst2 = NULL_TREE;
2785 if (rhs_code == BIT_AND_EXPR
2786 || (CONVERT_EXPR_CODE_P (rhs_code)
2787 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2788 && TYPE_UNSIGNED (TREE_TYPE (val))
2789 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2790 > prec))
2791 {
2792 name2 = gimple_assign_rhs1 (def_stmt);
2793 if (rhs_code == BIT_AND_EXPR)
2794 cst2 = gimple_assign_rhs2 (def_stmt);
2795 else
2796 {
2797 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2798 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2799 }
2800 if (TREE_CODE (name2) == SSA_NAME
2801 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2802 && TREE_CODE (cst2) == INTEGER_CST
2803 && !integer_zerop (cst2)
2804 && (nprec > 1
2805 || TYPE_UNSIGNED (TREE_TYPE (val))))
2806 {
2807 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2808 if (gimple_assign_cast_p (def_stmt2))
2809 {
2810 names[1] = gimple_assign_rhs1 (def_stmt2);
2811 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2812 || TREE_CODE (names[1]) != SSA_NAME
2813 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2814 || (TYPE_PRECISION (TREE_TYPE (name2))
2815 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2816 names[1] = NULL_TREE;
2817 }
2818 names[0] = name2;
2819 }
2820 }
2821 if (names[0] || names[1])
2822 {
2823 wide_int minv, maxv, valv, cst2v;
2824 wide_int tem, sgnbit;
2825 bool valid_p = false, valn, cst2n;
2826 enum tree_code ccode = comp_code;
2827
2828 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2829 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2830 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2831 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2832 /* If CST2 doesn't have most significant bit set,
2833 but VAL is negative, we have comparison like
2834 if ((x & 0x123) > -4) (always true). Just give up. */
2835 if (!cst2n && valn)
2836 ccode = ERROR_MARK;
2837 if (cst2n)
2838 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2839 else
2840 sgnbit = wi::zero (nprec);
2841 minv = valv & cst2v;
2842 switch (ccode)
2843 {
2844 case EQ_EXPR:
2845 /* Minimum unsigned value for equality is VAL & CST2
2846 (should be equal to VAL, otherwise we probably should
2847 have folded the comparison into false) and
2848 maximum unsigned value is VAL | ~CST2. */
2849 maxv = valv | ~cst2v;
2850 valid_p = true;
2851 break;
2852
2853 case NE_EXPR:
2854 tem = valv | ~cst2v;
2855 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2856 if (valv == 0)
2857 {
2858 cst2n = false;
2859 sgnbit = wi::zero (nprec);
2860 goto gt_expr;
2861 }
2862 /* If (VAL | ~CST2) is all ones, handle it as
2863 (X & CST2) < VAL. */
2864 if (tem == -1)
2865 {
2866 cst2n = false;
2867 valn = false;
2868 sgnbit = wi::zero (nprec);
2869 goto lt_expr;
2870 }
2871 if (!cst2n && wi::neg_p (cst2v))
2872 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2873 if (sgnbit != 0)
2874 {
2875 if (valv == sgnbit)
2876 {
2877 cst2n = true;
2878 valn = true;
2879 goto gt_expr;
2880 }
2881 if (tem == wi::mask (nprec - 1, false, nprec))
2882 {
2883 cst2n = true;
2884 goto lt_expr;
2885 }
2886 if (!cst2n)
2887 sgnbit = wi::zero (nprec);
2888 }
2889 break;
2890
2891 case GE_EXPR:
2892 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2893 is VAL and maximum unsigned value is ~0. For signed
2894 comparison, if CST2 doesn't have most significant bit
2895 set, handle it similarly. If CST2 has MSB set,
2896 the minimum is the same, and maximum is ~0U/2. */
2897 if (minv != valv)
2898 {
2899 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2900 VAL. */
2901 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2902 if (minv == valv)
2903 break;
2904 }
2905 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2906 valid_p = true;
2907 break;
2908
2909 case GT_EXPR:
2910 gt_expr:
2911 /* Find out smallest MINV where MINV > VAL
2912 && (MINV & CST2) == MINV, if any. If VAL is signed and
2913 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2914 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2915 if (minv == valv)
2916 break;
2917 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2918 valid_p = true;
2919 break;
2920
2921 case LE_EXPR:
2922 /* Minimum unsigned value for <= is 0 and maximum
2923 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2924 Otherwise, find smallest VAL2 where VAL2 > VAL
2925 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2926 as maximum.
2927 For signed comparison, if CST2 doesn't have most
2928 significant bit set, handle it similarly. If CST2 has
2929 MSB set, the maximum is the same and minimum is INT_MIN. */
2930 if (minv == valv)
2931 maxv = valv;
2932 else
2933 {
2934 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2935 if (maxv == valv)
2936 break;
2937 maxv -= 1;
2938 }
2939 maxv |= ~cst2v;
2940 minv = sgnbit;
2941 valid_p = true;
2942 break;
2943
2944 case LT_EXPR:
2945 lt_expr:
2946 /* Minimum unsigned value for < is 0 and maximum
2947 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2948 Otherwise, find smallest VAL2 where VAL2 > VAL
2949 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2950 as maximum.
2951 For signed comparison, if CST2 doesn't have most
2952 significant bit set, handle it similarly. If CST2 has
2953 MSB set, the maximum is the same and minimum is INT_MIN. */
2954 if (minv == valv)
2955 {
2956 if (valv == sgnbit)
2957 break;
2958 maxv = valv;
2959 }
2960 else
2961 {
2962 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2963 if (maxv == valv)
2964 break;
2965 }
2966 maxv -= 1;
2967 maxv |= ~cst2v;
2968 minv = sgnbit;
2969 valid_p = true;
2970 break;
2971
2972 default:
2973 break;
2974 }
2975 if (valid_p
2976 && (maxv - minv) != -1)
2977 {
2978 tree tmp, new_val, type;
2979 int i;
2980
2981 for (i = 0; i < 2; i++)
2982 if (names[i])
2983 {
2984 wide_int maxv2 = maxv;
2985 tmp = names[i];
2986 type = TREE_TYPE (names[i]);
2987 if (!TYPE_UNSIGNED (type))
2988 {
2989 type = build_nonstandard_integer_type (nprec, 1);
2990 tmp = build1 (NOP_EXPR, type, names[i]);
2991 }
2992 if (minv != 0)
2993 {
2994 tmp = build2 (PLUS_EXPR, type, tmp,
2995 wide_int_to_tree (type, -minv));
2996 maxv2 = maxv - minv;
2997 }
2998 new_val = wide_int_to_tree (type, maxv2);
2999 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3000 }
3001 }
3002 }
3003 }
3004 }
3005
3006 /* OP is an operand of a truth value expression which is known to have
3007 a particular value. Register any asserts for OP and for any
3008 operands in OP's defining statement.
3009
3010 If CODE is EQ_EXPR, then we want to register OP is zero (false),
3011 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
3012
3013 static void
3014 register_edge_assert_for_1 (tree op, enum tree_code code,
3015 edge e, vec<assert_info> &asserts)
3016 {
3017 gimple *op_def;
3018 tree val;
3019 enum tree_code rhs_code;
3020
3021 /* We only care about SSA_NAMEs. */
3022 if (TREE_CODE (op) != SSA_NAME)
3023 return;
3024
3025 /* We know that OP will have a zero or nonzero value. */
3026 val = build_int_cst (TREE_TYPE (op), 0);
3027 add_assert_info (asserts, op, op, code, val);
3028
3029 /* Now look at how OP is set. If it's set from a comparison,
3030 a truth operation or some bit operations, then we may be able
3031 to register information about the operands of that assignment. */
3032 op_def = SSA_NAME_DEF_STMT (op);
3033 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3034 return;
3035
3036 rhs_code = gimple_assign_rhs_code (op_def);
3037
3038 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3039 {
3040 bool invert = (code == EQ_EXPR ? true : false);
3041 tree op0 = gimple_assign_rhs1 (op_def);
3042 tree op1 = gimple_assign_rhs2 (op_def);
3043
3044 if (TREE_CODE (op0) == SSA_NAME)
3045 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3046 if (TREE_CODE (op1) == SSA_NAME)
3047 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3048 }
3049 else if ((code == NE_EXPR
3050 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3051 || (code == EQ_EXPR
3052 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3053 {
3054 /* Recurse on each operand. */
3055 tree op0 = gimple_assign_rhs1 (op_def);
3056 tree op1 = gimple_assign_rhs2 (op_def);
3057 if (TREE_CODE (op0) == SSA_NAME
3058 && has_single_use (op0))
3059 register_edge_assert_for_1 (op0, code, e, asserts);
3060 if (TREE_CODE (op1) == SSA_NAME
3061 && has_single_use (op1))
3062 register_edge_assert_for_1 (op1, code, e, asserts);
3063 }
3064 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3065 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3066 {
3067 /* Recurse, flipping CODE. */
3068 code = invert_tree_comparison (code, false);
3069 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3070 }
3071 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3072 {
3073 /* Recurse through the copy. */
3074 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3075 }
3076 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3077 {
3078 /* Recurse through the type conversion, unless it is a narrowing
3079 conversion or conversion from non-integral type. */
3080 tree rhs = gimple_assign_rhs1 (op_def);
3081 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3082 && (TYPE_PRECISION (TREE_TYPE (rhs))
3083 <= TYPE_PRECISION (TREE_TYPE (op))))
3084 register_edge_assert_for_1 (rhs, code, e, asserts);
3085 }
3086 }
3087
3088 /* Check if comparison
3089 NAME COND_OP INTEGER_CST
3090 has a form of
3091 (X & 11...100..0) COND_OP XX...X00...0
3092 Such comparison can yield assertions like
3093 X >= XX...X00...0
3094 X <= XX...X11...1
3095 in case of COND_OP being EQ_EXPR or
3096 X < XX...X00...0
3097 X > XX...X11...1
3098 in case of NE_EXPR. */
3099
3100 static bool
3101 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3102 tree *new_name, tree *low, enum tree_code *low_code,
3103 tree *high, enum tree_code *high_code)
3104 {
3105 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3106
3107 if (!is_gimple_assign (def_stmt)
3108 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3109 return false;
3110
3111 tree t = gimple_assign_rhs1 (def_stmt);
3112 tree maskt = gimple_assign_rhs2 (def_stmt);
3113 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3114 return false;
3115
3116 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3117 wide_int inv_mask = ~mask;
3118 /* Must have been removed by now so don't bother optimizing. */
3119 if (mask == 0 || inv_mask == 0)
3120 return false;
3121
3122 /* Assume VALT is INTEGER_CST. */
3123 wi::tree_to_wide_ref val = wi::to_wide (valt);
3124
3125 if ((inv_mask & (inv_mask + 1)) != 0
3126 || (val & mask) != val)
3127 return false;
3128
3129 bool is_range = cond_code == EQ_EXPR;
3130
3131 tree type = TREE_TYPE (t);
3132 wide_int min = wi::min_value (type),
3133 max = wi::max_value (type);
3134
3135 if (is_range)
3136 {
3137 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3138 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3139 }
3140 else
3141 {
3142 /* We can still generate assertion if one of alternatives
3143 is known to always be false. */
3144 if (val == min)
3145 {
3146 *low_code = (enum tree_code) 0;
3147 *high_code = GT_EXPR;
3148 }
3149 else if ((val | inv_mask) == max)
3150 {
3151 *low_code = LT_EXPR;
3152 *high_code = (enum tree_code) 0;
3153 }
3154 else
3155 return false;
3156 }
3157
3158 *new_name = t;
3159 *low = wide_int_to_tree (type, val);
3160 *high = wide_int_to_tree (type, val | inv_mask);
3161
3162 return true;
3163 }
3164
3165 /* Try to register an edge assertion for SSA name NAME on edge E for
3166 the condition COND contributing to the conditional jump pointed to by
3167 SI. */
3168
3169 void
3170 register_edge_assert_for (tree name, edge e,
3171 enum tree_code cond_code, tree cond_op0,
3172 tree cond_op1, vec<assert_info> &asserts)
3173 {
3174 tree val;
3175 enum tree_code comp_code;
3176 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3177
3178 /* Do not attempt to infer anything in names that flow through
3179 abnormal edges. */
3180 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3181 return;
3182
3183 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3184 cond_op0, cond_op1,
3185 is_else_edge,
3186 &comp_code, &val))
3187 return;
3188
3189 /* Register ASSERT_EXPRs for name. */
3190 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3191 cond_op1, is_else_edge, asserts);
3192
3193
3194 /* If COND is effectively an equality test of an SSA_NAME against
3195 the value zero or one, then we may be able to assert values
3196 for SSA_NAMEs which flow into COND. */
3197
3198 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3199 statement of NAME we can assert both operands of the BIT_AND_EXPR
3200 have nonzero value. */
3201 if (((comp_code == EQ_EXPR && integer_onep (val))
3202 || (comp_code == NE_EXPR && integer_zerop (val))))
3203 {
3204 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3205
3206 if (is_gimple_assign (def_stmt)
3207 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3208 {
3209 tree op0 = gimple_assign_rhs1 (def_stmt);
3210 tree op1 = gimple_assign_rhs2 (def_stmt);
3211 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3212 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3213 }
3214 }
3215
3216 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3217 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3218 have zero value. */
3219 if (((comp_code == EQ_EXPR && integer_zerop (val))
3220 || (comp_code == NE_EXPR && integer_onep (val))))
3221 {
3222 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3223
3224 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
3225 necessarily zero value, or if type-precision is one. */
3226 if (is_gimple_assign (def_stmt)
3227 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3228 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3229 || comp_code == EQ_EXPR)))
3230 {
3231 tree op0 = gimple_assign_rhs1 (def_stmt);
3232 tree op1 = gimple_assign_rhs2 (def_stmt);
3233 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3234 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3235 }
3236 }
3237
3238 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3239 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3240 && TREE_CODE (val) == INTEGER_CST)
3241 {
3242 enum tree_code low_code, high_code;
3243 tree low, high;
3244 if (is_masked_range_test (name, val, comp_code, &name, &low,
3245 &low_code, &high, &high_code))
3246 {
3247 if (low_code != ERROR_MARK)
3248 register_edge_assert_for_2 (name, e, low_code, name,
3249 low, /*invert*/false, asserts);
3250 if (high_code != ERROR_MARK)
3251 register_edge_assert_for_2 (name, e, high_code, name,
3252 high, /*invert*/false, asserts);
3253 }
3254 }
3255 }
3256
3257 /* Finish found ASSERTS for E and register them at GSI. */
3258
3259 static void
3260 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3261 vec<assert_info> &asserts)
3262 {
3263 for (unsigned i = 0; i < asserts.length (); ++i)
3264 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3265 reachable from E. */
3266 if (live_on_edge (e, asserts[i].name))
3267 register_new_assert_for (asserts[i].name, asserts[i].expr,
3268 asserts[i].comp_code, asserts[i].val,
3269 NULL, e, gsi);
3270 }
3271
3272
3273
3274 /* Determine whether the outgoing edges of BB should receive an
3275 ASSERT_EXPR for each of the operands of BB's LAST statement.
3276 The last statement of BB must be a COND_EXPR.
3277
3278 If any of the sub-graphs rooted at BB have an interesting use of
3279 the predicate operands, an assert location node is added to the
3280 list of assertions for the corresponding operands. */
3281
3282 static void
3283 find_conditional_asserts (basic_block bb, gcond *last)
3284 {
3285 gimple_stmt_iterator bsi;
3286 tree op;
3287 edge_iterator ei;
3288 edge e;
3289 ssa_op_iter iter;
3290
3291 bsi = gsi_for_stmt (last);
3292
3293 /* Look for uses of the operands in each of the sub-graphs
3294 rooted at BB. We need to check each of the outgoing edges
3295 separately, so that we know what kind of ASSERT_EXPR to
3296 insert. */
3297 FOR_EACH_EDGE (e, ei, bb->succs)
3298 {
3299 if (e->dest == bb)
3300 continue;
3301
3302 /* Register the necessary assertions for each operand in the
3303 conditional predicate. */
3304 auto_vec<assert_info, 8> asserts;
3305 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3306 register_edge_assert_for (op, e,
3307 gimple_cond_code (last),
3308 gimple_cond_lhs (last),
3309 gimple_cond_rhs (last), asserts);
3310 finish_register_edge_assert_for (e, bsi, asserts);
3311 }
3312 }
3313
3314 struct case_info
3315 {
3316 tree expr;
3317 basic_block bb;
3318 };
3319
3320 /* Compare two case labels sorting first by the destination bb index
3321 and then by the case value. */
3322
3323 static int
3324 compare_case_labels (const void *p1, const void *p2)
3325 {
3326 const struct case_info *ci1 = (const struct case_info *) p1;
3327 const struct case_info *ci2 = (const struct case_info *) p2;
3328 int idx1 = ci1->bb->index;
3329 int idx2 = ci2->bb->index;
3330
3331 if (idx1 < idx2)
3332 return -1;
3333 else if (idx1 == idx2)
3334 {
3335 /* Make sure the default label is first in a group. */
3336 if (!CASE_LOW (ci1->expr))
3337 return -1;
3338 else if (!CASE_LOW (ci2->expr))
3339 return 1;
3340 else
3341 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3342 CASE_LOW (ci2->expr));
3343 }
3344 else
3345 return 1;
3346 }
3347
3348 /* Determine whether the outgoing edges of BB should receive an
3349 ASSERT_EXPR for each of the operands of BB's LAST statement.
3350 The last statement of BB must be a SWITCH_EXPR.
3351
3352 If any of the sub-graphs rooted at BB have an interesting use of
3353 the predicate operands, an assert location node is added to the
3354 list of assertions for the corresponding operands. */
3355
3356 static void
3357 find_switch_asserts (basic_block bb, gswitch *last)
3358 {
3359 gimple_stmt_iterator bsi;
3360 tree op;
3361 edge e;
3362 struct case_info *ci;
3363 size_t n = gimple_switch_num_labels (last);
3364 #if GCC_VERSION >= 4000
3365 unsigned int idx;
3366 #else
3367 /* Work around GCC 3.4 bug (PR 37086). */
3368 volatile unsigned int idx;
3369 #endif
3370
3371 bsi = gsi_for_stmt (last);
3372 op = gimple_switch_index (last);
3373 if (TREE_CODE (op) != SSA_NAME)
3374 return;
3375
3376 /* Build a vector of case labels sorted by destination label. */
3377 ci = XNEWVEC (struct case_info, n);
3378 for (idx = 0; idx < n; ++idx)
3379 {
3380 ci[idx].expr = gimple_switch_label (last, idx);
3381 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3382 }
3383 edge default_edge = find_edge (bb, ci[0].bb);
3384 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3385
3386 for (idx = 0; idx < n; ++idx)
3387 {
3388 tree min, max;
3389 tree cl = ci[idx].expr;
3390 basic_block cbb = ci[idx].bb;
3391
3392 min = CASE_LOW (cl);
3393 max = CASE_HIGH (cl);
3394
3395 /* If there are multiple case labels with the same destination
3396 we need to combine them to a single value range for the edge. */
3397 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3398 {
3399 /* Skip labels until the last of the group. */
3400 do {
3401 ++idx;
3402 } while (idx < n && cbb == ci[idx].bb);
3403 --idx;
3404
3405 /* Pick up the maximum of the case label range. */
3406 if (CASE_HIGH (ci[idx].expr))
3407 max = CASE_HIGH (ci[idx].expr);
3408 else
3409 max = CASE_LOW (ci[idx].expr);
3410 }
3411
3412 /* Can't extract a useful assertion out of a range that includes the
3413 default label. */
3414 if (min == NULL_TREE)
3415 continue;
3416
3417 /* Find the edge to register the assert expr on. */
3418 e = find_edge (bb, cbb);
3419
3420 /* Register the necessary assertions for the operand in the
3421 SWITCH_EXPR. */
3422 auto_vec<assert_info, 8> asserts;
3423 register_edge_assert_for (op, e,
3424 max ? GE_EXPR : EQ_EXPR,
3425 op, fold_convert (TREE_TYPE (op), min),
3426 asserts);
3427 if (max)
3428 register_edge_assert_for (op, e, LE_EXPR, op,
3429 fold_convert (TREE_TYPE (op), max),
3430 asserts);
3431 finish_register_edge_assert_for (e, bsi, asserts);
3432 }
3433
3434 XDELETEVEC (ci);
3435
3436 if (!live_on_edge (default_edge, op))
3437 return;
3438
3439 /* Now register along the default label assertions that correspond to the
3440 anti-range of each label. */
3441 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3442 if (insertion_limit == 0)
3443 return;
3444
3445 /* We can't do this if the default case shares a label with another case. */
3446 tree default_cl = gimple_switch_default_label (last);
3447 for (idx = 1; idx < n; idx++)
3448 {
3449 tree min, max;
3450 tree cl = gimple_switch_label (last, idx);
3451 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3452 continue;
3453
3454 min = CASE_LOW (cl);
3455 max = CASE_HIGH (cl);
3456
3457 /* Combine contiguous case ranges to reduce the number of assertions
3458 to insert. */
3459 for (idx = idx + 1; idx < n; idx++)
3460 {
3461 tree next_min, next_max;
3462 tree next_cl = gimple_switch_label (last, idx);
3463 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3464 break;
3465
3466 next_min = CASE_LOW (next_cl);
3467 next_max = CASE_HIGH (next_cl);
3468
3469 wide_int difference = (wi::to_wide (next_min)
3470 - wi::to_wide (max ? max : min));
3471 if (wi::eq_p (difference, 1))
3472 max = next_max ? next_max : next_min;
3473 else
3474 break;
3475 }
3476 idx--;
3477
3478 if (max == NULL_TREE)
3479 {
3480 /* Register the assertion OP != MIN. */
3481 auto_vec<assert_info, 8> asserts;
3482 min = fold_convert (TREE_TYPE (op), min);
3483 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3484 asserts);
3485 finish_register_edge_assert_for (default_edge, bsi, asserts);
3486 }
3487 else
3488 {
3489 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3490 which will give OP the anti-range ~[MIN,MAX]. */
3491 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3492 min = fold_convert (TREE_TYPE (uop), min);
3493 max = fold_convert (TREE_TYPE (uop), max);
3494
3495 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3496 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3497 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3498 NULL, default_edge, bsi);
3499 }
3500
3501 if (--insertion_limit == 0)
3502 break;
3503 }
3504 }
3505
3506
3507 /* Traverse all the statements in block BB looking for statements that
3508 may generate useful assertions for the SSA names in their operand.
3509 If a statement produces a useful assertion A for name N_i, then the
3510 list of assertions already generated for N_i is scanned to
3511 determine if A is actually needed.
3512
3513 If N_i already had the assertion A at a location dominating the
3514 current location, then nothing needs to be done. Otherwise, the
3515 new location for A is recorded instead.
3516
3517 1- For every statement S in BB, all the variables used by S are
3518 added to bitmap FOUND_IN_SUBGRAPH.
3519
3520 2- If statement S uses an operand N in a way that exposes a known
3521 value range for N, then if N was not already generated by an
3522 ASSERT_EXPR, create a new assert location for N. For instance,
3523 if N is a pointer and the statement dereferences it, we can
3524 assume that N is not NULL.
3525
3526 3- COND_EXPRs are a special case of #2. We can derive range
3527 information from the predicate but need to insert different
3528 ASSERT_EXPRs for each of the sub-graphs rooted at the
3529 conditional block. If the last statement of BB is a conditional
3530 expression of the form 'X op Y', then
3531
3532 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3533
3534 b) If the conditional is the only entry point to the sub-graph
3535 corresponding to the THEN_CLAUSE, recurse into it. On
3536 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3537 an ASSERT_EXPR is added for the corresponding variable.
3538
3539 c) Repeat step (b) on the ELSE_CLAUSE.
3540
3541 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3542
3543 For instance,
3544
3545 if (a == 9)
3546 b = a;
3547 else
3548 b = c + 1;
3549
3550 In this case, an assertion on the THEN clause is useful to
3551 determine that 'a' is always 9 on that edge. However, an assertion
3552 on the ELSE clause would be unnecessary.
3553
3554 4- If BB does not end in a conditional expression, then we recurse
3555 into BB's dominator children.
3556
3557 At the end of the recursive traversal, every SSA name will have a
3558 list of locations where ASSERT_EXPRs should be added. When a new
3559 location for name N is found, it is registered by calling
3560 register_new_assert_for. That function keeps track of all the
3561 registered assertions to prevent adding unnecessary assertions.
3562 For instance, if a pointer P_4 is dereferenced more than once in a
3563 dominator tree, only the location dominating all the dereference of
3564 P_4 will receive an ASSERT_EXPR. */
3565
3566 static void
3567 find_assert_locations_1 (basic_block bb, sbitmap live)
3568 {
3569 gimple *last;
3570
3571 last = last_stmt (bb);
3572
3573 /* If BB's last statement is a conditional statement involving integer
3574 operands, determine if we need to add ASSERT_EXPRs. */
3575 if (last
3576 && gimple_code (last) == GIMPLE_COND
3577 && !fp_predicate (last)
3578 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3579 find_conditional_asserts (bb, as_a <gcond *> (last));
3580
3581 /* If BB's last statement is a switch statement involving integer
3582 operands, determine if we need to add ASSERT_EXPRs. */
3583 if (last
3584 && gimple_code (last) == GIMPLE_SWITCH
3585 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3586 find_switch_asserts (bb, as_a <gswitch *> (last));
3587
3588 /* Traverse all the statements in BB marking used names and looking
3589 for statements that may infer assertions for their used operands. */
3590 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3591 gsi_prev (&si))
3592 {
3593 gimple *stmt;
3594 tree op;
3595 ssa_op_iter i;
3596
3597 stmt = gsi_stmt (si);
3598
3599 if (is_gimple_debug (stmt))
3600 continue;
3601
3602 /* See if we can derive an assertion for any of STMT's operands. */
3603 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3604 {
3605 tree value;
3606 enum tree_code comp_code;
3607
3608 /* If op is not live beyond this stmt, do not bother to insert
3609 asserts for it. */
3610 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3611 continue;
3612
3613 /* If OP is used in such a way that we can infer a value
3614 range for it, and we don't find a previous assertion for
3615 it, create a new assertion location node for OP. */
3616 if (infer_value_range (stmt, op, &comp_code, &value))
3617 {
3618 /* If we are able to infer a nonzero value range for OP,
3619 then walk backwards through the use-def chain to see if OP
3620 was set via a typecast.
3621
3622 If so, then we can also infer a nonzero value range
3623 for the operand of the NOP_EXPR. */
3624 if (comp_code == NE_EXPR && integer_zerop (value))
3625 {
3626 tree t = op;
3627 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3628
3629 while (is_gimple_assign (def_stmt)
3630 && CONVERT_EXPR_CODE_P
3631 (gimple_assign_rhs_code (def_stmt))
3632 && TREE_CODE
3633 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3634 && POINTER_TYPE_P
3635 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3636 {
3637 t = gimple_assign_rhs1 (def_stmt);
3638 def_stmt = SSA_NAME_DEF_STMT (t);
3639
3640 /* Note we want to register the assert for the
3641 operand of the NOP_EXPR after SI, not after the
3642 conversion. */
3643 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3644 register_new_assert_for (t, t, comp_code, value,
3645 bb, NULL, si);
3646 }
3647 }
3648
3649 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3650 }
3651 }
3652
3653 /* Update live. */
3654 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3655 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3656 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3657 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3658 }
3659
3660 /* Traverse all PHI nodes in BB, updating live. */
3661 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3662 gsi_next (&si))
3663 {
3664 use_operand_p arg_p;
3665 ssa_op_iter i;
3666 gphi *phi = si.phi ();
3667 tree res = gimple_phi_result (phi);
3668
3669 if (virtual_operand_p (res))
3670 continue;
3671
3672 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3673 {
3674 tree arg = USE_FROM_PTR (arg_p);
3675 if (TREE_CODE (arg) == SSA_NAME)
3676 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3677 }
3678
3679 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3680 }
3681 }
3682
3683 /* Do an RPO walk over the function computing SSA name liveness
3684 on-the-fly and deciding on assert expressions to insert. */
3685
3686 static void
3687 find_assert_locations (void)
3688 {
3689 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3690 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3691 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3692 int rpo_cnt, i;
3693
3694 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3695 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3696 for (i = 0; i < rpo_cnt; ++i)
3697 bb_rpo[rpo[i]] = i;
3698
3699 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3700 the order we compute liveness and insert asserts we otherwise
3701 fail to insert asserts into the loop latch. */
3702 loop_p loop;
3703 FOR_EACH_LOOP (loop, 0)
3704 {
3705 i = loop->latch->index;
3706 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3707 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3708 !gsi_end_p (gsi); gsi_next (&gsi))
3709 {
3710 gphi *phi = gsi.phi ();
3711 if (virtual_operand_p (gimple_phi_result (phi)))
3712 continue;
3713 tree arg = gimple_phi_arg_def (phi, j);
3714 if (TREE_CODE (arg) == SSA_NAME)
3715 {
3716 if (live[i] == NULL)
3717 {
3718 live[i] = sbitmap_alloc (num_ssa_names);
3719 bitmap_clear (live[i]);
3720 }
3721 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3722 }
3723 }
3724 }
3725
3726 for (i = rpo_cnt - 1; i >= 0; --i)
3727 {
3728 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3729 edge e;
3730 edge_iterator ei;
3731
3732 if (!live[rpo[i]])
3733 {
3734 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3735 bitmap_clear (live[rpo[i]]);
3736 }
3737
3738 /* Process BB and update the live information with uses in
3739 this block. */
3740 find_assert_locations_1 (bb, live[rpo[i]]);
3741
3742 /* Merge liveness into the predecessor blocks and free it. */
3743 if (!bitmap_empty_p (live[rpo[i]]))
3744 {
3745 int pred_rpo = i;
3746 FOR_EACH_EDGE (e, ei, bb->preds)
3747 {
3748 int pred = e->src->index;
3749 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3750 continue;
3751
3752 if (!live[pred])
3753 {
3754 live[pred] = sbitmap_alloc (num_ssa_names);
3755 bitmap_clear (live[pred]);
3756 }
3757 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3758
3759 if (bb_rpo[pred] < pred_rpo)
3760 pred_rpo = bb_rpo[pred];
3761 }
3762
3763 /* Record the RPO number of the last visited block that needs
3764 live information from this block. */
3765 last_rpo[rpo[i]] = pred_rpo;
3766 }
3767 else
3768 {
3769 sbitmap_free (live[rpo[i]]);
3770 live[rpo[i]] = NULL;
3771 }
3772
3773 /* We can free all successors live bitmaps if all their
3774 predecessors have been visited already. */
3775 FOR_EACH_EDGE (e, ei, bb->succs)
3776 if (last_rpo[e->dest->index] == i
3777 && live[e->dest->index])
3778 {
3779 sbitmap_free (live[e->dest->index]);
3780 live[e->dest->index] = NULL;
3781 }
3782 }
3783
3784 XDELETEVEC (rpo);
3785 XDELETEVEC (bb_rpo);
3786 XDELETEVEC (last_rpo);
3787 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3788 if (live[i])
3789 sbitmap_free (live[i]);
3790 XDELETEVEC (live);
3791 }
3792
3793 /* Create an ASSERT_EXPR for NAME and insert it in the location
3794 indicated by LOC. Return true if we made any edge insertions. */
3795
3796 static bool
3797 process_assert_insertions_for (tree name, assert_locus *loc)
3798 {
3799 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3800 gimple *stmt;
3801 tree cond;
3802 gimple *assert_stmt;
3803 edge_iterator ei;
3804 edge e;
3805
3806 /* If we have X <=> X do not insert an assert expr for that. */
3807 if (loc->expr == loc->val)
3808 return false;
3809
3810 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3811 assert_stmt = build_assert_expr_for (cond, name);
3812 if (loc->e)
3813 {
3814 /* We have been asked to insert the assertion on an edge. This
3815 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3816 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3817 || (gimple_code (gsi_stmt (loc->si))
3818 == GIMPLE_SWITCH));
3819
3820 gsi_insert_on_edge (loc->e, assert_stmt);
3821 return true;
3822 }
3823
3824 /* If the stmt iterator points at the end then this is an insertion
3825 at the beginning of a block. */
3826 if (gsi_end_p (loc->si))
3827 {
3828 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3829 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3830 return false;
3831
3832 }
3833 /* Otherwise, we can insert right after LOC->SI iff the
3834 statement must not be the last statement in the block. */
3835 stmt = gsi_stmt (loc->si);
3836 if (!stmt_ends_bb_p (stmt))
3837 {
3838 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3839 return false;
3840 }
3841
3842 /* If STMT must be the last statement in BB, we can only insert new
3843 assertions on the non-abnormal edge out of BB. Note that since
3844 STMT is not control flow, there may only be one non-abnormal/eh edge
3845 out of BB. */
3846 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3847 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3848 {
3849 gsi_insert_on_edge (e, assert_stmt);
3850 return true;
3851 }
3852
3853 gcc_unreachable ();
3854 }
3855
3856 /* Qsort helper for sorting assert locations. If stable is true, don't
3857 use iterative_hash_expr because it can be unstable for -fcompare-debug,
3858 on the other side some pointers might be NULL. */
3859
3860 template <bool stable>
3861 static int
3862 compare_assert_loc (const void *pa, const void *pb)
3863 {
3864 assert_locus * const a = *(assert_locus * const *)pa;
3865 assert_locus * const b = *(assert_locus * const *)pb;
3866
3867 /* If stable, some asserts might be optimized away already, sort
3868 them last. */
3869 if (stable)
3870 {
3871 if (a == NULL)
3872 return b != NULL;
3873 else if (b == NULL)
3874 return -1;
3875 }
3876
3877 if (a->e == NULL && b->e != NULL)
3878 return 1;
3879 else if (a->e != NULL && b->e == NULL)
3880 return -1;
3881
3882 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3883 no need to test both a->e and b->e. */
3884
3885 /* Sort after destination index. */
3886 if (a->e == NULL)
3887 ;
3888 else if (a->e->dest->index > b->e->dest->index)
3889 return 1;
3890 else if (a->e->dest->index < b->e->dest->index)
3891 return -1;
3892
3893 /* Sort after comp_code. */
3894 if (a->comp_code > b->comp_code)
3895 return 1;
3896 else if (a->comp_code < b->comp_code)
3897 return -1;
3898
3899 hashval_t ha, hb;
3900
3901 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3902 uses DECL_UID of the VAR_DECL, so sorting might differ between
3903 -g and -g0. When doing the removal of redundant assert exprs
3904 and commonization to successors, this does not matter, but for
3905 the final sort needs to be stable. */
3906 if (stable)
3907 {
3908 ha = 0;
3909 hb = 0;
3910 }
3911 else
3912 {
3913 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3914 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3915 }
3916
3917 /* Break the tie using hashing and source/bb index. */
3918 if (ha == hb)
3919 return (a->e != NULL
3920 ? a->e->src->index - b->e->src->index
3921 : a->bb->index - b->bb->index);
3922 return ha > hb ? 1 : -1;
3923 }
3924
3925 /* Process all the insertions registered for every name N_i registered
3926 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3927 found in ASSERTS_FOR[i]. */
3928
3929 static void
3930 process_assert_insertions (void)
3931 {
3932 unsigned i;
3933 bitmap_iterator bi;
3934 bool update_edges_p = false;
3935 int num_asserts = 0;
3936
3937 if (dump_file && (dump_flags & TDF_DETAILS))
3938 dump_all_asserts (dump_file);
3939
3940 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3941 {
3942 assert_locus *loc = asserts_for[i];
3943 gcc_assert (loc);
3944
3945 auto_vec<assert_locus *, 16> asserts;
3946 for (; loc; loc = loc->next)
3947 asserts.safe_push (loc);
3948 asserts.qsort (compare_assert_loc<false>);
3949
3950 /* Push down common asserts to successors and remove redundant ones. */
3951 unsigned ecnt = 0;
3952 assert_locus *common = NULL;
3953 unsigned commonj = 0;
3954 for (unsigned j = 0; j < asserts.length (); ++j)
3955 {
3956 loc = asserts[j];
3957 if (! loc->e)
3958 common = NULL;
3959 else if (! common
3960 || loc->e->dest != common->e->dest
3961 || loc->comp_code != common->comp_code
3962 || ! operand_equal_p (loc->val, common->val, 0)
3963 || ! operand_equal_p (loc->expr, common->expr, 0))
3964 {
3965 commonj = j;
3966 common = loc;
3967 ecnt = 1;
3968 }
3969 else if (loc->e == asserts[j-1]->e)
3970 {
3971 /* Remove duplicate asserts. */
3972 if (commonj == j - 1)
3973 {
3974 commonj = j;
3975 common = loc;
3976 }
3977 free (asserts[j-1]);
3978 asserts[j-1] = NULL;
3979 }
3980 else
3981 {
3982 ecnt++;
3983 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3984 {
3985 /* We have the same assertion on all incoming edges of a BB.
3986 Insert it at the beginning of that block. */
3987 loc->bb = loc->e->dest;
3988 loc->e = NULL;
3989 loc->si = gsi_none ();
3990 common = NULL;
3991 /* Clear asserts commoned. */
3992 for (; commonj != j; ++commonj)
3993 if (asserts[commonj])
3994 {
3995 free (asserts[commonj]);
3996 asserts[commonj] = NULL;
3997 }
3998 }
3999 }
4000 }
4001
4002 /* The asserts vector sorting above might be unstable for
4003 -fcompare-debug, sort again to ensure a stable sort. */
4004 asserts.qsort (compare_assert_loc<true>);
4005 for (unsigned j = 0; j < asserts.length (); ++j)
4006 {
4007 loc = asserts[j];
4008 if (! loc)
4009 break;
4010 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4011 num_asserts++;
4012 free (loc);
4013 }
4014 }
4015
4016 if (update_edges_p)
4017 gsi_commit_edge_inserts ();
4018
4019 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4020 num_asserts);
4021 }
4022
4023
4024 /* Traverse the flowgraph looking for conditional jumps to insert range
4025 expressions. These range expressions are meant to provide information
4026 to optimizations that need to reason in terms of value ranges. They
4027 will not be expanded into RTL. For instance, given:
4028
4029 x = ...
4030 y = ...
4031 if (x < y)
4032 y = x - 2;
4033 else
4034 x = y + 3;
4035
4036 this pass will transform the code into:
4037
4038 x = ...
4039 y = ...
4040 if (x < y)
4041 {
4042 x = ASSERT_EXPR <x, x < y>
4043 y = x - 2
4044 }
4045 else
4046 {
4047 y = ASSERT_EXPR <y, x >= y>
4048 x = y + 3
4049 }
4050
4051 The idea is that once copy and constant propagation have run, other
4052 optimizations will be able to determine what ranges of values can 'x'
4053 take in different paths of the code, simply by checking the reaching
4054 definition of 'x'. */
4055
4056 static void
4057 insert_range_assertions (void)
4058 {
4059 need_assert_for = BITMAP_ALLOC (NULL);
4060 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4061
4062 calculate_dominance_info (CDI_DOMINATORS);
4063
4064 find_assert_locations ();
4065 if (!bitmap_empty_p (need_assert_for))
4066 {
4067 process_assert_insertions ();
4068 update_ssa (TODO_update_ssa_no_phi);
4069 }
4070
4071 if (dump_file && (dump_flags & TDF_DETAILS))
4072 {
4073 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4074 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4075 }
4076
4077 free (asserts_for);
4078 BITMAP_FREE (need_assert_for);
4079 }
4080
4081 class vrp_prop : public ssa_propagation_engine
4082 {
4083 public:
4084 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4085 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4086
4087 void vrp_initialize (void);
4088 void vrp_finalize (bool);
4089 void check_all_array_refs (void);
4090 bool check_array_ref (location_t, tree, bool);
4091 bool check_mem_ref (location_t, tree, bool);
4092 void search_for_addr_array (tree, location_t);
4093
4094 class vr_values vr_values;
4095 /* Temporary delegator to minimize code churn. */
4096 const value_range *get_value_range (const_tree op)
4097 { return vr_values.get_value_range (op); }
4098 void set_def_to_varying (const_tree def)
4099 { vr_values.set_def_to_varying (def); }
4100 void set_defs_to_varying (gimple *stmt)
4101 { vr_values.set_defs_to_varying (stmt); }
4102 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4103 tree *output_p, value_range *vr)
4104 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4105 bool update_value_range (const_tree op, value_range *vr)
4106 { return vr_values.update_value_range (op, vr); }
4107 void extract_range_basic (value_range *vr, gimple *stmt)
4108 { vr_values.extract_range_basic (vr, stmt); }
4109 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4110 { vr_values.extract_range_from_phi_node (phi, vr); }
4111 };
4112 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
4113 and "struct" hacks. If VRP can determine that the
4114 array subscript is a constant, check if it is outside valid
4115 range. If the array subscript is a RANGE, warn if it is
4116 non-overlapping with valid range.
4117 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR.
4118 Returns true if a warning has been issued. */
4119
4120 bool
4121 vrp_prop::check_array_ref (location_t location, tree ref,
4122 bool ignore_off_by_one)
4123 {
4124 const value_range *vr = NULL;
4125 tree low_sub, up_sub;
4126 tree low_bound, up_bound, up_bound_p1;
4127
4128 if (TREE_NO_WARNING (ref))
4129 return false;
4130
4131 low_sub = up_sub = TREE_OPERAND (ref, 1);
4132 up_bound = array_ref_up_bound (ref);
4133
4134 if (!up_bound
4135 || TREE_CODE (up_bound) != INTEGER_CST
4136 || (warn_array_bounds < 2
4137 && array_at_struct_end_p (ref)))
4138 {
4139 /* Accesses to trailing arrays via pointers may access storage
4140 beyond the types array bounds. For such arrays, or for flexible
4141 array members, as well as for other arrays of an unknown size,
4142 replace the upper bound with a more permissive one that assumes
4143 the size of the largest object is PTRDIFF_MAX. */
4144 tree eltsize = array_ref_element_size (ref);
4145
4146 if (TREE_CODE (eltsize) != INTEGER_CST
4147 || integer_zerop (eltsize))
4148 {
4149 up_bound = NULL_TREE;
4150 up_bound_p1 = NULL_TREE;
4151 }
4152 else
4153 {
4154 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4155 tree arg = TREE_OPERAND (ref, 0);
4156 poly_int64 off;
4157
4158 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4159 maxbound = wide_int_to_tree (sizetype,
4160 wi::sub (wi::to_wide (maxbound),
4161 off));
4162 else
4163 maxbound = fold_convert (sizetype, maxbound);
4164
4165 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4166
4167 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4168 build_int_cst (ptrdiff_type_node, 1));
4169 }
4170 }
4171 else
4172 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4173 build_int_cst (TREE_TYPE (up_bound), 1));
4174
4175 low_bound = array_ref_low_bound (ref);
4176
4177 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4178
4179 bool warned = false;
4180
4181 /* Empty array. */
4182 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4183 warned = warning_at (location, OPT_Warray_bounds,
4184 "array subscript %E is above array bounds of %qT",
4185 low_bound, artype);
4186
4187 if (TREE_CODE (low_sub) == SSA_NAME)
4188 {
4189 vr = get_value_range (low_sub);
4190 if (!vr->undefined_p () && !vr->varying_p ())
4191 {
4192 low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
4193 up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
4194 }
4195 }
4196
4197 if (vr && vr->kind () == VR_ANTI_RANGE)
4198 {
4199 if (up_bound
4200 && TREE_CODE (up_sub) == INTEGER_CST
4201 && (ignore_off_by_one
4202 ? tree_int_cst_lt (up_bound, up_sub)
4203 : tree_int_cst_le (up_bound, up_sub))
4204 && TREE_CODE (low_sub) == INTEGER_CST
4205 && tree_int_cst_le (low_sub, low_bound))
4206 warned = warning_at (location, OPT_Warray_bounds,
4207 "array subscript [%E, %E] is outside "
4208 "array bounds of %qT",
4209 low_sub, up_sub, artype);
4210 }
4211 else if (up_bound
4212 && TREE_CODE (up_sub) == INTEGER_CST
4213 && (ignore_off_by_one
4214 ? !tree_int_cst_le (up_sub, up_bound_p1)
4215 : !tree_int_cst_le (up_sub, up_bound)))
4216 {
4217 if (dump_file && (dump_flags & TDF_DETAILS))
4218 {
4219 fprintf (dump_file, "Array bound warning for ");
4220 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4221 fprintf (dump_file, "\n");
4222 }
4223 warned = warning_at (location, OPT_Warray_bounds,
4224 "array subscript %E is above array bounds of %qT",
4225 up_sub, artype);
4226 }
4227 else if (TREE_CODE (low_sub) == INTEGER_CST
4228 && tree_int_cst_lt (low_sub, low_bound))
4229 {
4230 if (dump_file && (dump_flags & TDF_DETAILS))
4231 {
4232 fprintf (dump_file, "Array bound warning for ");
4233 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4234 fprintf (dump_file, "\n");
4235 }
4236 warned = warning_at (location, OPT_Warray_bounds,
4237 "array subscript %E is below array bounds of %qT",
4238 low_sub, artype);
4239 }
4240
4241 if (warned)
4242 {
4243 ref = TREE_OPERAND (ref, 0);
4244 if (TREE_CODE (ref) == COMPONENT_REF)
4245 ref = TREE_OPERAND (ref, 1);
4246
4247 if (DECL_P (ref))
4248 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4249
4250 TREE_NO_WARNING (ref) = 1;
4251 }
4252
4253 return warned;
4254 }
4255
4256 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4257 references to string constants. If VRP can determine that the array
4258 subscript is a constant, check if it is outside valid range.
4259 If the array subscript is a RANGE, warn if it is non-overlapping
4260 with valid range.
4261 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4262 (used to allow one-past-the-end indices for code that takes
4263 the address of the just-past-the-end element of an array).
4264 Returns true if a warning has been issued. */
4265
4266 bool
4267 vrp_prop::check_mem_ref (location_t location, tree ref,
4268 bool ignore_off_by_one)
4269 {
4270 if (TREE_NO_WARNING (ref))
4271 return false;
4272
4273 tree arg = TREE_OPERAND (ref, 0);
4274 /* The constant and variable offset of the reference. */
4275 tree cstoff = TREE_OPERAND (ref, 1);
4276 tree varoff = NULL_TREE;
4277
4278 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4279
4280 /* The array or string constant bounds in bytes. Initially set
4281 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4282 determined. */
4283 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4284
4285 /* The minimum and maximum intermediate offset. For a reference
4286 to be valid, not only does the final offset/subscript must be
4287 in bounds but all intermediate offsets should be as well.
4288 GCC may be able to deal gracefully with such out-of-bounds
4289 offsets so the checking is only enbaled at -Warray-bounds=2
4290 where it may help detect bugs in uses of the intermediate
4291 offsets that could otherwise not be detectable. */
4292 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4293 offset_int extrema[2] = { 0, wi::abs (ioff) };
4294
4295 /* The range of the byte offset into the reference. */
4296 offset_int offrange[2] = { 0, 0 };
4297
4298 const value_range *vr = NULL;
4299
4300 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4301 The loop computes the range of the final offset for expressions such
4302 as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
4303 some range. */
4304 const unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
4305 for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
4306 {
4307 gimple *def = SSA_NAME_DEF_STMT (arg);
4308 if (!is_gimple_assign (def))
4309 break;
4310
4311 tree_code code = gimple_assign_rhs_code (def);
4312 if (code == POINTER_PLUS_EXPR)
4313 {
4314 arg = gimple_assign_rhs1 (def);
4315 varoff = gimple_assign_rhs2 (def);
4316 }
4317 else if (code == ASSERT_EXPR)
4318 {
4319 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4320 continue;
4321 }
4322 else
4323 return false;
4324
4325 /* VAROFF should always be a SSA_NAME here (and not even
4326 INTEGER_CST) but there's no point in taking chances. */
4327 if (TREE_CODE (varoff) != SSA_NAME)
4328 break;
4329
4330 vr = get_value_range (varoff);
4331 if (!vr || vr->undefined_p () || vr->varying_p ())
4332 break;
4333
4334 if (!vr->constant_p ())
4335 break;
4336
4337 if (vr->kind () == VR_RANGE)
4338 {
4339 offset_int min
4340 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
4341 offset_int max
4342 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
4343 if (min < max)
4344 {
4345 offrange[0] += min;
4346 offrange[1] += max;
4347 }
4348 else
4349 {
4350 /* When MIN >= MAX, the offset is effectively in a union
4351 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
4352 Since there is no way to represent such a range across
4353 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4354 to OFFRANGE. */
4355 offrange[0] += arrbounds[0];
4356 offrange[1] += arrbounds[1];
4357 }
4358 }
4359 else
4360 {
4361 /* For an anti-range, analogously to the above, conservatively
4362 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4363 offrange[0] += arrbounds[0];
4364 offrange[1] += arrbounds[1];
4365 }
4366
4367 /* Keep track of the minimum and maximum offset. */
4368 if (offrange[1] < 0 && offrange[1] < extrema[0])
4369 extrema[0] = offrange[1];
4370 if (offrange[0] > 0 && offrange[0] > extrema[1])
4371 extrema[1] = offrange[0];
4372
4373 if (offrange[0] < arrbounds[0])
4374 offrange[0] = arrbounds[0];
4375
4376 if (offrange[1] > arrbounds[1])
4377 offrange[1] = arrbounds[1];
4378 }
4379
4380 if (TREE_CODE (arg) == ADDR_EXPR)
4381 {
4382 arg = TREE_OPERAND (arg, 0);
4383 if (TREE_CODE (arg) != STRING_CST
4384 && TREE_CODE (arg) != VAR_DECL)
4385 return false;
4386 }
4387 else
4388 return false;
4389
4390 /* The type of the object being referred to. It can be an array,
4391 string literal, or a non-array type when the MEM_REF represents
4392 a reference/subscript via a pointer to an object that is not
4393 an element of an array. References to members of structs and
4394 unions are excluded because MEM_REF doesn't make it possible
4395 to identify the member where the reference originated.
4396 Incomplete types are excluded as well because their size is
4397 not known. */
4398 tree reftype = TREE_TYPE (arg);
4399 if (POINTER_TYPE_P (reftype)
4400 || !COMPLETE_TYPE_P (reftype)
4401 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4402 || RECORD_OR_UNION_TYPE_P (reftype))
4403 return false;
4404
4405 arrbounds[0] = 0;
4406
4407 offset_int eltsize;
4408 if (TREE_CODE (reftype) == ARRAY_TYPE)
4409 {
4410 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4411 if (tree dom = TYPE_DOMAIN (reftype))
4412 {
4413 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4414 if (array_at_struct_end_p (arg) || !bnds[0] || !bnds[1])
4415 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4416 else
4417 arrbounds[1] = (wi::to_offset (bnds[1]) - wi::to_offset (bnds[0])
4418 + 1) * eltsize;
4419 }
4420 else
4421 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4422
4423 if (TREE_CODE (ref) == MEM_REF)
4424 {
4425 /* For MEM_REF determine a tighter bound of the non-array
4426 element type. */
4427 tree eltype = TREE_TYPE (reftype);
4428 while (TREE_CODE (eltype) == ARRAY_TYPE)
4429 eltype = TREE_TYPE (eltype);
4430 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4431 }
4432 }
4433 else
4434 {
4435 eltsize = 1;
4436 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4437 }
4438
4439 offrange[0] += ioff;
4440 offrange[1] += ioff;
4441
4442 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4443 is set (when taking the address of the one-past-last element
4444 of an array) but always use the stricter bound in diagnostics. */
4445 offset_int ubound = arrbounds[1];
4446 if (ignore_off_by_one)
4447 ubound += 1;
4448
4449 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4450 {
4451 /* Treat a reference to a non-array object as one to an array
4452 of a single element. */
4453 if (TREE_CODE (reftype) != ARRAY_TYPE)
4454 reftype = build_array_type_nelts (reftype, 1);
4455
4456 if (TREE_CODE (ref) == MEM_REF)
4457 {
4458 /* Extract the element type out of MEM_REF and use its size
4459 to compute the index to print in the diagnostic; arrays
4460 in MEM_REF don't mean anything. A type with no size like
4461 void is as good as having a size of 1. */
4462 tree type = TREE_TYPE (ref);
4463 while (TREE_CODE (type) == ARRAY_TYPE)
4464 type = TREE_TYPE (type);
4465 if (tree size = TYPE_SIZE_UNIT (type))
4466 {
4467 offrange[0] = offrange[0] / wi::to_offset (size);
4468 offrange[1] = offrange[1] / wi::to_offset (size);
4469 }
4470 }
4471 else
4472 {
4473 /* For anything other than MEM_REF, compute the index to
4474 print in the diagnostic as the offset over element size. */
4475 offrange[0] = offrange[0] / eltsize;
4476 offrange[1] = offrange[1] / eltsize;
4477 }
4478
4479 bool warned;
4480 if (offrange[0] == offrange[1])
4481 warned = warning_at (location, OPT_Warray_bounds,
4482 "array subscript %wi is outside array bounds "
4483 "of %qT",
4484 offrange[0].to_shwi (), reftype);
4485 else
4486 warned = warning_at (location, OPT_Warray_bounds,
4487 "array subscript [%wi, %wi] is outside "
4488 "array bounds of %qT",
4489 offrange[0].to_shwi (),
4490 offrange[1].to_shwi (), reftype);
4491 if (warned && DECL_P (arg))
4492 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4493
4494 if (warned)
4495 TREE_NO_WARNING (ref) = 1;
4496 return warned;
4497 }
4498
4499 if (warn_array_bounds < 2)
4500 return false;
4501
4502 /* At level 2 check also intermediate offsets. */
4503 int i = 0;
4504 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4505 {
4506 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4507
4508 if (warning_at (location, OPT_Warray_bounds,
4509 "intermediate array offset %wi is outside array bounds "
4510 "of %qT", tmpidx, reftype))
4511 {
4512 TREE_NO_WARNING (ref) = 1;
4513 return true;
4514 }
4515 }
4516
4517 return false;
4518 }
4519
4520 /* Searches if the expr T, located at LOCATION computes
4521 address of an ARRAY_REF, and call check_array_ref on it. */
4522
4523 void
4524 vrp_prop::search_for_addr_array (tree t, location_t location)
4525 {
4526 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4527 do
4528 {
4529 bool warned = false;
4530 if (TREE_CODE (t) == ARRAY_REF)
4531 warned = check_array_ref (location, t, true /*ignore_off_by_one*/);
4532 else if (TREE_CODE (t) == MEM_REF)
4533 warned = check_mem_ref (location, t, true /*ignore_off_by_one*/);
4534
4535 if (warned)
4536 TREE_NO_WARNING (t) = true;
4537
4538 t = TREE_OPERAND (t, 0);
4539 }
4540 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4541
4542 if (TREE_CODE (t) != MEM_REF
4543 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4544 || TREE_NO_WARNING (t))
4545 return;
4546
4547 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4548 tree low_bound, up_bound, el_sz;
4549 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4550 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4551 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4552 return;
4553
4554 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4555 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4556 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4557 if (!low_bound
4558 || TREE_CODE (low_bound) != INTEGER_CST
4559 || !up_bound
4560 || TREE_CODE (up_bound) != INTEGER_CST
4561 || !el_sz
4562 || TREE_CODE (el_sz) != INTEGER_CST)
4563 return;
4564
4565 offset_int idx;
4566 if (!mem_ref_offset (t).is_constant (&idx))
4567 return;
4568
4569 bool warned = false;
4570 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4571 if (idx < 0)
4572 {
4573 if (dump_file && (dump_flags & TDF_DETAILS))
4574 {
4575 fprintf (dump_file, "Array bound warning for ");
4576 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4577 fprintf (dump_file, "\n");
4578 }
4579 warned = warning_at (location, OPT_Warray_bounds,
4580 "array subscript %wi is below "
4581 "array bounds of %qT",
4582 idx.to_shwi (), TREE_TYPE (tem));
4583 }
4584 else if (idx > (wi::to_offset (up_bound)
4585 - wi::to_offset (low_bound) + 1))
4586 {
4587 if (dump_file && (dump_flags & TDF_DETAILS))
4588 {
4589 fprintf (dump_file, "Array bound warning for ");
4590 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4591 fprintf (dump_file, "\n");
4592 }
4593 warned = warning_at (location, OPT_Warray_bounds,
4594 "array subscript %wu is above "
4595 "array bounds of %qT",
4596 idx.to_uhwi (), TREE_TYPE (tem));
4597 }
4598
4599 if (warned)
4600 {
4601 if (DECL_P (t))
4602 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4603
4604 TREE_NO_WARNING (t) = 1;
4605 }
4606 }
4607
4608 /* walk_tree() callback that checks if *TP is
4609 an ARRAY_REF inside an ADDR_EXPR (in which an array
4610 subscript one outside the valid range is allowed). Call
4611 check_array_ref for each ARRAY_REF found. The location is
4612 passed in DATA. */
4613
4614 static tree
4615 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4616 {
4617 tree t = *tp;
4618 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4619 location_t location;
4620
4621 if (EXPR_HAS_LOCATION (t))
4622 location = EXPR_LOCATION (t);
4623 else
4624 location = gimple_location (wi->stmt);
4625
4626 *walk_subtree = TRUE;
4627
4628 bool warned = false;
4629 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4630 if (TREE_CODE (t) == ARRAY_REF)
4631 warned = vrp_prop->check_array_ref (location, t, false/*ignore_off_by_one*/);
4632 else if (TREE_CODE (t) == MEM_REF)
4633 warned = vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4634 else if (TREE_CODE (t) == ADDR_EXPR)
4635 {
4636 vrp_prop->search_for_addr_array (t, location);
4637 *walk_subtree = FALSE;
4638 }
4639 /* Propagate the no-warning bit to the outer expression. */
4640 if (warned)
4641 TREE_NO_WARNING (t) = true;
4642
4643 return NULL_TREE;
4644 }
4645
4646 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4647 to walk over all statements of all reachable BBs and call
4648 check_array_bounds on them. */
4649
4650 class check_array_bounds_dom_walker : public dom_walker
4651 {
4652 public:
4653 check_array_bounds_dom_walker (vrp_prop *prop)
4654 : dom_walker (CDI_DOMINATORS,
4655 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4656 flags, so that we can merge in information on
4657 non-executable edges from vrp_folder . */
4658 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4659 m_prop (prop) {}
4660 ~check_array_bounds_dom_walker () {}
4661
4662 edge before_dom_children (basic_block) FINAL OVERRIDE;
4663
4664 private:
4665 vrp_prop *m_prop;
4666 };
4667
4668 /* Implementation of dom_walker::before_dom_children.
4669
4670 Walk over all statements of BB and call check_array_bounds on them,
4671 and determine if there's a unique successor edge. */
4672
4673 edge
4674 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4675 {
4676 gimple_stmt_iterator si;
4677 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4678 {
4679 gimple *stmt = gsi_stmt (si);
4680 struct walk_stmt_info wi;
4681 if (!gimple_has_location (stmt)
4682 || is_gimple_debug (stmt))
4683 continue;
4684
4685 memset (&wi, 0, sizeof (wi));
4686
4687 wi.info = m_prop;
4688
4689 walk_gimple_op (stmt, check_array_bounds, &wi);
4690 }
4691
4692 /* Determine if there's a unique successor edge, and if so, return
4693 that back to dom_walker, ensuring that we don't visit blocks that
4694 became unreachable during the VRP propagation
4695 (PR tree-optimization/83312). */
4696 return find_taken_edge (bb, NULL_TREE);
4697 }
4698
4699 /* Walk over all statements of all reachable BBs and call check_array_bounds
4700 on them. */
4701
4702 void
4703 vrp_prop::check_all_array_refs ()
4704 {
4705 check_array_bounds_dom_walker w (this);
4706 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4707 }
4708
4709 /* Return true if all imm uses of VAR are either in STMT, or
4710 feed (optionally through a chain of single imm uses) GIMPLE_COND
4711 in basic block COND_BB. */
4712
4713 static bool
4714 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4715 {
4716 use_operand_p use_p, use2_p;
4717 imm_use_iterator iter;
4718
4719 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4720 if (USE_STMT (use_p) != stmt)
4721 {
4722 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4723 if (is_gimple_debug (use_stmt))
4724 continue;
4725 while (is_gimple_assign (use_stmt)
4726 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4727 && single_imm_use (gimple_assign_lhs (use_stmt),
4728 &use2_p, &use_stmt2))
4729 use_stmt = use_stmt2;
4730 if (gimple_code (use_stmt) != GIMPLE_COND
4731 || gimple_bb (use_stmt) != cond_bb)
4732 return false;
4733 }
4734 return true;
4735 }
4736
4737 /* Handle
4738 _4 = x_3 & 31;
4739 if (_4 != 0)
4740 goto <bb 6>;
4741 else
4742 goto <bb 7>;
4743 <bb 6>:
4744 __builtin_unreachable ();
4745 <bb 7>:
4746 x_5 = ASSERT_EXPR <x_3, ...>;
4747 If x_3 has no other immediate uses (checked by caller),
4748 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
4749 from the non-zero bitmask. */
4750
4751 void
4752 maybe_set_nonzero_bits (edge e, tree var)
4753 {
4754 basic_block cond_bb = e->src;
4755 gimple *stmt = last_stmt (cond_bb);
4756 tree cst;
4757
4758 if (stmt == NULL
4759 || gimple_code (stmt) != GIMPLE_COND
4760 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4761 ? EQ_EXPR : NE_EXPR)
4762 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4763 || !integer_zerop (gimple_cond_rhs (stmt)))
4764 return;
4765
4766 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4767 if (!is_gimple_assign (stmt)
4768 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4769 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4770 return;
4771 if (gimple_assign_rhs1 (stmt) != var)
4772 {
4773 gimple *stmt2;
4774
4775 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4776 return;
4777 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4778 if (!gimple_assign_cast_p (stmt2)
4779 || gimple_assign_rhs1 (stmt2) != var
4780 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4781 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4782 != TYPE_PRECISION (TREE_TYPE (var))))
4783 return;
4784 }
4785 cst = gimple_assign_rhs2 (stmt);
4786 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4787 wi::to_wide (cst)));
4788 }
4789
4790 /* Convert range assertion expressions into the implied copies and
4791 copy propagate away the copies. Doing the trivial copy propagation
4792 here avoids the need to run the full copy propagation pass after
4793 VRP.
4794
4795 FIXME, this will eventually lead to copy propagation removing the
4796 names that had useful range information attached to them. For
4797 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4798 then N_i will have the range [3, +INF].
4799
4800 However, by converting the assertion into the implied copy
4801 operation N_i = N_j, we will then copy-propagate N_j into the uses
4802 of N_i and lose the range information. We may want to hold on to
4803 ASSERT_EXPRs a little while longer as the ranges could be used in
4804 things like jump threading.
4805
4806 The problem with keeping ASSERT_EXPRs around is that passes after
4807 VRP need to handle them appropriately.
4808
4809 Another approach would be to make the range information a first
4810 class property of the SSA_NAME so that it can be queried from
4811 any pass. This is made somewhat more complex by the need for
4812 multiple ranges to be associated with one SSA_NAME. */
4813
4814 static void
4815 remove_range_assertions (void)
4816 {
4817 basic_block bb;
4818 gimple_stmt_iterator si;
4819 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4820 a basic block preceeded by GIMPLE_COND branching to it and
4821 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4822 int is_unreachable;
4823
4824 /* Note that the BSI iterator bump happens at the bottom of the
4825 loop and no bump is necessary if we're removing the statement
4826 referenced by the current BSI. */
4827 FOR_EACH_BB_FN (bb, cfun)
4828 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4829 {
4830 gimple *stmt = gsi_stmt (si);
4831
4832 if (is_gimple_assign (stmt)
4833 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4834 {
4835 tree lhs = gimple_assign_lhs (stmt);
4836 tree rhs = gimple_assign_rhs1 (stmt);
4837 tree var;
4838
4839 var = ASSERT_EXPR_VAR (rhs);
4840
4841 if (TREE_CODE (var) == SSA_NAME
4842 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4843 && SSA_NAME_RANGE_INFO (lhs))
4844 {
4845 if (is_unreachable == -1)
4846 {
4847 is_unreachable = 0;
4848 if (single_pred_p (bb)
4849 && assert_unreachable_fallthru_edge_p
4850 (single_pred_edge (bb)))
4851 is_unreachable = 1;
4852 }
4853 /* Handle
4854 if (x_7 >= 10 && x_7 < 20)
4855 __builtin_unreachable ();
4856 x_8 = ASSERT_EXPR <x_7, ...>;
4857 if the only uses of x_7 are in the ASSERT_EXPR and
4858 in the condition. In that case, we can copy the
4859 range info from x_8 computed in this pass also
4860 for x_7. */
4861 if (is_unreachable
4862 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4863 single_pred (bb)))
4864 {
4865 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4866 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4867 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4868 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4869 }
4870 }
4871
4872 /* Propagate the RHS into every use of the LHS. For SSA names
4873 also propagate abnormals as it merely restores the original
4874 IL in this case (an replace_uses_by would assert). */
4875 if (TREE_CODE (var) == SSA_NAME)
4876 {
4877 imm_use_iterator iter;
4878 use_operand_p use_p;
4879 gimple *use_stmt;
4880 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4881 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4882 SET_USE (use_p, var);
4883 }
4884 else
4885 replace_uses_by (lhs, var);
4886
4887 /* And finally, remove the copy, it is not needed. */
4888 gsi_remove (&si, true);
4889 release_defs (stmt);
4890 }
4891 else
4892 {
4893 if (!is_gimple_debug (gsi_stmt (si)))
4894 is_unreachable = 0;
4895 gsi_next (&si);
4896 }
4897 }
4898 }
4899
4900 /* Return true if STMT is interesting for VRP. */
4901
4902 bool
4903 stmt_interesting_for_vrp (gimple *stmt)
4904 {
4905 if (gimple_code (stmt) == GIMPLE_PHI)
4906 {
4907 tree res = gimple_phi_result (stmt);
4908 return (!virtual_operand_p (res)
4909 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4910 || POINTER_TYPE_P (TREE_TYPE (res))));
4911 }
4912 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4913 {
4914 tree lhs = gimple_get_lhs (stmt);
4915
4916 /* In general, assignments with virtual operands are not useful
4917 for deriving ranges, with the obvious exception of calls to
4918 builtin functions. */
4919 if (lhs && TREE_CODE (lhs) == SSA_NAME
4920 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4921 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4922 && (is_gimple_call (stmt)
4923 || !gimple_vuse (stmt)))
4924 return true;
4925 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4926 switch (gimple_call_internal_fn (stmt))
4927 {
4928 case IFN_ADD_OVERFLOW:
4929 case IFN_SUB_OVERFLOW:
4930 case IFN_MUL_OVERFLOW:
4931 case IFN_ATOMIC_COMPARE_EXCHANGE:
4932 /* These internal calls return _Complex integer type,
4933 but are interesting to VRP nevertheless. */
4934 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4935 return true;
4936 break;
4937 default:
4938 break;
4939 }
4940 }
4941 else if (gimple_code (stmt) == GIMPLE_COND
4942 || gimple_code (stmt) == GIMPLE_SWITCH)
4943 return true;
4944
4945 return false;
4946 }
4947
4948 /* Initialization required by ssa_propagate engine. */
4949
4950 void
4951 vrp_prop::vrp_initialize ()
4952 {
4953 basic_block bb;
4954
4955 FOR_EACH_BB_FN (bb, cfun)
4956 {
4957 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4958 gsi_next (&si))
4959 {
4960 gphi *phi = si.phi ();
4961 if (!stmt_interesting_for_vrp (phi))
4962 {
4963 tree lhs = PHI_RESULT (phi);
4964 set_def_to_varying (lhs);
4965 prop_set_simulate_again (phi, false);
4966 }
4967 else
4968 prop_set_simulate_again (phi, true);
4969 }
4970
4971 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
4972 gsi_next (&si))
4973 {
4974 gimple *stmt = gsi_stmt (si);
4975
4976 /* If the statement is a control insn, then we do not
4977 want to avoid simulating the statement once. Failure
4978 to do so means that those edges will never get added. */
4979 if (stmt_ends_bb_p (stmt))
4980 prop_set_simulate_again (stmt, true);
4981 else if (!stmt_interesting_for_vrp (stmt))
4982 {
4983 set_defs_to_varying (stmt);
4984 prop_set_simulate_again (stmt, false);
4985 }
4986 else
4987 prop_set_simulate_again (stmt, true);
4988 }
4989 }
4990 }
4991
4992 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
4993 that includes the value VAL. The search is restricted to the range
4994 [START_IDX, n - 1] where n is the size of VEC.
4995
4996 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
4997 returned.
4998
4999 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5000 it is placed in IDX and false is returned.
5001
5002 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
5003 returned. */
5004
5005 bool
5006 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5007 {
5008 size_t n = gimple_switch_num_labels (stmt);
5009 size_t low, high;
5010
5011 /* Find case label for minimum of the value range or the next one.
5012 At each iteration we are searching in [low, high - 1]. */
5013
5014 for (low = start_idx, high = n; high != low; )
5015 {
5016 tree t;
5017 int cmp;
5018 /* Note that i != high, so we never ask for n. */
5019 size_t i = (high + low) / 2;
5020 t = gimple_switch_label (stmt, i);
5021
5022 /* Cache the result of comparing CASE_LOW and val. */
5023 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5024
5025 if (cmp == 0)
5026 {
5027 /* Ranges cannot be empty. */
5028 *idx = i;
5029 return true;
5030 }
5031 else if (cmp > 0)
5032 high = i;
5033 else
5034 {
5035 low = i + 1;
5036 if (CASE_HIGH (t) != NULL
5037 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5038 {
5039 *idx = i;
5040 return true;
5041 }
5042 }
5043 }
5044
5045 *idx = high;
5046 return false;
5047 }
5048
5049 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5050 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5051 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5052 then MAX_IDX < MIN_IDX.
5053 Returns true if the default label is not needed. */
5054
5055 bool
5056 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5057 size_t *max_idx)
5058 {
5059 size_t i, j;
5060 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5061 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5062
5063 if (i == j
5064 && min_take_default
5065 && max_take_default)
5066 {
5067 /* Only the default case label reached.
5068 Return an empty range. */
5069 *min_idx = 1;
5070 *max_idx = 0;
5071 return false;
5072 }
5073 else
5074 {
5075 bool take_default = min_take_default || max_take_default;
5076 tree low, high;
5077 size_t k;
5078
5079 if (max_take_default)
5080 j--;
5081
5082 /* If the case label range is continuous, we do not need
5083 the default case label. Verify that. */
5084 high = CASE_LOW (gimple_switch_label (stmt, i));
5085 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5086 high = CASE_HIGH (gimple_switch_label (stmt, i));
5087 for (k = i + 1; k <= j; ++k)
5088 {
5089 low = CASE_LOW (gimple_switch_label (stmt, k));
5090 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5091 {
5092 take_default = true;
5093 break;
5094 }
5095 high = low;
5096 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5097 high = CASE_HIGH (gimple_switch_label (stmt, k));
5098 }
5099
5100 *min_idx = i;
5101 *max_idx = j;
5102 return !take_default;
5103 }
5104 }
5105
5106 /* Evaluate statement STMT. If the statement produces a useful range,
5107 return SSA_PROP_INTERESTING and record the SSA name with the
5108 interesting range into *OUTPUT_P.
5109
5110 If STMT is a conditional branch and we can determine its truth
5111 value, the taken edge is recorded in *TAKEN_EDGE_P.
5112
5113 If STMT produces a varying value, return SSA_PROP_VARYING. */
5114
5115 enum ssa_prop_result
5116 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5117 {
5118 tree lhs = gimple_get_lhs (stmt);
5119 value_range vr;
5120 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5121
5122 if (*output_p)
5123 {
5124 if (update_value_range (*output_p, &vr))
5125 {
5126 if (dump_file && (dump_flags & TDF_DETAILS))
5127 {
5128 fprintf (dump_file, "Found new range for ");
5129 print_generic_expr (dump_file, *output_p);
5130 fprintf (dump_file, ": ");
5131 dump_value_range (dump_file, &vr);
5132 fprintf (dump_file, "\n");
5133 }
5134
5135 if (vr.varying_p ())
5136 return SSA_PROP_VARYING;
5137
5138 return SSA_PROP_INTERESTING;
5139 }
5140 return SSA_PROP_NOT_INTERESTING;
5141 }
5142
5143 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5144 switch (gimple_call_internal_fn (stmt))
5145 {
5146 case IFN_ADD_OVERFLOW:
5147 case IFN_SUB_OVERFLOW:
5148 case IFN_MUL_OVERFLOW:
5149 case IFN_ATOMIC_COMPARE_EXCHANGE:
5150 /* These internal calls return _Complex integer type,
5151 which VRP does not track, but the immediate uses
5152 thereof might be interesting. */
5153 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5154 {
5155 imm_use_iterator iter;
5156 use_operand_p use_p;
5157 enum ssa_prop_result res = SSA_PROP_VARYING;
5158
5159 set_def_to_varying (lhs);
5160
5161 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5162 {
5163 gimple *use_stmt = USE_STMT (use_p);
5164 if (!is_gimple_assign (use_stmt))
5165 continue;
5166 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5167 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5168 continue;
5169 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5170 tree use_lhs = gimple_assign_lhs (use_stmt);
5171 if (TREE_CODE (rhs1) != rhs_code
5172 || TREE_OPERAND (rhs1, 0) != lhs
5173 || TREE_CODE (use_lhs) != SSA_NAME
5174 || !stmt_interesting_for_vrp (use_stmt)
5175 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5176 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5177 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5178 continue;
5179
5180 /* If there is a change in the value range for any of the
5181 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5182 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5183 or IMAGPART_EXPR immediate uses, but none of them have
5184 a change in their value ranges, return
5185 SSA_PROP_NOT_INTERESTING. If there are no
5186 {REAL,IMAG}PART_EXPR uses at all,
5187 return SSA_PROP_VARYING. */
5188 value_range new_vr;
5189 extract_range_basic (&new_vr, use_stmt);
5190 const value_range *old_vr = get_value_range (use_lhs);
5191 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
5192 res = SSA_PROP_INTERESTING;
5193 else
5194 res = SSA_PROP_NOT_INTERESTING;
5195 new_vr.equiv_clear ();
5196 if (res == SSA_PROP_INTERESTING)
5197 {
5198 *output_p = lhs;
5199 return res;
5200 }
5201 }
5202
5203 return res;
5204 }
5205 break;
5206 default:
5207 break;
5208 }
5209
5210 /* All other statements produce nothing of interest for VRP, so mark
5211 their outputs varying and prevent further simulation. */
5212 set_defs_to_varying (stmt);
5213
5214 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5215 }
5216
5217 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5218 { VR1TYPE, VR0MIN, VR0MAX } and store the result
5219 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5220 possible such range. The resulting range is not canonicalized. */
5221
5222 static void
5223 union_ranges (enum value_range_kind *vr0type,
5224 tree *vr0min, tree *vr0max,
5225 enum value_range_kind vr1type,
5226 tree vr1min, tree vr1max)
5227 {
5228 int cmpmin = compare_values (*vr0min, vr1min);
5229 int cmpmax = compare_values (*vr0max, vr1max);
5230 bool mineq = cmpmin == 0;
5231 bool maxeq = cmpmax == 0;
5232
5233 /* [] is vr0, () is vr1 in the following classification comments. */
5234 if (mineq && maxeq)
5235 {
5236 /* [( )] */
5237 if (*vr0type == vr1type)
5238 /* Nothing to do for equal ranges. */
5239 ;
5240 else if ((*vr0type == VR_RANGE
5241 && vr1type == VR_ANTI_RANGE)
5242 || (*vr0type == VR_ANTI_RANGE
5243 && vr1type == VR_RANGE))
5244 {
5245 /* For anti-range with range union the result is varying. */
5246 goto give_up;
5247 }
5248 else
5249 gcc_unreachable ();
5250 }
5251 else if (operand_less_p (*vr0max, vr1min) == 1
5252 || operand_less_p (vr1max, *vr0min) == 1)
5253 {
5254 /* [ ] ( ) or ( ) [ ]
5255 If the ranges have an empty intersection, result of the union
5256 operation is the anti-range or if both are anti-ranges
5257 it covers all. */
5258 if (*vr0type == VR_ANTI_RANGE
5259 && vr1type == VR_ANTI_RANGE)
5260 goto give_up;
5261 else if (*vr0type == VR_ANTI_RANGE
5262 && vr1type == VR_RANGE)
5263 ;
5264 else if (*vr0type == VR_RANGE
5265 && vr1type == VR_ANTI_RANGE)
5266 {
5267 *vr0type = vr1type;
5268 *vr0min = vr1min;
5269 *vr0max = vr1max;
5270 }
5271 else if (*vr0type == VR_RANGE
5272 && vr1type == VR_RANGE)
5273 {
5274 /* The result is the convex hull of both ranges. */
5275 if (operand_less_p (*vr0max, vr1min) == 1)
5276 {
5277 /* If the result can be an anti-range, create one. */
5278 if (TREE_CODE (*vr0max) == INTEGER_CST
5279 && TREE_CODE (vr1min) == INTEGER_CST
5280 && vrp_val_is_min (*vr0min)
5281 && vrp_val_is_max (vr1max))
5282 {
5283 tree min = int_const_binop (PLUS_EXPR,
5284 *vr0max,
5285 build_int_cst (TREE_TYPE (*vr0max), 1));
5286 tree max = int_const_binop (MINUS_EXPR,
5287 vr1min,
5288 build_int_cst (TREE_TYPE (vr1min), 1));
5289 if (!operand_less_p (max, min))
5290 {
5291 *vr0type = VR_ANTI_RANGE;
5292 *vr0min = min;
5293 *vr0max = max;
5294 }
5295 else
5296 *vr0max = vr1max;
5297 }
5298 else
5299 *vr0max = vr1max;
5300 }
5301 else
5302 {
5303 /* If the result can be an anti-range, create one. */
5304 if (TREE_CODE (vr1max) == INTEGER_CST
5305 && TREE_CODE (*vr0min) == INTEGER_CST
5306 && vrp_val_is_min (vr1min)
5307 && vrp_val_is_max (*vr0max))
5308 {
5309 tree min = int_const_binop (PLUS_EXPR,
5310 vr1max,
5311 build_int_cst (TREE_TYPE (vr1max), 1));
5312 tree max = int_const_binop (MINUS_EXPR,
5313 *vr0min,
5314 build_int_cst (TREE_TYPE (*vr0min), 1));
5315 if (!operand_less_p (max, min))
5316 {
5317 *vr0type = VR_ANTI_RANGE;
5318 *vr0min = min;
5319 *vr0max = max;
5320 }
5321 else
5322 *vr0min = vr1min;
5323 }
5324 else
5325 *vr0min = vr1min;
5326 }
5327 }
5328 else
5329 gcc_unreachable ();
5330 }
5331 else if ((maxeq || cmpmax == 1)
5332 && (mineq || cmpmin == -1))
5333 {
5334 /* [ ( ) ] or [( ) ] or [ ( )] */
5335 if (*vr0type == VR_RANGE
5336 && vr1type == VR_RANGE)
5337 ;
5338 else if (*vr0type == VR_ANTI_RANGE
5339 && vr1type == VR_ANTI_RANGE)
5340 {
5341 *vr0type = vr1type;
5342 *vr0min = vr1min;
5343 *vr0max = vr1max;
5344 }
5345 else if (*vr0type == VR_ANTI_RANGE
5346 && vr1type == VR_RANGE)
5347 {
5348 /* Arbitrarily choose the right or left gap. */
5349 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5350 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5351 build_int_cst (TREE_TYPE (vr1min), 1));
5352 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5353 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5354 build_int_cst (TREE_TYPE (vr1max), 1));
5355 else
5356 goto give_up;
5357 }
5358 else if (*vr0type == VR_RANGE
5359 && vr1type == VR_ANTI_RANGE)
5360 /* The result covers everything. */
5361 goto give_up;
5362 else
5363 gcc_unreachable ();
5364 }
5365 else if ((maxeq || cmpmax == -1)
5366 && (mineq || cmpmin == 1))
5367 {
5368 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5369 if (*vr0type == VR_RANGE
5370 && vr1type == VR_RANGE)
5371 {
5372 *vr0type = vr1type;
5373 *vr0min = vr1min;
5374 *vr0max = vr1max;
5375 }
5376 else if (*vr0type == VR_ANTI_RANGE
5377 && vr1type == VR_ANTI_RANGE)
5378 ;
5379 else if (*vr0type == VR_RANGE
5380 && vr1type == VR_ANTI_RANGE)
5381 {
5382 *vr0type = VR_ANTI_RANGE;
5383 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5384 {
5385 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5386 build_int_cst (TREE_TYPE (*vr0min), 1));
5387 *vr0min = vr1min;
5388 }
5389 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5390 {
5391 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5392 build_int_cst (TREE_TYPE (*vr0max), 1));
5393 *vr0max = vr1max;
5394 }
5395 else
5396 goto give_up;
5397 }
5398 else if (*vr0type == VR_ANTI_RANGE
5399 && vr1type == VR_RANGE)
5400 /* The result covers everything. */
5401 goto give_up;
5402 else
5403 gcc_unreachable ();
5404 }
5405 else if (cmpmin == -1
5406 && cmpmax == -1
5407 && (operand_less_p (vr1min, *vr0max) == 1
5408 || operand_equal_p (vr1min, *vr0max, 0)))
5409 {
5410 /* [ ( ] ) or [ ]( ) */
5411 if (*vr0type == VR_RANGE
5412 && vr1type == VR_RANGE)
5413 *vr0max = vr1max;
5414 else if (*vr0type == VR_ANTI_RANGE
5415 && vr1type == VR_ANTI_RANGE)
5416 *vr0min = vr1min;
5417 else if (*vr0type == VR_ANTI_RANGE
5418 && vr1type == VR_RANGE)
5419 {
5420 if (TREE_CODE (vr1min) == INTEGER_CST)
5421 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5422 build_int_cst (TREE_TYPE (vr1min), 1));
5423 else
5424 goto give_up;
5425 }
5426 else if (*vr0type == VR_RANGE
5427 && vr1type == VR_ANTI_RANGE)
5428 {
5429 if (TREE_CODE (*vr0max) == INTEGER_CST)
5430 {
5431 *vr0type = vr1type;
5432 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5433 build_int_cst (TREE_TYPE (*vr0max), 1));
5434 *vr0max = vr1max;
5435 }
5436 else
5437 goto give_up;
5438 }
5439 else
5440 gcc_unreachable ();
5441 }
5442 else if (cmpmin == 1
5443 && cmpmax == 1
5444 && (operand_less_p (*vr0min, vr1max) == 1
5445 || operand_equal_p (*vr0min, vr1max, 0)))
5446 {
5447 /* ( [ ) ] or ( )[ ] */
5448 if (*vr0type == VR_RANGE
5449 && vr1type == VR_RANGE)
5450 *vr0min = vr1min;
5451 else if (*vr0type == VR_ANTI_RANGE
5452 && vr1type == VR_ANTI_RANGE)
5453 *vr0max = vr1max;
5454 else if (*vr0type == VR_ANTI_RANGE
5455 && vr1type == VR_RANGE)
5456 {
5457 if (TREE_CODE (vr1max) == INTEGER_CST)
5458 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5459 build_int_cst (TREE_TYPE (vr1max), 1));
5460 else
5461 goto give_up;
5462 }
5463 else if (*vr0type == VR_RANGE
5464 && vr1type == VR_ANTI_RANGE)
5465 {
5466 if (TREE_CODE (*vr0min) == INTEGER_CST)
5467 {
5468 *vr0type = vr1type;
5469 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5470 build_int_cst (TREE_TYPE (*vr0min), 1));
5471 *vr0min = vr1min;
5472 }
5473 else
5474 goto give_up;
5475 }
5476 else
5477 gcc_unreachable ();
5478 }
5479 else
5480 goto give_up;
5481
5482 return;
5483
5484 give_up:
5485 *vr0type = VR_VARYING;
5486 *vr0min = NULL_TREE;
5487 *vr0max = NULL_TREE;
5488 }
5489
5490 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5491 { VR1TYPE, VR0MIN, VR0MAX } and store the result
5492 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5493 possible such range. The resulting range is not canonicalized. */
5494
5495 static void
5496 intersect_ranges (enum value_range_kind *vr0type,
5497 tree *vr0min, tree *vr0max,
5498 enum value_range_kind vr1type,
5499 tree vr1min, tree vr1max)
5500 {
5501 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5502 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5503
5504 /* [] is vr0, () is vr1 in the following classification comments. */
5505 if (mineq && maxeq)
5506 {
5507 /* [( )] */
5508 if (*vr0type == vr1type)
5509 /* Nothing to do for equal ranges. */
5510 ;
5511 else if ((*vr0type == VR_RANGE
5512 && vr1type == VR_ANTI_RANGE)
5513 || (*vr0type == VR_ANTI_RANGE
5514 && vr1type == VR_RANGE))
5515 {
5516 /* For anti-range with range intersection the result is empty. */
5517 *vr0type = VR_UNDEFINED;
5518 *vr0min = NULL_TREE;
5519 *vr0max = NULL_TREE;
5520 }
5521 else
5522 gcc_unreachable ();
5523 }
5524 else if (operand_less_p (*vr0max, vr1min) == 1
5525 || operand_less_p (vr1max, *vr0min) == 1)
5526 {
5527 /* [ ] ( ) or ( ) [ ]
5528 If the ranges have an empty intersection, the result of the
5529 intersect operation is the range for intersecting an
5530 anti-range with a range or empty when intersecting two ranges. */
5531 if (*vr0type == VR_RANGE
5532 && vr1type == VR_ANTI_RANGE)
5533 ;
5534 else if (*vr0type == VR_ANTI_RANGE
5535 && vr1type == VR_RANGE)
5536 {
5537 *vr0type = vr1type;
5538 *vr0min = vr1min;
5539 *vr0max = vr1max;
5540 }
5541 else if (*vr0type == VR_RANGE
5542 && vr1type == VR_RANGE)
5543 {
5544 *vr0type = VR_UNDEFINED;
5545 *vr0min = NULL_TREE;
5546 *vr0max = NULL_TREE;
5547 }
5548 else if (*vr0type == VR_ANTI_RANGE
5549 && vr1type == VR_ANTI_RANGE)
5550 {
5551 /* If the anti-ranges are adjacent to each other merge them. */
5552 if (TREE_CODE (*vr0max) == INTEGER_CST
5553 && TREE_CODE (vr1min) == INTEGER_CST
5554 && operand_less_p (*vr0max, vr1min) == 1
5555 && integer_onep (int_const_binop (MINUS_EXPR,
5556 vr1min, *vr0max)))
5557 *vr0max = vr1max;
5558 else if (TREE_CODE (vr1max) == INTEGER_CST
5559 && TREE_CODE (*vr0min) == INTEGER_CST
5560 && operand_less_p (vr1max, *vr0min) == 1
5561 && integer_onep (int_const_binop (MINUS_EXPR,
5562 *vr0min, vr1max)))
5563 *vr0min = vr1min;
5564 /* Else arbitrarily take VR0. */
5565 }
5566 }
5567 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5568 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5569 {
5570 /* [ ( ) ] or [( ) ] or [ ( )] */
5571 if (*vr0type == VR_RANGE
5572 && vr1type == VR_RANGE)
5573 {
5574 /* If both are ranges the result is the inner one. */
5575 *vr0type = vr1type;
5576 *vr0min = vr1min;
5577 *vr0max = vr1max;
5578 }
5579 else if (*vr0type == VR_RANGE
5580 && vr1type == VR_ANTI_RANGE)
5581 {
5582 /* Choose the right gap if the left one is empty. */
5583 if (mineq)
5584 {
5585 if (TREE_CODE (vr1max) != INTEGER_CST)
5586 *vr0min = vr1max;
5587 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5588 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5589 *vr0min
5590 = int_const_binop (MINUS_EXPR, vr1max,
5591 build_int_cst (TREE_TYPE (vr1max), -1));
5592 else
5593 *vr0min
5594 = int_const_binop (PLUS_EXPR, vr1max,
5595 build_int_cst (TREE_TYPE (vr1max), 1));
5596 }
5597 /* Choose the left gap if the right one is empty. */
5598 else if (maxeq)
5599 {
5600 if (TREE_CODE (vr1min) != INTEGER_CST)
5601 *vr0max = vr1min;
5602 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5603 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5604 *vr0max
5605 = int_const_binop (PLUS_EXPR, vr1min,
5606 build_int_cst (TREE_TYPE (vr1min), -1));
5607 else
5608 *vr0max
5609 = int_const_binop (MINUS_EXPR, vr1min,
5610 build_int_cst (TREE_TYPE (vr1min), 1));
5611 }
5612 /* Choose the anti-range if the range is effectively varying. */
5613 else if (vrp_val_is_min (*vr0min)
5614 && vrp_val_is_max (*vr0max))
5615 {
5616 *vr0type = vr1type;
5617 *vr0min = vr1min;
5618 *vr0max = vr1max;
5619 }
5620 /* Else choose the range. */
5621 }
5622 else if (*vr0type == VR_ANTI_RANGE
5623 && vr1type == VR_ANTI_RANGE)
5624 /* If both are anti-ranges the result is the outer one. */
5625 ;
5626 else if (*vr0type == VR_ANTI_RANGE
5627 && vr1type == VR_RANGE)
5628 {
5629 /* The intersection is empty. */
5630 *vr0type = VR_UNDEFINED;
5631 *vr0min = NULL_TREE;
5632 *vr0max = NULL_TREE;
5633 }
5634 else
5635 gcc_unreachable ();
5636 }
5637 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5638 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5639 {
5640 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5641 if (*vr0type == VR_RANGE
5642 && vr1type == VR_RANGE)
5643 /* Choose the inner range. */
5644 ;
5645 else if (*vr0type == VR_ANTI_RANGE
5646 && vr1type == VR_RANGE)
5647 {
5648 /* Choose the right gap if the left is empty. */
5649 if (mineq)
5650 {
5651 *vr0type = VR_RANGE;
5652 if (TREE_CODE (*vr0max) != INTEGER_CST)
5653 *vr0min = *vr0max;
5654 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5655 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5656 *vr0min
5657 = int_const_binop (MINUS_EXPR, *vr0max,
5658 build_int_cst (TREE_TYPE (*vr0max), -1));
5659 else
5660 *vr0min
5661 = int_const_binop (PLUS_EXPR, *vr0max,
5662 build_int_cst (TREE_TYPE (*vr0max), 1));
5663 *vr0max = vr1max;
5664 }
5665 /* Choose the left gap if the right is empty. */
5666 else if (maxeq)
5667 {
5668 *vr0type = VR_RANGE;
5669 if (TREE_CODE (*vr0min) != INTEGER_CST)
5670 *vr0max = *vr0min;
5671 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5672 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5673 *vr0max
5674 = int_const_binop (PLUS_EXPR, *vr0min,
5675 build_int_cst (TREE_TYPE (*vr0min), -1));
5676 else
5677 *vr0max
5678 = int_const_binop (MINUS_EXPR, *vr0min,
5679 build_int_cst (TREE_TYPE (*vr0min), 1));
5680 *vr0min = vr1min;
5681 }
5682 /* Choose the anti-range if the range is effectively varying. */
5683 else if (vrp_val_is_min (vr1min)
5684 && vrp_val_is_max (vr1max))
5685 ;
5686 /* Choose the anti-range if it is ~[0,0], that range is special
5687 enough to special case when vr1's range is relatively wide.
5688 At least for types bigger than int - this covers pointers
5689 and arguments to functions like ctz. */
5690 else if (*vr0min == *vr0max
5691 && integer_zerop (*vr0min)
5692 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5693 >= TYPE_PRECISION (integer_type_node))
5694 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5695 && TREE_CODE (vr1max) == INTEGER_CST
5696 && TREE_CODE (vr1min) == INTEGER_CST
5697 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5698 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5699 ;
5700 /* Else choose the range. */
5701 else
5702 {
5703 *vr0type = vr1type;
5704 *vr0min = vr1min;
5705 *vr0max = vr1max;
5706 }
5707 }
5708 else if (*vr0type == VR_ANTI_RANGE
5709 && vr1type == VR_ANTI_RANGE)
5710 {
5711 /* If both are anti-ranges the result is the outer one. */
5712 *vr0type = vr1type;
5713 *vr0min = vr1min;
5714 *vr0max = vr1max;
5715 }
5716 else if (vr1type == VR_ANTI_RANGE
5717 && *vr0type == VR_RANGE)
5718 {
5719 /* The intersection is empty. */
5720 *vr0type = VR_UNDEFINED;
5721 *vr0min = NULL_TREE;
5722 *vr0max = NULL_TREE;
5723 }
5724 else
5725 gcc_unreachable ();
5726 }
5727 else if ((operand_less_p (vr1min, *vr0max) == 1
5728 || operand_equal_p (vr1min, *vr0max, 0))
5729 && operand_less_p (*vr0min, vr1min) == 1)
5730 {
5731 /* [ ( ] ) or [ ]( ) */
5732 if (*vr0type == VR_ANTI_RANGE
5733 && vr1type == VR_ANTI_RANGE)
5734 *vr0max = vr1max;
5735 else if (*vr0type == VR_RANGE
5736 && vr1type == VR_RANGE)
5737 *vr0min = vr1min;
5738 else if (*vr0type == VR_RANGE
5739 && vr1type == VR_ANTI_RANGE)
5740 {
5741 if (TREE_CODE (vr1min) == INTEGER_CST)
5742 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5743 build_int_cst (TREE_TYPE (vr1min), 1));
5744 else
5745 *vr0max = vr1min;
5746 }
5747 else if (*vr0type == VR_ANTI_RANGE
5748 && vr1type == VR_RANGE)
5749 {
5750 *vr0type = VR_RANGE;
5751 if (TREE_CODE (*vr0max) == INTEGER_CST)
5752 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5753 build_int_cst (TREE_TYPE (*vr0max), 1));
5754 else
5755 *vr0min = *vr0max;
5756 *vr0max = vr1max;
5757 }
5758 else
5759 gcc_unreachable ();
5760 }
5761 else if ((operand_less_p (*vr0min, vr1max) == 1
5762 || operand_equal_p (*vr0min, vr1max, 0))
5763 && operand_less_p (vr1min, *vr0min) == 1)
5764 {
5765 /* ( [ ) ] or ( )[ ] */
5766 if (*vr0type == VR_ANTI_RANGE
5767 && vr1type == VR_ANTI_RANGE)
5768 *vr0min = vr1min;
5769 else if (*vr0type == VR_RANGE
5770 && vr1type == VR_RANGE)
5771 *vr0max = vr1max;
5772 else if (*vr0type == VR_RANGE
5773 && vr1type == VR_ANTI_RANGE)
5774 {
5775 if (TREE_CODE (vr1max) == INTEGER_CST)
5776 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5777 build_int_cst (TREE_TYPE (vr1max), 1));
5778 else
5779 *vr0min = vr1max;
5780 }
5781 else if (*vr0type == VR_ANTI_RANGE
5782 && vr1type == VR_RANGE)
5783 {
5784 *vr0type = VR_RANGE;
5785 if (TREE_CODE (*vr0min) == INTEGER_CST)
5786 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5787 build_int_cst (TREE_TYPE (*vr0min), 1));
5788 else
5789 *vr0max = *vr0min;
5790 *vr0min = vr1min;
5791 }
5792 else
5793 gcc_unreachable ();
5794 }
5795
5796 /* If we know the intersection is empty, there's no need to
5797 conservatively add anything else to the set. */
5798 if (*vr0type == VR_UNDEFINED)
5799 return;
5800
5801 /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
5802 result for the intersection. That's always a conservative
5803 correct estimate unless VR1 is a constant singleton range
5804 in which case we choose that. */
5805 if (vr1type == VR_RANGE
5806 && is_gimple_min_invariant (vr1min)
5807 && vrp_operand_equal_p (vr1min, vr1max))
5808 {
5809 *vr0type = vr1type;
5810 *vr0min = vr1min;
5811 *vr0max = vr1max;
5812 }
5813 }
5814
5815
5816 /* Helper for the intersection operation for value ranges. Given two
5817 value ranges VR0 and VR1, return the intersection of the two
5818 ranges. This may not be the smallest possible such range. */
5819
5820 value_range_base
5821 value_range_base::intersect_helper (const value_range_base *vr0,
5822 const value_range_base *vr1)
5823 {
5824 /* If either range is VR_VARYING the other one wins. */
5825 if (vr1->varying_p ())
5826 return *vr0;
5827 if (vr0->varying_p ())
5828 return *vr1;
5829
5830 /* When either range is VR_UNDEFINED the resulting range is
5831 VR_UNDEFINED, too. */
5832 if (vr0->undefined_p ())
5833 return *vr0;
5834 if (vr1->undefined_p ())
5835 return *vr1;
5836
5837 value_range_kind vr0type = vr0->kind ();
5838 tree vr0min = vr0->min ();
5839 tree vr0max = vr0->max ();
5840 intersect_ranges (&vr0type, &vr0min, &vr0max,
5841 vr1->kind (), vr1->min (), vr1->max ());
5842 /* Make sure to canonicalize the result though as the inversion of a
5843 VR_RANGE can still be a VR_RANGE. Work on a temporary so we can
5844 fall back to vr0 when this turns things to varying. */
5845 value_range_base tem;
5846 if (vr0type == VR_UNDEFINED)
5847 tem.set_undefined ();
5848 else if (vr0type == VR_VARYING)
5849 tem.set_varying (vr0->type ());
5850 else
5851 tem.set (vr0type, vr0min, vr0max);
5852 /* If that failed, use the saved original VR0. */
5853 if (tem.varying_p ())
5854 return *vr0;
5855
5856 return tem;
5857 }
5858
5859 void
5860 value_range_base::intersect (const value_range_base *other)
5861 {
5862 if (dump_file && (dump_flags & TDF_DETAILS))
5863 {
5864 fprintf (dump_file, "Intersecting\n ");
5865 dump_value_range (dump_file, this);
5866 fprintf (dump_file, "\nand\n ");
5867 dump_value_range (dump_file, other);
5868 fprintf (dump_file, "\n");
5869 }
5870
5871 *this = intersect_helper (this, other);
5872
5873 if (dump_file && (dump_flags & TDF_DETAILS))
5874 {
5875 fprintf (dump_file, "to\n ");
5876 dump_value_range (dump_file, this);
5877 fprintf (dump_file, "\n");
5878 }
5879 }
5880
5881 void
5882 value_range::intersect (const value_range *other)
5883 {
5884 if (dump_file && (dump_flags & TDF_DETAILS))
5885 {
5886 fprintf (dump_file, "Intersecting\n ");
5887 dump_value_range (dump_file, this);
5888 fprintf (dump_file, "\nand\n ");
5889 dump_value_range (dump_file, other);
5890 fprintf (dump_file, "\n");
5891 }
5892
5893 /* If THIS is varying we want to pick up equivalences from OTHER.
5894 Just special-case this here rather than trying to fixup after the
5895 fact. */
5896 if (this->varying_p ())
5897 this->deep_copy (other);
5898 else
5899 {
5900 value_range_base tem = intersect_helper (this, other);
5901 this->update (tem.kind (), tem.min (), tem.max ());
5902
5903 /* If the result is VR_UNDEFINED there is no need to mess with
5904 equivalencies. */
5905 if (!undefined_p ())
5906 {
5907 /* The resulting set of equivalences for range intersection
5908 is the union of the two sets. */
5909 if (m_equiv && other->m_equiv && m_equiv != other->m_equiv)
5910 bitmap_ior_into (m_equiv, other->m_equiv);
5911 else if (other->m_equiv && !m_equiv)
5912 {
5913 /* All equivalence bitmaps are allocated from the same
5914 obstack. So we can use the obstack associated with
5915 VR to allocate this->m_equiv. */
5916 m_equiv = BITMAP_ALLOC (other->m_equiv->obstack);
5917 bitmap_copy (m_equiv, other->m_equiv);
5918 }
5919 }
5920 }
5921
5922 if (dump_file && (dump_flags & TDF_DETAILS))
5923 {
5924 fprintf (dump_file, "to\n ");
5925 dump_value_range (dump_file, this);
5926 fprintf (dump_file, "\n");
5927 }
5928 }
5929
5930 /* Helper for meet operation for value ranges. Given two value ranges VR0 and
5931 VR1, return a range that contains both VR0 and VR1. This may not be the
5932 smallest possible such range. */
5933
5934 value_range_base
5935 value_range_base::union_helper (const value_range_base *vr0,
5936 const value_range_base *vr1)
5937 {
5938 /* VR0 has the resulting range if VR1 is undefined or VR0 is varying. */
5939 if (vr1->undefined_p ()
5940 || vr0->varying_p ())
5941 return *vr0;
5942
5943 /* VR1 has the resulting range if VR0 is undefined or VR1 is varying. */
5944 if (vr0->undefined_p ()
5945 || vr1->varying_p ())
5946 return *vr1;
5947
5948 value_range_kind vr0type = vr0->kind ();
5949 tree vr0min = vr0->min ();
5950 tree vr0max = vr0->max ();
5951 union_ranges (&vr0type, &vr0min, &vr0max,
5952 vr1->kind (), vr1->min (), vr1->max ());
5953
5954 /* Work on a temporary so we can still use vr0 when union returns varying. */
5955 value_range_base tem;
5956 if (vr0type == VR_UNDEFINED)
5957 tem.set_undefined ();
5958 else if (vr0type == VR_VARYING)
5959 tem.set_varying (vr0->type ());
5960 else
5961 tem.set (vr0type, vr0min, vr0max);
5962
5963 /* Failed to find an efficient meet. Before giving up and setting
5964 the result to VARYING, see if we can at least derive a useful
5965 anti-range. */
5966 if (tem.varying_p ()
5967 && range_includes_zero_p (vr0) == 0
5968 && range_includes_zero_p (vr1) == 0)
5969 {
5970 tem.set_nonzero (vr0->type ());
5971 return tem;
5972 }
5973
5974 return tem;
5975 }
5976
5977
5978 /* Meet operation for value ranges. Given two value ranges VR0 and
5979 VR1, store in VR0 a range that contains both VR0 and VR1. This
5980 may not be the smallest possible such range. */
5981
5982 void
5983 value_range_base::union_ (const value_range_base *other)
5984 {
5985 if (dump_file && (dump_flags & TDF_DETAILS))
5986 {
5987 fprintf (dump_file, "Meeting\n ");
5988 dump_value_range (dump_file, this);
5989 fprintf (dump_file, "\nand\n ");
5990 dump_value_range (dump_file, other);
5991 fprintf (dump_file, "\n");
5992 }
5993
5994 *this = union_helper (this, other);
5995
5996 if (dump_file && (dump_flags & TDF_DETAILS))
5997 {
5998 fprintf (dump_file, "to\n ");
5999 dump_value_range (dump_file, this);
6000 fprintf (dump_file, "\n");
6001 }
6002 }
6003
6004 void
6005 value_range::union_ (const value_range *other)
6006 {
6007 if (dump_file && (dump_flags & TDF_DETAILS))
6008 {
6009 fprintf (dump_file, "Meeting\n ");
6010 dump_value_range (dump_file, this);
6011 fprintf (dump_file, "\nand\n ");
6012 dump_value_range (dump_file, other);
6013 fprintf (dump_file, "\n");
6014 }
6015
6016 /* If THIS is undefined we want to pick up equivalences from OTHER.
6017 Just special-case this here rather than trying to fixup after the fact. */
6018 if (this->undefined_p ())
6019 this->deep_copy (other);
6020 else
6021 {
6022 value_range_base tem = union_helper (this, other);
6023 this->update (tem.kind (), tem.min (), tem.max ());
6024
6025 /* The resulting set of equivalences is always the intersection of
6026 the two sets. */
6027 if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
6028 bitmap_and_into (this->m_equiv, other->m_equiv);
6029 else if (this->m_equiv && !other->m_equiv)
6030 bitmap_clear (this->m_equiv);
6031 }
6032
6033 if (dump_file && (dump_flags & TDF_DETAILS))
6034 {
6035 fprintf (dump_file, "to\n ");
6036 dump_value_range (dump_file, this);
6037 fprintf (dump_file, "\n");
6038 }
6039 }
6040
6041 /* Normalize addresses into constants. */
6042
6043 value_range_base
6044 value_range_base::normalize_addresses () const
6045 {
6046 if (!POINTER_TYPE_P (type ()) || range_has_numeric_bounds_p (this))
6047 return *this;
6048
6049 if (!range_includes_zero_p (this))
6050 {
6051 gcc_checking_assert (TREE_CODE (m_min) == ADDR_EXPR
6052 || TREE_CODE (m_max) == ADDR_EXPR);
6053 return range_nonzero (type ());
6054 }
6055 return value_range_base (type ());
6056 }
6057
6058 /* Normalize symbolics and addresses into constants. */
6059
6060 value_range_base
6061 value_range_base::normalize_symbolics () const
6062 {
6063 if (varying_p () || undefined_p ())
6064 return *this;
6065 tree ttype = type ();
6066 bool min_symbolic = !is_gimple_min_invariant (min ());
6067 bool max_symbolic = !is_gimple_min_invariant (max ());
6068 if (!min_symbolic && !max_symbolic)
6069 return normalize_addresses ();
6070
6071 // [SYM, SYM] -> VARYING
6072 if (min_symbolic && max_symbolic)
6073 {
6074 value_range_base var;
6075 var.set_varying (ttype);
6076 return var;
6077 }
6078 if (kind () == VR_RANGE)
6079 {
6080 // [SYM, NUM] -> [-MIN, NUM]
6081 if (min_symbolic)
6082 return value_range_base (VR_RANGE, vrp_val_min (ttype, true), max ());
6083 // [NUM, SYM] -> [NUM, +MAX]
6084 return value_range_base (VR_RANGE, min (), vrp_val_max (ttype, true));
6085 }
6086 gcc_checking_assert (kind () == VR_ANTI_RANGE);
6087 // ~[SYM, NUM] -> [NUM + 1, +MAX]
6088 if (min_symbolic)
6089 {
6090 if (!vrp_val_is_max (max ()))
6091 {
6092 tree n = wide_int_to_tree (ttype, wi::to_wide (max ()) + 1);
6093 return value_range_base (VR_RANGE, n, vrp_val_max (ttype, true));
6094 }
6095 value_range_base var;
6096 var.set_varying (ttype);
6097 return var;
6098 }
6099 // ~[NUM, SYM] -> [-MIN, NUM - 1]
6100 if (!vrp_val_is_min (min ()))
6101 {
6102 tree n = wide_int_to_tree (ttype, wi::to_wide (min ()) - 1);
6103 return value_range_base (VR_RANGE, vrp_val_min (ttype, true), n);
6104 }
6105 value_range_base var;
6106 var.set_varying (ttype);
6107 return var;
6108 }
6109
6110 /* Return the number of sub-ranges in a range. */
6111
6112 unsigned
6113 value_range_base::num_pairs () const
6114 {
6115 if (undefined_p ())
6116 return 0;
6117 if (varying_p ())
6118 return 1;
6119 if (symbolic_p ())
6120 return normalize_symbolics ().num_pairs ();
6121 if (m_kind == VR_ANTI_RANGE)
6122 {
6123 // ~[MIN, X] has one sub-range of [X+1, MAX], and
6124 // ~[X, MAX] has one sub-range of [MIN, X-1].
6125 if (vrp_val_is_min (m_min, true) || vrp_val_is_max (m_max, true))
6126 return 1;
6127 return 2;
6128 }
6129 return 1;
6130 }
6131
6132 /* Return the lower bound for a sub-range. PAIR is the sub-range in
6133 question. */
6134
6135 wide_int
6136 value_range_base::lower_bound (unsigned pair) const
6137 {
6138 if (symbolic_p ())
6139 return normalize_symbolics ().lower_bound (pair);
6140
6141 gcc_checking_assert (!undefined_p ());
6142 gcc_checking_assert (pair + 1 <= num_pairs ());
6143 tree t = NULL;
6144 if (m_kind == VR_ANTI_RANGE)
6145 {
6146 tree typ = type ();
6147 if (pair == 1 || vrp_val_is_min (m_min, true))
6148 t = wide_int_to_tree (typ, wi::to_wide (m_max) + 1);
6149 else
6150 t = vrp_val_min (typ, true);
6151 }
6152 else
6153 t = m_min;
6154 return wi::to_wide (t);
6155 }
6156
6157 /* Return the upper bound for a sub-range. PAIR is the sub-range in
6158 question. */
6159
6160 wide_int
6161 value_range_base::upper_bound (unsigned pair) const
6162 {
6163 if (symbolic_p ())
6164 return normalize_symbolics ().upper_bound (pair);
6165
6166 gcc_checking_assert (!undefined_p ());
6167 gcc_checking_assert (pair + 1 <= num_pairs ());
6168 tree t = NULL;
6169 if (m_kind == VR_ANTI_RANGE)
6170 {
6171 tree typ = type ();
6172 if (pair == 1 || vrp_val_is_min (m_min, true))
6173 t = vrp_val_max (typ, true);
6174 else
6175 t = wide_int_to_tree (typ, wi::to_wide (m_min) - 1);
6176 }
6177 else
6178 t = m_max;
6179 return wi::to_wide (t);
6180 }
6181
6182 /* Return the highest bound in a range. */
6183
6184 wide_int
6185 value_range_base::upper_bound () const
6186 {
6187 unsigned pairs = num_pairs ();
6188 gcc_checking_assert (pairs > 0);
6189 return upper_bound (pairs - 1);
6190 }
6191
6192 /* Return TRUE if range contains INTEGER_CST. */
6193
6194 bool
6195 value_range_base::contains_p (tree cst) const
6196 {
6197 gcc_checking_assert (TREE_CODE (cst) == INTEGER_CST);
6198 if (symbolic_p ())
6199 return normalize_symbolics ().contains_p (cst);
6200 return value_inside_range (cst) == 1;
6201 }
6202
6203 /* Return the inverse of a range. */
6204
6205 void
6206 value_range_base::invert ()
6207 {
6208 if (m_kind == VR_RANGE)
6209 m_kind = VR_ANTI_RANGE;
6210 else if (m_kind == VR_ANTI_RANGE)
6211 m_kind = VR_RANGE;
6212 else
6213 gcc_unreachable ();
6214 }
6215
6216 /* Range union, but for references. */
6217
6218 void
6219 value_range_base::union_ (const value_range_base &r)
6220 {
6221 /* Disable details for now, because it makes the ranger dump
6222 unnecessarily verbose. */
6223 bool details = dump_flags & TDF_DETAILS;
6224 if (details)
6225 dump_flags &= ~TDF_DETAILS;
6226 union_ (&r);
6227 if (details)
6228 dump_flags |= TDF_DETAILS;
6229 }
6230
6231 /* Range intersect, but for references. */
6232
6233 void
6234 value_range_base::intersect (const value_range_base &r)
6235 {
6236 /* Disable details for now, because it makes the ranger dump
6237 unnecessarily verbose. */
6238 bool details = dump_flags & TDF_DETAILS;
6239 if (details)
6240 dump_flags &= ~TDF_DETAILS;
6241 intersect (&r);
6242 if (details)
6243 dump_flags |= TDF_DETAILS;
6244 }
6245
6246 /* Return TRUE if two types are compatible for range operations. */
6247
6248 static bool
6249 range_compatible_p (tree t1, tree t2)
6250 {
6251 if (POINTER_TYPE_P (t1) && POINTER_TYPE_P (t2))
6252 return true;
6253
6254 return types_compatible_p (t1, t2);
6255 }
6256
6257 bool
6258 value_range_base::operator== (const value_range_base &r) const
6259 {
6260 if (undefined_p ())
6261 return r.undefined_p ();
6262
6263 if (num_pairs () != r.num_pairs ()
6264 || !range_compatible_p (type (), r.type ()))
6265 return false;
6266
6267 for (unsigned p = 0; p < num_pairs (); p++)
6268 if (wi::ne_p (lower_bound (p), r.lower_bound (p))
6269 || wi::ne_p (upper_bound (p), r.upper_bound (p)))
6270 return false;
6271
6272 return true;
6273 }
6274
6275 /* Visit all arguments for PHI node PHI that flow through executable
6276 edges. If a valid value range can be derived from all the incoming
6277 value ranges, set a new range for the LHS of PHI. */
6278
6279 enum ssa_prop_result
6280 vrp_prop::visit_phi (gphi *phi)
6281 {
6282 tree lhs = PHI_RESULT (phi);
6283 value_range vr_result;
6284 extract_range_from_phi_node (phi, &vr_result);
6285 if (update_value_range (lhs, &vr_result))
6286 {
6287 if (dump_file && (dump_flags & TDF_DETAILS))
6288 {
6289 fprintf (dump_file, "Found new range for ");
6290 print_generic_expr (dump_file, lhs);
6291 fprintf (dump_file, ": ");
6292 dump_value_range (dump_file, &vr_result);
6293 fprintf (dump_file, "\n");
6294 }
6295
6296 if (vr_result.varying_p ())
6297 return SSA_PROP_VARYING;
6298
6299 return SSA_PROP_INTERESTING;
6300 }
6301
6302 /* Nothing changed, don't add outgoing edges. */
6303 return SSA_PROP_NOT_INTERESTING;
6304 }
6305
6306 class vrp_folder : public substitute_and_fold_engine
6307 {
6308 public:
6309 vrp_folder () : substitute_and_fold_engine (/* Fold all stmts. */ true) { }
6310 tree get_value (tree) FINAL OVERRIDE;
6311 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6312 bool fold_predicate_in (gimple_stmt_iterator *);
6313
6314 class vr_values *vr_values;
6315
6316 /* Delegators. */
6317 tree vrp_evaluate_conditional (tree_code code, tree op0,
6318 tree op1, gimple *stmt)
6319 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6320 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6321 { return vr_values->simplify_stmt_using_ranges (gsi); }
6322 tree op_with_constant_singleton_value_range (tree op)
6323 { return vr_values->op_with_constant_singleton_value_range (op); }
6324 };
6325
6326 /* If the statement pointed by SI has a predicate whose value can be
6327 computed using the value range information computed by VRP, compute
6328 its value and return true. Otherwise, return false. */
6329
6330 bool
6331 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6332 {
6333 bool assignment_p = false;
6334 tree val;
6335 gimple *stmt = gsi_stmt (*si);
6336
6337 if (is_gimple_assign (stmt)
6338 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6339 {
6340 assignment_p = true;
6341 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6342 gimple_assign_rhs1 (stmt),
6343 gimple_assign_rhs2 (stmt),
6344 stmt);
6345 }
6346 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6347 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6348 gimple_cond_lhs (cond_stmt),
6349 gimple_cond_rhs (cond_stmt),
6350 stmt);
6351 else
6352 return false;
6353
6354 if (val)
6355 {
6356 if (assignment_p)
6357 val = fold_convert (gimple_expr_type (stmt), val);
6358
6359 if (dump_file)
6360 {
6361 fprintf (dump_file, "Folding predicate ");
6362 print_gimple_expr (dump_file, stmt, 0);
6363 fprintf (dump_file, " to ");
6364 print_generic_expr (dump_file, val);
6365 fprintf (dump_file, "\n");
6366 }
6367
6368 if (is_gimple_assign (stmt))
6369 gimple_assign_set_rhs_from_tree (si, val);
6370 else
6371 {
6372 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6373 gcond *cond_stmt = as_a <gcond *> (stmt);
6374 if (integer_zerop (val))
6375 gimple_cond_make_false (cond_stmt);
6376 else if (integer_onep (val))
6377 gimple_cond_make_true (cond_stmt);
6378 else
6379 gcc_unreachable ();
6380 }
6381
6382 return true;
6383 }
6384
6385 return false;
6386 }
6387
6388 /* Callback for substitute_and_fold folding the stmt at *SI. */
6389
6390 bool
6391 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6392 {
6393 if (fold_predicate_in (si))
6394 return true;
6395
6396 return simplify_stmt_using_ranges (si);
6397 }
6398
6399 /* If OP has a value range with a single constant value return that,
6400 otherwise return NULL_TREE. This returns OP itself if OP is a
6401 constant.
6402
6403 Implemented as a pure wrapper right now, but this will change. */
6404
6405 tree
6406 vrp_folder::get_value (tree op)
6407 {
6408 return op_with_constant_singleton_value_range (op);
6409 }
6410
6411 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6412 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6413 BB. If no such ASSERT_EXPR is found, return OP. */
6414
6415 static tree
6416 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6417 {
6418 imm_use_iterator imm_iter;
6419 gimple *use_stmt;
6420 use_operand_p use_p;
6421
6422 if (TREE_CODE (op) == SSA_NAME)
6423 {
6424 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6425 {
6426 use_stmt = USE_STMT (use_p);
6427 if (use_stmt != stmt
6428 && gimple_assign_single_p (use_stmt)
6429 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6430 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6431 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6432 return gimple_assign_lhs (use_stmt);
6433 }
6434 }
6435 return op;
6436 }
6437
6438 /* A hack. */
6439 static class vr_values *x_vr_values;
6440
6441 /* A trivial wrapper so that we can present the generic jump threading
6442 code with a simple API for simplifying statements. STMT is the
6443 statement we want to simplify, WITHIN_STMT provides the location
6444 for any overflow warnings. */
6445
6446 static tree
6447 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6448 class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
6449 basic_block bb)
6450 {
6451 /* First see if the conditional is in the hash table. */
6452 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6453 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6454 return cached_lhs;
6455
6456 vr_values *vr_values = x_vr_values;
6457 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6458 {
6459 tree op0 = gimple_cond_lhs (cond_stmt);
6460 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6461
6462 tree op1 = gimple_cond_rhs (cond_stmt);
6463 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6464
6465 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6466 op0, op1, within_stmt);
6467 }
6468
6469 /* We simplify a switch statement by trying to determine which case label
6470 will be taken. If we are successful then we return the corresponding
6471 CASE_LABEL_EXPR. */
6472 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6473 {
6474 tree op = gimple_switch_index (switch_stmt);
6475 if (TREE_CODE (op) != SSA_NAME)
6476 return NULL_TREE;
6477
6478 op = lhs_of_dominating_assert (op, bb, stmt);
6479
6480 const value_range *vr = vr_values->get_value_range (op);
6481 if (vr->undefined_p ()
6482 || vr->varying_p ()
6483 || vr->symbolic_p ())
6484 return NULL_TREE;
6485
6486 if (vr->kind () == VR_RANGE)
6487 {
6488 size_t i, j;
6489 /* Get the range of labels that contain a part of the operand's
6490 value range. */
6491 find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
6492
6493 /* Is there only one such label? */
6494 if (i == j)
6495 {
6496 tree label = gimple_switch_label (switch_stmt, i);
6497
6498 /* The i'th label will be taken only if the value range of the
6499 operand is entirely within the bounds of this label. */
6500 if (CASE_HIGH (label) != NULL_TREE
6501 ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
6502 && tree_int_cst_compare (CASE_HIGH (label),
6503 vr->max ()) >= 0)
6504 : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
6505 && tree_int_cst_equal (vr->min (), vr->max ())))
6506 return label;
6507 }
6508
6509 /* If there are no such labels then the default label will be
6510 taken. */
6511 if (i > j)
6512 return gimple_switch_label (switch_stmt, 0);
6513 }
6514
6515 if (vr->kind () == VR_ANTI_RANGE)
6516 {
6517 unsigned n = gimple_switch_num_labels (switch_stmt);
6518 tree min_label = gimple_switch_label (switch_stmt, 1);
6519 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6520
6521 /* The default label will be taken only if the anti-range of the
6522 operand is entirely outside the bounds of all the (non-default)
6523 case labels. */
6524 if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
6525 && (CASE_HIGH (max_label) != NULL_TREE
6526 ? tree_int_cst_compare (vr->max (),
6527 CASE_HIGH (max_label)) >= 0
6528 : tree_int_cst_compare (vr->max (),
6529 CASE_LOW (max_label)) >= 0))
6530 return gimple_switch_label (switch_stmt, 0);
6531 }
6532
6533 return NULL_TREE;
6534 }
6535
6536 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6537 {
6538 tree lhs = gimple_assign_lhs (assign_stmt);
6539 if (TREE_CODE (lhs) == SSA_NAME
6540 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6541 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6542 && stmt_interesting_for_vrp (stmt))
6543 {
6544 edge dummy_e;
6545 tree dummy_tree;
6546 value_range new_vr;
6547 vr_values->extract_range_from_stmt (stmt, &dummy_e,
6548 &dummy_tree, &new_vr);
6549 tree singleton;
6550 if (new_vr.singleton_p (&singleton))
6551 return singleton;
6552 }
6553 }
6554
6555 return NULL_TREE;
6556 }
6557
6558 class vrp_dom_walker : public dom_walker
6559 {
6560 public:
6561 vrp_dom_walker (cdi_direction direction,
6562 class const_and_copies *const_and_copies,
6563 class avail_exprs_stack *avail_exprs_stack)
6564 : dom_walker (direction, REACHABLE_BLOCKS),
6565 m_const_and_copies (const_and_copies),
6566 m_avail_exprs_stack (avail_exprs_stack),
6567 m_dummy_cond (NULL) {}
6568
6569 virtual edge before_dom_children (basic_block);
6570 virtual void after_dom_children (basic_block);
6571
6572 class vr_values *vr_values;
6573
6574 private:
6575 class const_and_copies *m_const_and_copies;
6576 class avail_exprs_stack *m_avail_exprs_stack;
6577
6578 gcond *m_dummy_cond;
6579
6580 };
6581
6582 /* Called before processing dominator children of BB. We want to look
6583 at ASSERT_EXPRs and record information from them in the appropriate
6584 tables.
6585
6586 We could look at other statements here. It's not seen as likely
6587 to significantly increase the jump threads we discover. */
6588
6589 edge
6590 vrp_dom_walker::before_dom_children (basic_block bb)
6591 {
6592 gimple_stmt_iterator gsi;
6593
6594 m_avail_exprs_stack->push_marker ();
6595 m_const_and_copies->push_marker ();
6596 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6597 {
6598 gimple *stmt = gsi_stmt (gsi);
6599 if (gimple_assign_single_p (stmt)
6600 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6601 {
6602 tree rhs1 = gimple_assign_rhs1 (stmt);
6603 tree cond = TREE_OPERAND (rhs1, 1);
6604 tree inverted = invert_truthvalue (cond);
6605 vec<cond_equivalence> p;
6606 p.create (3);
6607 record_conditions (&p, cond, inverted);
6608 for (unsigned int i = 0; i < p.length (); i++)
6609 m_avail_exprs_stack->record_cond (&p[i]);
6610
6611 tree lhs = gimple_assign_lhs (stmt);
6612 m_const_and_copies->record_const_or_copy (lhs,
6613 TREE_OPERAND (rhs1, 0));
6614 p.release ();
6615 continue;
6616 }
6617 break;
6618 }
6619 return NULL;
6620 }
6621
6622 /* Called after processing dominator children of BB. This is where we
6623 actually call into the threader. */
6624 void
6625 vrp_dom_walker::after_dom_children (basic_block bb)
6626 {
6627 if (!m_dummy_cond)
6628 m_dummy_cond = gimple_build_cond (NE_EXPR,
6629 integer_zero_node, integer_zero_node,
6630 NULL, NULL);
6631
6632 x_vr_values = vr_values;
6633 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6634 m_avail_exprs_stack, NULL,
6635 simplify_stmt_for_jump_threading);
6636 x_vr_values = NULL;
6637
6638 m_avail_exprs_stack->pop_to_marker ();
6639 m_const_and_copies->pop_to_marker ();
6640 }
6641
6642 /* Blocks which have more than one predecessor and more than
6643 one successor present jump threading opportunities, i.e.,
6644 when the block is reached from a specific predecessor, we
6645 may be able to determine which of the outgoing edges will
6646 be traversed. When this optimization applies, we are able
6647 to avoid conditionals at runtime and we may expose secondary
6648 optimization opportunities.
6649
6650 This routine is effectively a driver for the generic jump
6651 threading code. It basically just presents the generic code
6652 with edges that may be suitable for jump threading.
6653
6654 Unlike DOM, we do not iterate VRP if jump threading was successful.
6655 While iterating may expose new opportunities for VRP, it is expected
6656 those opportunities would be very limited and the compile time cost
6657 to expose those opportunities would be significant.
6658
6659 As jump threading opportunities are discovered, they are registered
6660 for later realization. */
6661
6662 static void
6663 identify_jump_threads (class vr_values *vr_values)
6664 {
6665 /* Ugh. When substituting values earlier in this pass we can
6666 wipe the dominance information. So rebuild the dominator
6667 information as we need it within the jump threading code. */
6668 calculate_dominance_info (CDI_DOMINATORS);
6669
6670 /* We do not allow VRP information to be used for jump threading
6671 across a back edge in the CFG. Otherwise it becomes too
6672 difficult to avoid eliminating loop exit tests. Of course
6673 EDGE_DFS_BACK is not accurate at this time so we have to
6674 recompute it. */
6675 mark_dfs_back_edges ();
6676
6677 /* Allocate our unwinder stack to unwind any temporary equivalences
6678 that might be recorded. */
6679 const_and_copies *equiv_stack = new const_and_copies ();
6680
6681 hash_table<expr_elt_hasher> *avail_exprs
6682 = new hash_table<expr_elt_hasher> (1024);
6683 avail_exprs_stack *avail_exprs_stack
6684 = new class avail_exprs_stack (avail_exprs);
6685
6686 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6687 walker.vr_values = vr_values;
6688 walker.walk (cfun->cfg->x_entry_block_ptr);
6689
6690 /* We do not actually update the CFG or SSA graphs at this point as
6691 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6692 handle ASSERT_EXPRs gracefully. */
6693 delete equiv_stack;
6694 delete avail_exprs;
6695 delete avail_exprs_stack;
6696 }
6697
6698 /* Traverse all the blocks folding conditionals with known ranges. */
6699
6700 void
6701 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6702 {
6703 size_t i;
6704
6705 /* We have completed propagating through the lattice. */
6706 vr_values.set_lattice_propagation_complete ();
6707
6708 if (dump_file)
6709 {
6710 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6711 vr_values.dump_all_value_ranges (dump_file);
6712 fprintf (dump_file, "\n");
6713 }
6714
6715 /* Set value range to non pointer SSA_NAMEs. */
6716 for (i = 0; i < num_ssa_names; i++)
6717 {
6718 tree name = ssa_name (i);
6719 if (!name)
6720 continue;
6721
6722 const value_range *vr = get_value_range (name);
6723 if (!name || !vr->constant_p ())
6724 continue;
6725
6726 if (POINTER_TYPE_P (TREE_TYPE (name))
6727 && range_includes_zero_p (vr) == 0)
6728 set_ptr_nonnull (name);
6729 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6730 set_range_info (name, *vr);
6731 }
6732
6733 /* If we're checking array refs, we want to merge information on
6734 the executability of each edge between vrp_folder and the
6735 check_array_bounds_dom_walker: each can clear the
6736 EDGE_EXECUTABLE flag on edges, in different ways.
6737
6738 Hence, if we're going to call check_all_array_refs, set
6739 the flag on every edge now, rather than in
6740 check_array_bounds_dom_walker's ctor; vrp_folder may clear
6741 it from some edges. */
6742 if (warn_array_bounds && warn_array_bounds_p)
6743 set_all_edges_as_executable (cfun);
6744
6745 class vrp_folder vrp_folder;
6746 vrp_folder.vr_values = &vr_values;
6747 vrp_folder.substitute_and_fold ();
6748
6749 if (warn_array_bounds && warn_array_bounds_p)
6750 check_all_array_refs ();
6751 }
6752
6753 /* Main entry point to VRP (Value Range Propagation). This pass is
6754 loosely based on J. R. C. Patterson, ``Accurate Static Branch
6755 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6756 Programming Language Design and Implementation, pp. 67-78, 1995.
6757 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6758
6759 This is essentially an SSA-CCP pass modified to deal with ranges
6760 instead of constants.
6761
6762 While propagating ranges, we may find that two or more SSA name
6763 have equivalent, though distinct ranges. For instance,
6764
6765 1 x_9 = p_3->a;
6766 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6767 3 if (p_4 == q_2)
6768 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6769 5 endif
6770 6 if (q_2)
6771
6772 In the code above, pointer p_5 has range [q_2, q_2], but from the
6773 code we can also determine that p_5 cannot be NULL and, if q_2 had
6774 a non-varying range, p_5's range should also be compatible with it.
6775
6776 These equivalences are created by two expressions: ASSERT_EXPR and
6777 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
6778 result of another assertion, then we can use the fact that p_5 and
6779 p_4 are equivalent when evaluating p_5's range.
6780
6781 Together with value ranges, we also propagate these equivalences
6782 between names so that we can take advantage of information from
6783 multiple ranges when doing final replacement. Note that this
6784 equivalency relation is transitive but not symmetric.
6785
6786 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6787 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6788 in contexts where that assertion does not hold (e.g., in line 6).
6789
6790 TODO, the main difference between this pass and Patterson's is that
6791 we do not propagate edge probabilities. We only compute whether
6792 edges can be taken or not. That is, instead of having a spectrum
6793 of jump probabilities between 0 and 1, we only deal with 0, 1 and
6794 DON'T KNOW. In the future, it may be worthwhile to propagate
6795 probabilities to aid branch prediction. */
6796
6797 static unsigned int
6798 execute_vrp (bool warn_array_bounds_p)
6799 {
6800
6801 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6802 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6803 scev_initialize ();
6804
6805 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
6806 Inserting assertions may split edges which will invalidate
6807 EDGE_DFS_BACK. */
6808 insert_range_assertions ();
6809
6810 threadedge_initialize_values ();
6811
6812 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
6813 mark_dfs_back_edges ();
6814
6815 class vrp_prop vrp_prop;
6816 vrp_prop.vrp_initialize ();
6817 vrp_prop.ssa_propagate ();
6818 vrp_prop.vrp_finalize (warn_array_bounds_p);
6819
6820 /* We must identify jump threading opportunities before we release
6821 the datastructures built by VRP. */
6822 identify_jump_threads (&vrp_prop.vr_values);
6823
6824 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6825 was set by a type conversion can often be rewritten to use the
6826 RHS of the type conversion.
6827
6828 However, doing so inhibits jump threading through the comparison.
6829 So that transformation is not performed until after jump threading
6830 is complete. */
6831 basic_block bb;
6832 FOR_EACH_BB_FN (bb, cfun)
6833 {
6834 gimple *last = last_stmt (bb);
6835 if (last && gimple_code (last) == GIMPLE_COND)
6836 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6837 }
6838
6839 free_numbers_of_iterations_estimates (cfun);
6840
6841 /* ASSERT_EXPRs must be removed before finalizing jump threads
6842 as finalizing jump threads calls the CFG cleanup code which
6843 does not properly handle ASSERT_EXPRs. */
6844 remove_range_assertions ();
6845
6846 /* If we exposed any new variables, go ahead and put them into
6847 SSA form now, before we handle jump threading. This simplifies
6848 interactions between rewriting of _DECL nodes into SSA form
6849 and rewriting SSA_NAME nodes into SSA form after block
6850 duplication and CFG manipulation. */
6851 update_ssa (TODO_update_ssa);
6852
6853 /* We identified all the jump threading opportunities earlier, but could
6854 not transform the CFG at that time. This routine transforms the
6855 CFG and arranges for the dominator tree to be rebuilt if necessary.
6856
6857 Note the SSA graph update will occur during the normal TODO
6858 processing by the pass manager. */
6859 thread_through_all_blocks (false);
6860
6861 vrp_prop.vr_values.cleanup_edges_and_switches ();
6862 threadedge_finalize_values ();
6863
6864 scev_finalize ();
6865 loop_optimizer_finalize ();
6866 return 0;
6867 }
6868
6869 namespace {
6870
6871 const pass_data pass_data_vrp =
6872 {
6873 GIMPLE_PASS, /* type */
6874 "vrp", /* name */
6875 OPTGROUP_NONE, /* optinfo_flags */
6876 TV_TREE_VRP, /* tv_id */
6877 PROP_ssa, /* properties_required */
6878 0, /* properties_provided */
6879 0, /* properties_destroyed */
6880 0, /* todo_flags_start */
6881 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6882 };
6883
6884 class pass_vrp : public gimple_opt_pass
6885 {
6886 public:
6887 pass_vrp (gcc::context *ctxt)
6888 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6889 {}
6890
6891 /* opt_pass methods: */
6892 opt_pass * clone () { return new pass_vrp (m_ctxt); }
6893 void set_pass_param (unsigned int n, bool param)
6894 {
6895 gcc_assert (n == 0);
6896 warn_array_bounds_p = param;
6897 }
6898 virtual bool gate (function *) { return flag_tree_vrp != 0; }
6899 virtual unsigned int execute (function *)
6900 { return execute_vrp (warn_array_bounds_p); }
6901
6902 private:
6903 bool warn_array_bounds_p;
6904 }; // class pass_vrp
6905
6906 } // anon namespace
6907
6908 gimple_opt_pass *
6909 make_pass_vrp (gcc::context *ctxt)
6910 {
6911 return new pass_vrp (ctxt);
6912 }
6913
6914
6915 /* Worker for determine_value_range. */
6916
6917 static void
6918 determine_value_range_1 (value_range_base *vr, tree expr)
6919 {
6920 if (BINARY_CLASS_P (expr))
6921 {
6922 value_range_base vr0, vr1;
6923 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6924 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
6925 range_fold_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6926 &vr0, &vr1);
6927 }
6928 else if (UNARY_CLASS_P (expr))
6929 {
6930 value_range_base vr0;
6931 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6932 range_fold_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6933 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
6934 }
6935 else if (TREE_CODE (expr) == INTEGER_CST)
6936 vr->set (expr);
6937 else
6938 {
6939 value_range_kind kind;
6940 wide_int min, max;
6941 /* For SSA names try to extract range info computed by VRP. Otherwise
6942 fall back to varying. */
6943 if (TREE_CODE (expr) == SSA_NAME
6944 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
6945 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
6946 vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
6947 wide_int_to_tree (TREE_TYPE (expr), max));
6948 else
6949 vr->set_varying (TREE_TYPE (expr));
6950 }
6951 }
6952
6953 /* Compute a value-range for EXPR and set it in *MIN and *MAX. Return
6954 the determined range type. */
6955
6956 value_range_kind
6957 determine_value_range (tree expr, wide_int *min, wide_int *max)
6958 {
6959 value_range_base vr;
6960 determine_value_range_1 (&vr, expr);
6961 if (vr.constant_p ())
6962 {
6963 *min = wi::to_wide (vr.min ());
6964 *max = wi::to_wide (vr.max ());
6965 return vr.kind ();
6966 }
6967
6968 return VR_VARYING;
6969 }