/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"
#include "range-op.h"

static bool
ranges_from_anti_range (const value_range_base *ar,
                        value_range_base *vr0, value_range_base *vr1,
                        bool handle_pointers = false);

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

void
value_range::set_equiv (bitmap equiv)
{
  if (undefined_p () || varying_p ())
    equiv = NULL;
  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (m_equiv == NULL
      && equiv != NULL)
    m_equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != m_equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (m_equiv, equiv);
      else
        bitmap_clear (m_equiv);
    }
}

/* Initialize value_range.  */

void
value_range::set (enum value_range_kind kind, tree min, tree max,
                  bitmap equiv)
{
  value_range_base::set (kind, min, max);
  set_equiv (equiv);
  if (flag_checking)
    check ();
}

value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max);
}

value_range::value_range (value_range_kind kind, tree min, tree max,
                          bitmap equiv)
{
  m_equiv = NULL;
  set (kind, min, max, equiv);
}

value_range::value_range (const value_range_base &other)
{
  m_equiv = NULL;
  set (other.kind (), other.min (), other.max (), NULL);
}

value_range_base::value_range_base (tree type)
{
  set_varying (type);
}

value_range_base::value_range_base (enum value_range_kind kind,
                                    tree type,
                                    const wide_int &wmin,
                                    const wide_int &wmax)
{
  tree min = wide_int_to_tree (type, wmin);
  tree max = wide_int_to_tree (type, wmax);
  gcc_checking_assert (kind == VR_RANGE || kind == VR_ANTI_RANGE);
  set (kind, min, max);
}

value_range_base::value_range_base (tree type,
                                    const wide_int &wmin,
                                    const wide_int &wmax)
{
  tree min = wide_int_to_tree (type, wmin);
  tree max = wide_int_to_tree (type, wmax);
  set (VR_RANGE, min, max);
}

value_range_base::value_range_base (tree min, tree max)
{
  set (VR_RANGE, min, max);
}

/* Like set, but keep the equivalences in place.  */

void
value_range::update (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max,
       (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
}

/* Copy value_range in FROM into THIS while avoiding bitmap sharing.

   Note: The code that avoids the bitmap sharing looks at the existing
   this->m_equiv, so this function cannot be used to initialize an
   object.  Use the constructors for initialization.  */

void
value_range::deep_copy (const value_range *from)
{
  set (from->m_kind, from->min (), from->max (), from->m_equiv);
}

void
value_range::move (value_range *from)
{
  set (from->m_kind, from->min (), from->max ());
  m_equiv = from->m_equiv;
  from->m_equiv = NULL;
}

/* Check the validity of the range.  */

void
value_range_base::check ()
{
  switch (m_kind)
    {
    case VR_RANGE:
    case VR_ANTI_RANGE:
      {
        int cmp;

        gcc_assert (m_min && m_max);

        gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));

        /* Creating ~[-MIN, +MAX] is stupid because that would be
           the empty set.  */
        if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
          gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));

        cmp = compare_values (m_min, m_max);
        gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
        break;
      }
    case VR_UNDEFINED:
      gcc_assert (!min () && !max ());
      break;
    case VR_VARYING:
      gcc_assert (m_min && m_max);
      break;
    default:
      gcc_unreachable ();
    }
}

void
value_range::check ()
{
  value_range_base::check ();
  switch (m_kind)
    {
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
    default:;
    }
}

/* Equality operator.  We purposely do not overload ==, to avoid
   confusion with the equivalence bitmap in the derived value_range
   class.  */

bool
value_range_base::equal_p (const value_range_base &other) const
{
  /* Ignore types for undefined.  All undefines are equal.  */
  if (undefined_p ())
    return m_kind == other.m_kind;

  return (m_kind == other.m_kind
          && vrp_operand_equal_p (m_min, other.m_min)
          && vrp_operand_equal_p (m_max, other.m_max));
}

/* Returns TRUE if THIS == OTHER.  Ignores the equivalence bitmap if
   IGNORE_EQUIVS is TRUE.  */

bool
value_range::equal_p (const value_range &other, bool ignore_equivs) const
{
  return (value_range_base::equal_p (other)
          && (ignore_equivs
              || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
}

/* Return TRUE if this is a symbolic range.  */

bool
value_range_base::symbolic_p () const
{
  return (!varying_p ()
          && !undefined_p ()
          && (!is_gimple_min_invariant (m_min)
              || !is_gimple_min_invariant (m_max)));
}

/* NOTE: This is not the inverse of symbolic_p because the range
   could also be varying or undefined.  Ideally they should be inverse
   of each other, with varying only applying to symbolics.  Varying of
   constants would be represented as [-MIN, +MAX].  */

bool
value_range_base::constant_p () const
{
  return (!varying_p ()
          && !undefined_p ()
          && TREE_CODE (m_min) == INTEGER_CST
          && TREE_CODE (m_max) == INTEGER_CST);
}

void
value_range_base::set_undefined ()
{
  m_kind = VR_UNDEFINED;
  m_min = m_max = NULL;
}

void
value_range::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL, NULL);
}

void
value_range_base::set_varying (tree type)
{
  m_kind = VR_VARYING;
  if (supports_type_p (type))
    {
      m_min = vrp_val_min (type, true);
      m_max = vrp_val_max (type, true);
    }
  else
    /* We can't do anything range-wise with these types.  */
    m_min = m_max = error_mark_node;
}

void
value_range::set_varying (tree type)
{
  value_range_base::set_varying (type);
  equiv_clear ();
}

/* Return TRUE if it is possible that range contains VAL.  */

bool
value_range_base::may_contain_p (tree val) const
{
  return value_inside_range (val) != 0;
}

void
value_range::equiv_clear ()
{
  if (m_equiv)
    bitmap_clear (m_equiv);
}

/* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
   bitmap.  If no equivalence table has been created, OBSTACK is the
   obstack to use (NULL for the default obstack).

   This is the central point where equivalence processing can be
   turned on/off.  */

void
value_range::equiv_add (const_tree var,
                        const value_range *var_vr,
                        bitmap_obstack *obstack)
{
  if (!m_equiv)
    m_equiv = BITMAP_ALLOC (obstack);
  unsigned ver = SSA_NAME_VERSION (var);
  bitmap_set_bit (m_equiv, ver);
  if (var_vr && var_vr->m_equiv)
    bitmap_ior_into (m_equiv, var_vr->m_equiv);
}

/* If range is a singleton, place it in RESULT and return TRUE.
   Note: A singleton can be any gimple invariant, not just constants.
   So, [&x, &x] counts as a singleton.  */

bool
value_range_base::singleton_p (tree *result) const
{
  if (m_kind == VR_ANTI_RANGE)
    {
      if (nonzero_p ())
        {
          if (TYPE_PRECISION (type ()) == 1)
            {
              if (result)
                *result = m_max;
              return true;
            }
          return false;
        }
      if (num_pairs () == 1)
        {
          value_range_base vr0, vr1;
          ranges_from_anti_range (this, &vr0, &vr1, true);
          return vr0.singleton_p (result);
        }
    }
  if (m_kind == VR_RANGE
      && vrp_operand_equal_p (min (), max ())
      && is_gimple_min_invariant (min ()))
    {
      if (result)
        *result = min ();
      return true;
    }
  return false;
}

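/* Illustrative sketch (not part of GCC): assuming the usual global
   trees are initialized,

     value_range_base b (VR_RANGE, build_int_cst (integer_type_node, 7),
                         build_int_cst (integer_type_node, 7));
     tree val;
     b.singleton_p (&val);   // returns true, VAL is 7.

   By contrast, a nonzero anti-range such as ~[0, 0] over a 32-bit int
   is not a singleton: the exclusion leaves two disjoint pieces, so the
   nonzero_p path above returns false.  */
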
tree
value_range_base::type () const
{
  gcc_checking_assert (m_min);
  return TREE_TYPE (min ());
}

void
value_range_base::dump (FILE *file) const
{
  if (undefined_p ())
    fprintf (file, "UNDEFINED");
  else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
    {
      tree ttype = type ();

      print_generic_expr (file, ttype);
      fprintf (file, " ");

      fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");

      if (INTEGRAL_TYPE_P (ttype)
          && !TYPE_UNSIGNED (ttype)
          && vrp_val_is_min (min ())
          && TYPE_PRECISION (ttype) != 1)
        fprintf (file, "-INF");
      else
        print_generic_expr (file, min ());

      fprintf (file, ", ");

      if (supports_type_p (ttype)
          && vrp_val_is_max (max (), true)
          && TYPE_PRECISION (ttype) != 1)
        fprintf (file, "+INF");
      else
        print_generic_expr (file, max ());

      fprintf (file, "]");
    }
  else if (varying_p ())
    {
      print_generic_expr (file, type ());
      fprintf (file, " VARYING");
    }
  else
    gcc_unreachable ();
}

void
value_range_base::dump () const
{
  dump (stderr);
}

void
value_range::dump (FILE *file) const
{
  value_range_base::dump (file);
  if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
      && m_equiv)
    {
      bitmap_iterator bi;
      unsigned i, c = 0;

      fprintf (file, "  EQUIVALENCES: { ");

      EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
        {
          print_generic_expr (file, ssa_name (i));
          fprintf (file, " ");
          c++;
        }

      fprintf (file, "} (%u elements)", c);
    }
}

void
value_range::dump () const
{
  dump (stderr);
}

void
dump_value_range (FILE *file, const value_range *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

void
dump_value_range (FILE *file, const value_range_base *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

DEBUG_FUNCTION void
debug (const value_range_base *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range_base &vr)
{
  dump_value_range (stderr, &vr);
}

DEBUG_FUNCTION void
debug (const value_range *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range &vr)
{
  dump_value_range (stderr, &vr);
}

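/* Usage sketch (illustrative, not part of GCC): these DEBUG_FUNCTION
   overloads exist to be called by hand from a debugger while stepping
   through a VRP pass, e.g. from gdb:

     (gdb) call debug (vr)
     int [2, 10]  EQUIVALENCES: { x_3 } (1 elements)

   The output shown is only indicative; the exact text depends on the
   range kind and its equivalences, as printed by dump above.  */
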
/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
          && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type, bool handle_pointers)
{
  if (INTEGRAL_TYPE_P (type))
    return TYPE_MAX_VALUE (type);
  if (POINTER_TYPE_P (type) && handle_pointers)
    {
      wide_int max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
      return wide_int_to_tree (const_cast<tree> (type), max);
    }
  return NULL_TREE;
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type, bool handle_pointers)
{
  if (INTEGRAL_TYPE_P (type))
    return TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) && handle_pointers)
    return build_zero_cst (const_cast<tree> (type));
  return NULL_TREE;
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val, bool handle_pointers)
{
  tree type_max = vrp_val_max (TREE_TYPE (val), handle_pointers);
  return (val == type_max
          || (type_max != NULL_TREE
              && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val, bool handle_pointers)
{
  tree type_min = vrp_val_min (TREE_TYPE (val), handle_pointers);
  return (val == type_min
          || (type_min != NULL_TREE
              && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
                                   wide_int *min, wide_int *max,
                                   const wide_int &nonzero_bits,
                                   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
         A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
         to create an inclusive upper bound for A and an inclusive lower
         bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
         and A_MAX is the highest value that satisfies NONZERO_BITS.
         Likewise if the calculation of B_MIN wrapped, B is effectively
         empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
        return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
         other one.  */
      if (a_empty || b_empty)
        {
          *min = b_min;
          *max = a_max;
          gcc_checking_assert (wi::le_p (*min, *max, sgn));
          return VR_RANGE;
        }

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
         satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
        {
          unsigned int precision = min->get_precision ();
          *min = wi::min_value (precision, sgn);
          *max = wi::max_value (precision, sgn);
          vr_type = VR_RANGE;
        }
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
        return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}

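/* Worked example (illustrative, not part of GCC): take the VR_RANGE
   [5, 10] with NONZERO_BITS == 0b1100, i.e. only the values
   {0, 4, 8, 12} are possible.  The max 10 is rounded down to 8, the
   min 5 is rounded up to 8, giving the VR_RANGE [8, 8].  Had the
   rounded-up min exceeded the rounded-down max, the result would
   have been VR_UNDEFINED.  */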

/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
   This means adjusting VRTYPE, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as the anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
value_range_base::set (enum value_range_kind kind, tree min, tree max)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (kind == VR_UNDEFINED)
    {
      set_undefined ();
      return;
    }
  else if (kind == VR_VARYING)
    {
      gcc_assert (TREE_TYPE (min) == TREE_TYPE (max));
      tree typ = TREE_TYPE (min);
      if (supports_type_p (typ))
        {
          gcc_assert (vrp_val_min (typ, true));
          gcc_assert (vrp_val_max (typ, true));
        }
      set_varying (typ);
      return;
    }

  /* Convert POLY_INT_CST bounds into worst-case INTEGER_CST bounds.  */
  if (POLY_INT_CST_P (min))
    {
      tree type_min = vrp_val_min (TREE_TYPE (min), true);
      widest_int lb
        = constant_lower_bound_with_limit (wi::to_poly_widest (min),
                                           wi::to_widest (type_min));
      min = wide_int_to_tree (TREE_TYPE (min), lb);
    }
  if (POLY_INT_CST_P (max))
    {
      tree type_max = vrp_val_max (TREE_TYPE (max), true);
      widest_int ub
        = constant_upper_bound_with_limit (wi::to_poly_widest (max),
                                           wi::to_widest (type_max));
      max = wide_int_to_tree (TREE_TYPE (max), ub);
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      m_kind = kind;
      m_min = min;
      m_max = max;
      return;
    }

  /* If MIN and MAX are in the wrong order, swap them and flip the
     range kind between VR_RANGE and VR_ANTI_RANGE.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
         range covers all values, so for VR_RANGE it is varying and
         for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
        {
          set_varying (TREE_TYPE (min));
          return;
        }

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
         that again.  But this represents an empty value range, so drop
         to varying in this case.  */
      if (tree_int_cst_lt (max, min))
        {
          set_varying (TREE_TYPE (min));
          return;
        }

      kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  tree type = TREE_TYPE (min);

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (kind == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > INF as -INF/INF as well.  */
      bool is_min = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
        {
          /* We cannot deal with empty ranges, drop to varying.
             ??? This could be VR_UNDEFINED instead.  */
          set_varying (type);
          return;
        }
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
               && (is_min || is_max))
        {
          /* Non-empty boolean ranges can always be represented
             as a singleton range.  */
          if (is_min)
            min = max = vrp_val_max (TREE_TYPE (min));
          else
            min = max = vrp_val_min (TREE_TYPE (min));
          kind = VR_RANGE;
        }
      else if (is_min
               /* Allow non-zero pointers to be normalized to [1,MAX].  */
               || (POINTER_TYPE_P (TREE_TYPE (min))
                   && integer_zerop (min)))
        {
          tree one = build_int_cst (TREE_TYPE (max), 1);
          min = int_const_binop (PLUS_EXPR, max, one);
          max = vrp_val_max (TREE_TYPE (max), true);
          kind = VR_RANGE;
        }
      else if (is_max)
        {
          tree one = build_int_cst (TREE_TYPE (min), 1);
          max = int_const_binop (MINUS_EXPR, min, one);
          min = vrp_val_min (TREE_TYPE (min));
          kind = VR_RANGE;
        }
    }

  /* Normalize [MIN, MAX] into VARYING and ~[MIN, MAX] into UNDEFINED.

     Avoid using TYPE_{MIN,MAX}_VALUE because -fstrict-enums can
     restrict those to a subset of what actually fits in the type.
     Instead use the extremes of the type precision which will allow
     compare_range_with_value() to check if a value is inside a range,
     whereas if we used TYPE_*_VAL, said function would just punt
     upon seeing a VARYING.  */
  unsigned prec = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  if (wi::eq_p (wi::to_wide (min), wi::min_value (prec, sign))
      && wi::eq_p (wi::to_wide (max), wi::max_value (prec, sign)))
    {
      if (kind == VR_RANGE)
        set_varying (type);
      else if (kind == VR_ANTI_RANGE)
        set_undefined ();
      else
        gcc_unreachable ();
      return;
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  m_kind = kind;
  m_min = min;
  m_max = max;
  if (flag_checking)
    check ();
}

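/* Canonicalization example (illustrative, not part of GCC): for a
   32-bit int, calling

     value_range_base vr;
     vr.set (VR_RANGE, build_int_cst (integer_type_node, 10),
             build_int_cst (integer_type_node, 5));

   hits the MAX < MIN case above: the bounds are swapped to [6, 9] and
   the kind is flipped, so VR ends up as the anti-range ~[6, 9],
   i.e. [INT_MIN, 5] U [10, INT_MAX].  Similarly, [INT_MIN, INT_MAX] is
   normalized to VARYING and ~[INT_MIN, INT_MAX] to UNDEFINED.  */
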
void
value_range_base::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val);
}

void
value_range::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val, NULL);
}

/* Set value range VR to a nonzero range of type TYPE.  */

void
value_range_base::set_nonzero (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero);
}

/* Set value range VR to a ZERO range of type TYPE.  */

void
value_range_base::set_zero (tree type)
{
  set (build_int_cst (type, 0));
}

/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
          || ((!b1 || bitmap_empty_p (b1))
              && (!b2 || bitmap_empty_p (b2)))
          || (b1 && b2
              && bitmap_equal_p (b1, b2)));
}

static bool
range_has_numeric_bounds_p (const value_range_base *vr)
{
  return (vr->min ()
          && TREE_CODE (vr->min ()) == INTEGER_CST
          && TREE_CODE (vr->max ()) == INTEGER_CST);
}

/* Return true if max and min of VR are INTEGER_CST.  It need not be a
   singleton.  */

bool
range_int_cst_p (const value_range_base *vr)
{
  return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (const value_range_base *vr)
{
  return (range_int_cst_p (vr)
          && tree_int_cst_equal (vr->min (), vr->max ()));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
        {
          neg_ = (TREE_CODE (t) == MINUS_EXPR);
          inv_ = TREE_OPERAND (t, 0);
          t = TREE_OPERAND (t, 1);
        }
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
        {
          neg_ = false;
          inv_ = TREE_OPERAND (t, 1);
          t = TREE_OPERAND (t, 0);
        }
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}

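/* Decomposition examples (illustrative, not part of GCC): for
   T == x_1 + 16, get_single_symbol returns x_1 with *NEG == false and
   *INV == 16; for T == 10 - x_1 it returns x_1 with *NEG == true and
   *INV == 10; for T == x_1 + y_2, where neither operand is invariant,
   it returns NULL_TREE.  */
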
/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
    return val == val2 ? 0 : -2;
  else
    {
      int cmp = compare_values (val, val2);
      if (cmp == -1)
        return 1;
      else if (cmp == 0 || cmp == 1)
        return 0;
      else
        return -2;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

     -2 if VAL1 and VAL2 cannot be compared at compile-time,
     -1 if VAL1 < VAL2,
      0 if VAL1 == VAL2,
     +1 if VAL1 > VAL2, and
     +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
    val2 = fold_convert (TREE_TYPE (val1), val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
        return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!inv1 || !TREE_NO_WARNING (val1))
          && (!inv2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      if (!inv1)
        inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
        inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
                      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!sym1 || !TREE_NO_WARNING (val1))
          && (!sym2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
         underflows, this means that we can trivially compare the NAME with
         it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
          != wi::cmp (diff, wi::to_wide (cst), sgn))
        {
          const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
          return cst1 ? res : -res;
        }

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        return -2;

      if (TREE_CODE (val1) == INTEGER_CST
          && TREE_CODE (val2) == INTEGER_CST)
        return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
        {
          if (known_eq (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 0;
          if (known_lt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return -1;
          if (known_gt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 1;
        }

      return -2;
    }
  else
    {
      if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
        {
          /* We cannot compare overflowed values.  */
          if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
            return -2;

          return tree_int_cst_compare (val1, val2);
        }

      /* First see if VAL1 and VAL2 are not the same.  */
      if (operand_equal_p (val1, val2, 0))
        return 0;

      fold_defer_overflow_warnings ();

      /* If VAL1 is a lower address than VAL2, return -1.  */
      tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return -1;
        }

      /* If VAL1 is a higher address than VAL2, return +1.  */
      t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return 1;
        }

      /* If VAL1 is different than VAL2, return +2.  */
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      fold_undefer_and_ignore_overflow_warnings ();
      if (t && integer_onep (t))
        return 2;

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}

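/* Symbolic comparison example (illustrative, not part of GCC): with a
   signed int SSA name x_1 and signed overflow treated as undefined,
   comparing VAL1 == x_1 + 5 against VAL2 == x_1 + 7 takes the
   SYM1 && SYM2 path above and returns wi::cmp (5, 7, SIGNED) == -1,
   i.e. VAL1 < VAL2.  With wrapping overflow the result would be -2
   (not comparable), since x_1 + 5 can wrap past x_1 + 7.  */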

/* Return 1 if VAL is inside value range.
   0 if VAL is not inside value range.
   -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_range_base::value_inside_range (tree val) const
{
  int cmp1, cmp2;

  if (varying_p ())
    return 1;

  if (undefined_p ())
    return 0;

  cmp1 = operand_less_p (val, m_min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return m_kind != VR_RANGE;

  cmp2 = operand_less_p (m_max, val);
  if (cmp2 == -2)
    return -2;

  if (m_kind == VR_RANGE)
    return !cmp2;
  else
    return !!cmp2;
}

/* For range [LB, UB] compute two wide_int bit masks.

   In the MAY_BE_NONZERO bit mask, if some bit is unset, it means that
   for all numbers in the range the bit is 0, otherwise it might be 0
   or 1.

   In the MUST_BE_NONZERO bit mask, if some bit is set, it means that
   for all numbers in the range the bit is 1, otherwise it might be 0
   or 1.  */

static inline void
wide_int_range_set_zero_nonzero_bits (signop sign,
                                      const wide_int &lb, const wide_int &ub,
                                      wide_int &may_be_nonzero,
                                      wide_int &must_be_nonzero)
{
  may_be_nonzero = wi::minus_one (lb.get_precision ());
  must_be_nonzero = wi::zero (lb.get_precision ());

  if (wi::eq_p (lb, ub))
    {
      may_be_nonzero = lb;
      must_be_nonzero = may_be_nonzero;
    }
  else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
    {
      wide_int xor_mask = lb ^ ub;
      may_be_nonzero = lb | ub;
      must_be_nonzero = lb & ub;
      if (xor_mask != 0)
        {
          wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
                                    may_be_nonzero.get_precision ());
          may_be_nonzero = may_be_nonzero | mask;
          must_be_nonzero = wi::bit_and_not (must_be_nonzero, mask);
        }
    }
}

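/* Worked example (illustrative, not part of GCC): for the unsigned
   range [10, 12] (binary 1010 .. 1100), LB ^ UB == 0110, so the mask
   covers the bits below the highest differing bit.  The result is
   MUST_BE_NONZERO == 1000 (bit 3 is set in 10, 11 and 12 alike) and
   MAY_BE_NONZERO == 1111 (the low three bits vary across the range).  */
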
/* value_range wrapper for wide_int_range_set_zero_nonzero_bits above.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */

bool
vrp_set_zero_nonzero_bits (const tree expr_type,
                           const value_range_base *vr,
                           wide_int *may_be_nonzero,
                           wide_int *must_be_nonzero)
{
  if (!range_int_cst_p (vr))
    {
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
      return false;
    }
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
                                        wi::to_wide (vr->min ()),
                                        wi::to_wide (vr->max ()),
                                        *may_be_nonzero, *must_be_nonzero);
  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (const value_range_base *ar,
                        value_range_base *vr0, value_range_base *vr1,
                        bool handle_pointers)
{
  tree type = ar->type ();

  vr0->set_undefined ();
  vr1->set_undefined ();

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */

  if (ar->kind () != VR_ANTI_RANGE
      || TREE_CODE (ar->min ()) != INTEGER_CST
      || TREE_CODE (ar->max ()) != INTEGER_CST
      || !vrp_val_min (type, handle_pointers)
      || !vrp_val_max (type, handle_pointers))
    return false;

  if (tree_int_cst_lt (vrp_val_min (type, handle_pointers), ar->min ()))
    vr0->set (VR_RANGE,
              vrp_val_min (type, handle_pointers),
              wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
  if (tree_int_cst_lt (ar->max (), vrp_val_max (type, handle_pointers)))
    vr1->set (VR_RANGE,
              wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
              vrp_val_max (type, handle_pointers));
  if (vr0->undefined_p ())
    {
      *vr0 = *vr1;
      vr1->set_undefined ();
    }

  return !vr0->undefined_p ();
}

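/* Decomposition example (illustrative, not part of GCC): for a 32-bit
   int, the anti-range ~[5, 10] splits into *VR0 == [INT_MIN, 4] and
   *VR1 == [11, INT_MAX].  For ~[INT_MIN, 10] only the upper piece
   exists, so it is moved into *VR0 and *VR1 is left VR_UNDEFINED.  */
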
/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic for OPn if it has a symbolic.

   NEG_OPn is TRUE if the OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
                       tree sym_op0, tree sym_op1,
                       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic lower bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
                                 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
         undefined overflow.  */
      if (!minus_p
          || neg_op1
          || TYPE_OVERFLOW_WRAPS (type))
        bound = build_symbolic_expr (type, sym_op1,
                                     neg_op1 ^ minus_p, bound);
      else
        bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is set to -1 if an underflow occurred, +1 if an overflow
   occurred, and 0 if neither occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
               tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
        wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
        wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
        wi = wi::neg (wi::to_wide (op1), &ovf);
      else
        wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in KIND, MIN and MAX.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.  -1 indicates
   underflow.  +1 indicates overflow.  0 indicates neither.  */

static void
set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
                               tree type,
                               const wide_int &wmin, const wide_int &wmax,
                               wi::overflow_type min_ovf,
                               wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* For one bit precision if max < min, then the swapped
     range covers all values.  */
  if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
    {
      kind = VR_VARYING;
      return;
    }

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
         range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
        {
          /* If the limits are swapped, we wrapped around and cover
             the entire range.  */
          if (wi::gt_p (tmin, tmax, sgn))
            kind = VR_VARYING;
          else
            {
              kind = VR_RANGE;
              /* No overflow or both overflow or underflow.  The
                 range kind stays VR_RANGE.  */
              min = wide_int_to_tree (type, tmin);
              max = wide_int_to_tree (type, tmax);
            }
          return;
        }
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
               || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
        {
          /* Min underflow or max overflow.  The range kind
             changes to VR_ANTI_RANGE.  */
          bool covers = false;
          wide_int tem = tmin;
          tmin = tmax + 1;
          if (wi::cmp (tmin, tmax, sgn) < 0)
            covers = true;
          tmax = tem - 1;
          if (wi::cmp (tmax, tem, sgn) > 0)
            covers = true;
          /* If the anti-range would cover nothing, drop to varying.
             Likewise if the anti-range bounds are outside of the
             types values.  */
          if (covers || wi::cmp (tmin, tmax, sgn) > 0)
            {
              kind = VR_VARYING;
              return;
            }
          kind = VR_ANTI_RANGE;
          min = wide_int_to_tree (type, tmin);
          max = wide_int_to_tree (type, tmax);
          return;
        }
      else
        {
          /* Other underflow and/or overflow, drop to VR_VARYING.  */
          kind = VR_VARYING;
          return;
        }
    }
  else
    {
      /* If overflow does not wrap, saturate to the types min/max
         value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      kind = VR_RANGE;
      if (min_ovf == wi::OVF_UNDERFLOW)
        min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
        min = wide_int_to_tree (type, type_max);
      else
        min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
        max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
        max = wide_int_to_tree (type, type_max);
      else
        max = wide_int_to_tree (type, wmax);
    }
}

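/* Wrapping example (illustrative, not part of GCC): for unsigned char,
   adding [250, 255] + [0, 10] yields WMIN == 250 and WMAX == 265 with
   MAX_OVF == wi::OVF_OVERFLOW only.  Truncation gives TMIN == 250,
   TMAX == 9, and the mixed-overflow branch above converts this to the
   anti-range ~[10, 249], i.e. [250, 255] U [0, 9].  */
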
/* Fold two value ranges of a POINTER_PLUS_EXPR into VR.  */

static void
extract_range_from_pointer_plus_expr (value_range_base *vr,
                                      enum tree_code code,
                                      tree expr_type,
                                      const value_range_base *vr0,
                                      const value_range_base *vr1)
{
  gcc_checking_assert (POINTER_TYPE_P (expr_type)
                       && code == POINTER_PLUS_EXPR);
  /* For pointer types, we are really only interested in asserting
     whether the expression evaluates to non-NULL.
     With -fno-delete-null-pointer-checks we need to be more
     conservative.  As some object might reside at address 0,
     then some offset could be added to it and the same offset
     subtracted again and the result would be NULL.
     E.g.
     static int a[12]; where &a[0] is NULL and
     ptr = &a[6];
     ptr -= 6;
     ptr will be NULL here, even when there is POINTER_PLUS_EXPR
     where the first range doesn't include zero and the second one
     doesn't either.  As the second operand is sizetype (unsigned),
     consider all ranges where the MSB could be set as possible
     subtractions where the result might be NULL.  */
  if ((!range_includes_zero_p (vr0)
       || !range_includes_zero_p (vr1))
      && !TYPE_OVERFLOW_WRAPS (expr_type)
      && (flag_delete_null_pointer_checks
          || (range_int_cst_p (vr1)
              && !tree_int_cst_sign_bit (vr1->max ()))))
    vr->set_nonzero (expr_type);
  else if (vr0->zero_p () && vr1->zero_p ())
    vr->set_zero (expr_type);
  else
    vr->set_varying (expr_type);
}

/* Extract range information from a PLUS/MINUS_EXPR and store the
   result in *VR.  */

static void
extract_range_from_plus_minus_expr (value_range_base *vr,
                                    enum tree_code code,
                                    tree expr_type,
                                    const value_range_base *vr0_,
                                    const value_range_base *vr1_)
{
  gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);

  value_range_base vr0 = *vr0_, vr1 = *vr1_;
  value_range_base vrtem0, vrtem1;

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
      if (!vrtem1.undefined_p ())
        {
          value_range_base vrres;
          extract_range_from_plus_minus_expr (&vrres, code, expr_type,
                                              &vrtem1, vr1_);
          vr->union_ (&vrres);
        }
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
      if (!vrtem1.undefined_p ())
        {
          value_range_base vrres;
          extract_range_from_plus_minus_expr (&vrres, code, expr_type,
                                              vr0_, &vrtem1);
          vr->union_ (&vrres);
        }
      return;
    }

  value_range_kind kind;
  value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
  tree vr0_min = vr0.min (), vr0_max = vr0.max ();
  tree vr1_min = vr1.min (), vr1_max = vr1.max ();
  tree min = NULL_TREE, max = NULL_TREE;

  /* This will normalize things such that calculating
     [0,0] - VR_VARYING is not dropped to varying, but is
     calculated as [MIN+1, MAX].  */
  if (vr0.varying_p ())
    {
      vr0_kind = VR_RANGE;
      vr0_min = vrp_val_min (expr_type);
      vr0_max = vrp_val_max (expr_type);
    }
  if (vr1.varying_p ())
    {
      vr1_kind = VR_RANGE;
      vr1_min = vrp_val_min (expr_type);
      vr1_max = vrp_val_max (expr_type);
    }

  const bool minus_p = (code == MINUS_EXPR);
  tree min_op0 = vr0_min;
  tree min_op1 = minus_p ? vr1_max : vr1_min;
  tree max_op0 = vr0_max;
  tree max_op1 = minus_p ? vr1_min : vr1_max;
  tree sym_min_op0 = NULL_TREE;
  tree sym_min_op1 = NULL_TREE;
  tree sym_max_op0 = NULL_TREE;
  tree sym_max_op1 = NULL_TREE;
  bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

  neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;

  /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
     single-symbolic ranges, try to compute the precise resulting range,
     but only if we know that this resulting range will also be constant
     or single-symbolic.  */
  if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
      && (TREE_CODE (min_op0) == INTEGER_CST
          || (sym_min_op0
              = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
      && (TREE_CODE (min_op1) == INTEGER_CST
          || (sym_min_op1
              = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
      && (!(sym_min_op0 && sym_min_op1)
          || (sym_min_op0 == sym_min_op1
              && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
      && (TREE_CODE (max_op0) == INTEGER_CST
          || (sym_max_op0
              = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
      && (TREE_CODE (max_op1) == INTEGER_CST
          || (sym_max_op1
              = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
      && (!(sym_max_op0 && sym_max_op1)
          || (sym_max_op0 == sym_max_op1
              && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
    {
      wide_int wmin, wmax;
      wi::overflow_type min_ovf = wi::OVF_NONE;
      wi::overflow_type max_ovf = wi::OVF_NONE;

      /* Build the bounds.  */
      combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
      combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);

      /* If the resulting range will be symbolic, we need to eliminate any
         explicit or implicit overflow introduced in the above computation
         because compare_values could make an incorrect use of it.  That's
         why we require one of the ranges to be a singleton.  */
      if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
          && ((bool)min_ovf || (bool)max_ovf
              || (min_op0 != max_op0 && min_op1 != max_op1)))
        {
          vr->set_varying (expr_type);
          return;
        }

      /* Adjust the range for possible overflow.  */
      set_value_range_with_overflow (kind, min, max, expr_type,
                                     wmin, wmax, min_ovf, max_ovf);
      if (kind == VR_VARYING)
        {
          vr->set_varying (expr_type);
          return;
        }

      /* Build the symbolic bounds if needed.  */
      adjust_symbolic_bound (min, code, expr_type,
                             sym_min_op0, sym_min_op1,
                             neg_min_op0, neg_min_op1);
      adjust_symbolic_bound (max, code, expr_type,
                             sym_max_op0, sym_max_op1,
                             neg_max_op0, neg_max_op1);
    }
  else
    {
      /* For other cases, for example if we have a PLUS_EXPR with two
         VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
         to compute a precise range for such a case.
         ??? General even mixed range kind operations can be expressed
         by for example transforming ~[3, 5] + [1, 2] to range-only
         operations and a union primitive:
           [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
               [-INF+1, 4]     U    [6, +INF(OVF)]
         though usually the union is not exactly representable with
         a single range or anti-range as the above is
         [-INF+1, +INF(OVF)] intersected with ~[5, 5]
         but one could use a scheme similar to equivalences for this.  */
      vr->set_varying (expr_type);
      return;
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  */
  if (min == NULL_TREE
      || TREE_OVERFLOW_P (min)
      || max == NULL_TREE
      || TREE_OVERFLOW_P (max))
    {
      vr->set_varying (expr_type);
      return;
    }

  int cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
         then the operation caused one of them to wrap around, mark
         the new range VARYING.  */
      vr->set_varying (expr_type);
    }
  else
    vr->set (kind, min, max);
}

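/* Worked example (illustrative, not part of GCC): for signed int,
   [1, 5] + [10, 20] combines the low bounds and the high bounds to
   give [11, 25].  For MINUS the bound operands are crossed:
   [1, 5] - [10, 20] is [1 - 20, 5 - 10] == [-19, -5].  And per the
   normalization above, [0, 0] - VARYING is computed as
   [0 - INT_MAX, 0 - INT_MIN], which saturates to [INT_MIN + 1, INT_MAX]
   when signed overflow does not wrap.  */
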
/* Return the range-ops handler for CODE and EXPR_TYPE.  If no
   suitable operator is found, return NULL and set VR to VARYING.  */

static const range_operator *
get_range_op_handler (value_range_base *vr,
                      enum tree_code code,
                      tree expr_type)
{
  const range_operator *op = range_op_handler (code, expr_type);
  if (!op)
    vr->set_varying (expr_type);
  return op;
}

/* If the types passed are supported, return TRUE, otherwise set VR to
   VARYING and return FALSE.  */

static bool
supported_types_p (value_range_base *vr,
                   tree type0,
                   tree type1 = NULL)
{
  if (!value_range_base::supports_type_p (type0)
      || (type1 && !value_range_base::supports_type_p (type1)))
    {
      vr->set_varying (type0);
      return false;
    }
  return true;
}

/* If any of the ranges passed are defined, return TRUE, otherwise set
   VR to UNDEFINED and return FALSE.  */

static bool
defined_ranges_p (value_range_base *vr,
                  const value_range_base *vr0,
                  const value_range_base *vr1 = NULL)
{
  if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
    {
      vr->set_undefined ();
      return false;
    }
  return true;
}

static value_range_base
drop_undefines_to_varying (const value_range_base *vr, tree expr_type)
{
  if (vr->undefined_p ())
    return value_range_base (expr_type);
  else
    return *vr;
}

/* If any operand is symbolic, perform a binary operation on them and
   return TRUE, otherwise return FALSE.  */

static bool
range_fold_binary_symbolics_p (value_range_base *vr,
                               tree_code code,
                               tree expr_type,
                               const value_range_base *vr0,
                               const value_range_base *vr1)
{
  if (vr0->symbolic_p () || vr1->symbolic_p ())
    {
      if ((code == PLUS_EXPR || code == MINUS_EXPR))
        {
          extract_range_from_plus_minus_expr (vr, code, expr_type, vr0, vr1);
          return true;
        }
      if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
        {
          extract_range_from_pointer_plus_expr (vr, code, expr_type, vr0, vr1);
          return true;
        }
      const range_operator *op = get_range_op_handler (vr, code, expr_type);
      *vr = op->fold_range (expr_type,
                            vr0->normalize_symbolics (),
                            vr1->normalize_symbolics ());
      return true;
    }
  return false;
}

/* If operand is symbolic, perform a unary operation on it and return
   TRUE, otherwise return FALSE.  */

static bool
range_fold_unary_symbolics_p (value_range_base *vr,
                              tree_code code,
                              tree expr_type,
                              const value_range_base *vr0)
{
  if (vr0->symbolic_p ())
    {
      if (code == NEGATE_EXPR)
        {
          /* -X is simply 0 - X.  */
          value_range_base zero;
          zero.set_zero (vr0->type ());
          range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
          return true;
        }
      if (code == BIT_NOT_EXPR)
        {
          /* ~X is simply -1 - X.  */
          value_range_base minusone;
          minusone.set (build_int_cst (vr0->type (), -1));
          range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
          return true;
        }
      const range_operator *op = get_range_op_handler (vr, code, expr_type);
      *vr = op->fold_range (expr_type,
                            vr0->normalize_symbolics (),
                            value_range_base (expr_type));
      return true;
    }
  return false;
}

/* Perform a binary operation on a pair of ranges.  */

void
range_fold_binary_expr (value_range_base *vr,
                        enum tree_code code,
                        tree expr_type,
                        const value_range_base *vr0_,
                        const value_range_base *vr1_)
{
  if (!supported_types_p (vr, expr_type)
      || !defined_ranges_p (vr, vr0_, vr1_))
    return;
  const range_operator *op = get_range_op_handler (vr, code, expr_type);
  if (!op)
    return;

  value_range_base vr0 = drop_undefines_to_varying (vr0_, expr_type);
  value_range_base vr1 = drop_undefines_to_varying (vr1_, expr_type);
  if (range_fold_binary_symbolics_p (vr, code, expr_type, &vr0, &vr1))
    return;

  *vr = op->fold_range (expr_type,
                        vr0.normalize_addresses (),
                        vr1.normalize_addresses ());
}

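/* Usage sketch (illustrative, not part of GCC): folding [3, 7] * [2, 2]
   for a 32-bit int through the generic entry point, assuming MULT_EXPR
   has a range-ops handler for this type:

     value_range_base a (build_int_cst (integer_type_node, 3),
                         build_int_cst (integer_type_node, 7));
     value_range_base b (build_int_cst (integer_type_node, 2),
                         build_int_cst (integer_type_node, 2));
     value_range_base res;
     range_fold_binary_expr (&res, MULT_EXPR, integer_type_node, &a, &b);
     // res would then be [6, 14].

   Symbolic operands are routed to the PLUS/MINUS and POINTER_PLUS
   helpers above; everything else goes through range_op_handler.  */
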
/* Perform a unary operation on a range.  */

void
range_fold_unary_expr (value_range_base *vr,
                       enum tree_code code, tree expr_type,
                       const value_range_base *vr0,
                       tree vr0_type)
{
  if (!supported_types_p (vr, expr_type, vr0_type)
      || !defined_ranges_p (vr, vr0))
    return;
  const range_operator *op = get_range_op_handler (vr, code, expr_type);
  if (!op)
    return;

  if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
    return;

  *vr = op->fold_range (expr_type,
                        vr0->normalize_addresses (),
                        value_range_base (expr_type));
}

1960 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
1961 create a new SSA name N and return the assertion assignment
1962 'N = ASSERT_EXPR <V, V OP W>'. */
1963
1964 static gimple *
1965 build_assert_expr_for (tree cond, tree v)
1966 {
1967 tree a;
1968 gassign *assertion;
1969
1970 gcc_assert (TREE_CODE (v) == SSA_NAME
1971 && COMPARISON_CLASS_P (cond));
1972
1973 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
1974 assertion = gimple_build_assign (NULL_TREE, a);
1975
1976 /* The new ASSERT_EXPR creates a new SSA name that replaces the
1977 operand of the ASSERT_EXPR. Register the new name and the old one
1978 in the replacement table so that we can fix the SSA web
1979 after adding all the ASSERT_EXPRs. */
1980 tree new_def = create_new_def_for (v, assertion, NULL);
1981 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
1982 given we have to be able to fully propagate those out to re-create
1983 valid SSA when removing the asserts. */
1984 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
1985 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
1986
1987 return assertion;
1988 }
1989
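/* For instance, given the condition

     if (x_3 > 10)

   build_assert_expr_for (cond, x_3), where COND is the tree
   'x_3 > 10', builds

     x_4 = ASSERT_EXPR <x_3, x_3 > 10>;

   with x_4 the replacement name created by create_new_def_for;
   dominated uses of x_3 are rewritten to x_4 once the SSA web is
   fixed up.  */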
1990
1991 /* Return true if STMT is a conditional whose predicate involves
1992 floating point values. */
1993
1994 static inline bool
1995 fp_predicate (gimple *stmt)
1996 {
1997 GIMPLE_CHECK (stmt, GIMPLE_COND);
1998
1999 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2000 }
2001
2002 /* If the range of values taken by OP can be inferred after STMT executes,
2003 return the comparison code (COMP_CODE_P) and value (VAL_P) that
2004 describe the inferred range. Return true if a range could be
2005 inferred. */
2006
2007 bool
2008 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2009 {
2010 *val_p = NULL_TREE;
2011 *comp_code_p = ERROR_MARK;
2012
2013 /* Do not attempt to infer anything about names that flow through
2014 abnormal edges. */
2015 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2016 return false;
2017
2018 /* If STMT is the last statement of a basic block with no normal
2019 successors, there is no point inferring anything about any of its
2020 operands. We would not be able to find a proper insertion point
2021 for the assertion, anyway. */
2022 if (stmt_ends_bb_p (stmt))
2023 {
2024 edge_iterator ei;
2025 edge e;
2026
2027 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2028 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2029 break;
2030 if (e == NULL)
2031 return false;
2032 }
2033
2034 if (infer_nonnull_range (stmt, op))
2035 {
2036 *val_p = build_int_cst (TREE_TYPE (op), 0);
2037 *comp_code_p = NE_EXPR;
2038 return true;
2039 }
2040
2041 return false;
2042 }
2043
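/* For instance, after

     *p_1 = 0;

   executes, p_1 must be non-NULL (this is what infer_nonnull_range
   derives, given -fdelete-null-pointer-checks semantics), so the
   function returns true with *COMP_CODE_P == NE_EXPR and *VAL_P a
   zero of p_1's type.  */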
2044
2045 void dump_asserts_for (FILE *, tree);
2046 void debug_asserts_for (tree);
2047 void dump_all_asserts (FILE *);
2048 void debug_all_asserts (void);
2049
2050 /* Dump all the registered assertions for NAME to FILE. */
2051
2052 void
2053 dump_asserts_for (FILE *file, tree name)
2054 {
2055 assert_locus *loc;
2056
2057 fprintf (file, "Assertions to be inserted for ");
2058 print_generic_expr (file, name);
2059 fprintf (file, "\n");
2060
2061 loc = asserts_for[SSA_NAME_VERSION (name)];
2062 while (loc)
2063 {
2064 fprintf (file, "\t");
2065 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2066 fprintf (file, "\n\tBB #%d", loc->bb->index);
2067 if (loc->e)
2068 {
2069 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2070 loc->e->dest->index);
2071 dump_edge_info (file, loc->e, dump_flags, 0);
2072 }
2073 fprintf (file, "\n\tPREDICATE: ");
2074 print_generic_expr (file, loc->expr);
2075 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2076 print_generic_expr (file, loc->val);
2077 fprintf (file, "\n\n");
2078 loc = loc->next;
2079 }
2080
2081 fprintf (file, "\n");
2082 }
2083
2084
2085 /* Dump all the registered assertions for NAME to stderr. */
2086
2087 DEBUG_FUNCTION void
2088 debug_asserts_for (tree name)
2089 {
2090 dump_asserts_for (stderr, name);
2091 }
2092
2093
2094 /* Dump all the registered assertions for all the names to FILE. */
2095
2096 void
2097 dump_all_asserts (FILE *file)
2098 {
2099 unsigned i;
2100 bitmap_iterator bi;
2101
2102 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2103 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2104 dump_asserts_for (file, ssa_name (i));
2105 fprintf (file, "\n");
2106 }
2107
2108
2109 /* Dump all the registered assertions for all the names to stderr. */
2110
2111 DEBUG_FUNCTION void
2112 debug_all_asserts (void)
2113 {
2114 dump_all_asserts (stderr);
2115 }
2116
2117 /* Dump assert_info structure ASSERT to FILE. */
2118
2119 void
2120 dump_assert_info (FILE *file, const assert_info &assert)
2121 {
2122 fprintf (file, "Assert for: ");
2123 print_generic_expr (file, assert.name);
2124 fprintf (file, "\n\tPREDICATE: expr=[");
2125 print_generic_expr (file, assert.expr);
2126 fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
2127 fprintf (file, "val=[");
2128 print_generic_expr (file, assert.val);
2129 fprintf (file, "]\n\n");
2130 }
2131
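/* Dump assert_info structure ASSERT to stderr. */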
2132 DEBUG_FUNCTION void
2133 debug (const assert_info &assert)
2134 {
2135 dump_assert_info (stderr, assert);
2136 }
2137
2138 /* Dump a vector of assert_info's. */
2139
2140 void
2141 dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
2142 {
2143 for (unsigned i = 0; i < asserts.length (); ++i)
2144 {
2145 dump_assert_info (file, asserts[i]);
2146 fprintf (file, "\n");
2147 }
2148 }
2149
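/* Dump a vector of assert_info's to stderr. */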
2150 DEBUG_FUNCTION void
2151 debug (const vec<assert_info> &asserts)
2152 {
2153 dump_asserts_info (stderr, asserts);
2154 }
2155
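/* The DEBUG_FUNCTION wrappers above are intended to be called from a
   debugger session, e.g.:

     (gdb) call debug_all_asserts ()

   assuming need_assert_for and asserts_for have already been
   populated by the pass.  */
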
2156 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2157
2158 static void
2159 add_assert_info (vec<assert_info> &asserts,
2160 tree name, tree expr, enum tree_code comp_code, tree val)
2161 {
2162 assert_info info;
2163 info.comp_code = comp_code;
2164 info.name = name;
2165 if (TREE_OVERFLOW_P (val))
2166 val = drop_tree_overflow (val);
2167 info.val = val;
2168 info.expr = expr;
2169 asserts.safe_push (info);
2170 if (dump_enabled_p ())
2171 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
2172 "Adding assert for %T from %T %s %T\n",
2173 name, expr, op_symbol_code (comp_code), val);
2174 }
2175
2176 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2177 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2178 E->DEST, then register this location as a possible insertion point
2179 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2180
2181 BB, E and SI provide the exact insertion point for the new
2182 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2183 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2184 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2185 must not be NULL. */
2186
2187 static void
2188 register_new_assert_for (tree name, tree expr,
2189 enum tree_code comp_code,
2190 tree val,
2191 basic_block bb,
2192 edge e,
2193 gimple_stmt_iterator si)
2194 {
2195 assert_locus *n, *loc, *last_loc;
2196 basic_block dest_bb;
2197
2198 gcc_checking_assert (bb == NULL || e == NULL);
2199
2200 if (e == NULL)
2201 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2202 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2203
2204 /* Never build an assert comparing against an integer constant with
2205 TREE_OVERFLOW set. This confuses our undefined overflow warning
2206 machinery. */
2207 if (TREE_OVERFLOW_P (val))
2208 val = drop_tree_overflow (val);
2209
2210 /* The new assertion A will be inserted at BB or E. We need to
2211 determine if the new location is dominated by a previously
2212 registered location for A. If we are doing an edge insertion,
2213 assume that A will be inserted at E->DEST. Note that this is not
2214 necessarily true.
2215
2216 If E is a critical edge, it will be split. But even if E is
2217 split, the new block will dominate the same set of blocks that
2218 E->DEST dominates.
2219
2220 The reverse, however, is not true: blocks dominated by E->DEST
2221 will not be dominated by the new block created to split E. So,
2222 if the insertion location is on a critical edge, we will not use
2223 the new location to move another assertion previously registered
2224 at a block dominated by E->DEST. */
2225 dest_bb = (bb) ? bb : e->dest;
2226
2227 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2228 VAL at a block dominating DEST_BB, then we don't need to insert a new
2229 one. Similarly, if the same assertion already exists at a block
2230 dominated by DEST_BB and the new location is not on a critical
2231 edge, then update the existing location for the assertion (i.e.,
2232 move the assertion up in the dominance tree).
2233
2234 Note, this is implemented as a simple linked list because there
2235 should not be more than a handful of assertions registered per
2236 name. If this becomes a performance problem, a table hashed by
2237 COMP_CODE and VAL could be implemented. */
2238 loc = asserts_for[SSA_NAME_VERSION (name)];
2239 last_loc = loc;
2240 while (loc)
2241 {
2242 if (loc->comp_code == comp_code
2243 && (loc->val == val
2244 || operand_equal_p (loc->val, val, 0))
2245 && (loc->expr == expr
2246 || operand_equal_p (loc->expr, expr, 0)))
2247 {
2248 /* If E is not a critical edge and DEST_BB
2249 dominates the existing location for the assertion, move
2250 the assertion up in the dominance tree by updating its
2251 location information. */
2252 if ((e == NULL || !EDGE_CRITICAL_P (e))
2253 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2254 {
2255 loc->bb = dest_bb;
2256 loc->e = e;
2257 loc->si = si;
2258 return;
2259 }
2260 }
2261
2262 /* Update the last node of the list and move to the next one. */
2263 last_loc = loc;
2264 loc = loc->next;
2265 }
2266
2267 /* If we didn't find an assertion already registered for
2268 NAME COMP_CODE VAL, add a new one at the end of the list of
2269 assertions associated with NAME. */
2270 n = XNEW (struct assert_locus);
2271 n->bb = dest_bb;
2272 n->e = e;
2273 n->si = si;
2274 n->comp_code = comp_code;
2275 n->val = val;
2276 n->expr = expr;
2277 n->next = NULL;
2278
2279 if (last_loc)
2280 last_loc->next = n;
2281 else
2282 asserts_for[SSA_NAME_VERSION (name)] = n;
2283
2284 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2285 }
2286
2287 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2288 Extract a suitable test code and value and store them into *CODE_P and
2289 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2290
2291 If no extraction was possible, return FALSE, otherwise return TRUE.
2292
2293 If INVERT is true, then we invert the result stored into *CODE_P. */
2294
2295 static bool
2296 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2297 tree cond_op0, tree cond_op1,
2298 bool invert, enum tree_code *code_p,
2299 tree *val_p)
2300 {
2301 enum tree_code comp_code;
2302 tree val;
2303
2304 /* We have a comparison of the form NAME COMP VAL
2305 or VAL COMP NAME. */
2306 if (name == cond_op1)
2307 {
2308 /* If the predicate is of the form VAL COMP NAME, flip
2309 COMP around because we need to register NAME as the
2310 first operand in the predicate. */
2311 comp_code = swap_tree_comparison (cond_code);
2312 val = cond_op0;
2313 }
2314 else if (name == cond_op0)
2315 {
2316 /* The comparison is of the form NAME COMP VAL, so the
2317 comparison code remains unchanged. */
2318 comp_code = cond_code;
2319 val = cond_op1;
2320 }
2321 else
2322 gcc_unreachable ();
2323
2324 /* Invert the comparison code as necessary. */
2325 if (invert)
2326 comp_code = invert_tree_comparison (comp_code, 0);
2327
2328 /* VRP only handles integral and pointer types. */
2329 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2330 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2331 return false;
2332
2333 /* Do not register always-false predicates.
2334 FIXME: this works around a limitation in fold() when dealing with
2335 enumerations. Given 'enum { N1, N2 } x;', fold will not
2336 fold 'if (x > N2)' to 'if (0)'. */
2337 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2338 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2339 {
2340 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2341 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2342
2343 if (comp_code == GT_EXPR
2344 && (!max
2345 || compare_values (val, max) == 0))
2346 return false;
2347
2348 if (comp_code == LT_EXPR
2349 && (!min
2350 || compare_values (val, min) == 0))
2351 return false;
2352 }
2353 *code_p = comp_code;
2354 *val_p = val;
2355 return true;
2356 }
2357
2358 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES,
2359 if any (otherwise return VAL). VAL and MASK must be zero-extended
2360 for precision PREC. If SGNBIT is non-zero, first xor VAL with
2361 SGNBIT (to transform signed values into unsigned) and xor SGNBIT
2362 back at the end. */
2363
2364 static wide_int
2365 masked_increment (const wide_int &val_in, const wide_int &mask,
2366 const wide_int &sgnbit, unsigned int prec)
2367 {
2368 wide_int bit = wi::one (prec), res;
2369 unsigned int i;
2370
2371 wide_int val = val_in ^ sgnbit;
2372 for (i = 0; i < prec; i++, bit += bit)
2373 {
2374 res = mask;
2375 if ((res & bit) == 0)
2376 continue;
2377 res = bit - 1;
2378 res = wi::bit_and_not (val + bit, res);
2379 res &= mask;
2380 if (wi::gtu_p (res, val))
2381 return res ^ sgnbit;
2382 }
2383 return val ^ sgnbit;
2384 }
2385
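/* A self-contained 8-bit model of the loop above, cross-checked
   against a brute-force definition of the result (SGNBIT handling
   elided, i.e. SGNBIT == 0):

     #include <assert.h>
     #include <stdint.h>

     static uint8_t
     masked_increment_8 (uint8_t val, uint8_t mask)
     {
       for (unsigned i = 0; i < 8; i++)
         {
           uint8_t bit = (uint8_t) (1u << i);
           if ((mask & bit) == 0)
             continue;
           // Round VAL + BIT down to a multiple of BIT, keep MASK bits.
           uint8_t res = (uint8_t) ((uint8_t) (val + bit) & ~(bit - 1));
           res &= mask;
           if (res > val)
             return res;
         }
       return val;
     }

     int
     main (void)
     {
       for (unsigned m = 0; m < 256; m++)
         for (unsigned v = 0; v < 256; v++)
           {
             // Smallest RES > V with (RES & M) == RES, else V.
             unsigned ref = v;
             for (unsigned r = v + 1; r < 256; r++)
               if ((r & m) == r)
                 {
                   ref = r;
                   break;
                 }
             assert (masked_increment_8 (v, m) == ref);
           }
       return 0;
     }  */
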
2386 /* Helper for overflow_comparison_p
2387
2388 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2389 OP1's defining statement to see if it ultimately has the form
2390 OP0 CODE (OP0 PLUS INTEGER_CST)
2391
2392 If so, return TRUE indicating this is an overflow test and store into
2393 *NEW_CST an updated constant that can be used in a narrowed range test.
2394
2395 REVERSED indicates if the comparison was originally:
2396
2397 OP1 CODE' OP0.
2398
2399 This affects how we build the updated constant. */
2400
2401 static bool
2402 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2403 bool follow_assert_exprs, bool reversed, tree *new_cst)
2404 {
2405 /* See if this is a relational operation between two SSA_NAMES with
2406 unsigned, overflow wrapping values. If so, check it more deeply. */
2407 if ((code == LT_EXPR || code == LE_EXPR
2408 || code == GE_EXPR || code == GT_EXPR)
2409 && TREE_CODE (op0) == SSA_NAME
2410 && TREE_CODE (op1) == SSA_NAME
2411 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2412 && TYPE_UNSIGNED (TREE_TYPE (op0))
2413 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2414 {
2415 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2416
2417 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2418 if (follow_assert_exprs)
2419 {
2420 while (gimple_assign_single_p (op1_def)
2421 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2422 {
2423 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2424 if (TREE_CODE (op1) != SSA_NAME)
2425 break;
2426 op1_def = SSA_NAME_DEF_STMT (op1);
2427 }
2428 }
2429
2430 /* Now look at the defining statement of OP1 to see if it adds
2431 or subtracts a nonzero constant from another operand. */
2432 if (op1_def
2433 && is_gimple_assign (op1_def)
2434 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2435 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2436 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2437 {
2438 tree target = gimple_assign_rhs1 (op1_def);
2439
2440 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2441 for one where TARGET appears on the RHS. */
2442 if (follow_assert_exprs)
2443 {
2444 /* Now see if that "other operand" is op0, following the chain
2445 of ASSERT_EXPRs if necessary. */
2446 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2447 while (op0 != target
2448 && gimple_assign_single_p (op0_def)
2449 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2450 {
2451 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2452 if (TREE_CODE (op0) != SSA_NAME)
2453 break;
2454 op0_def = SSA_NAME_DEF_STMT (op0);
2455 }
2456 }
2457
2458 /* If we did not find our target SSA_NAME, then this is not
2459 an overflow test. */
2460 if (op0 != target)
2461 return false;
2462
2463 tree type = TREE_TYPE (op0);
2464 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2465 tree inc = gimple_assign_rhs2 (op1_def);
2466 if (reversed)
2467 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2468 else
2469 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2470 return true;
2471 }
2472 }
2473 return false;
2474 }
2475
2476 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2477 OP1's defining statement to see if it ultimately has the form
2478 OP0 CODE (OP0 PLUS INTEGER_CST)
2479
2480 If so, return TRUE indicating this is an overflow test and store into
2481 *NEW_CST an updated constant that can be used in a narrowed range test.
2482
2483 These statements are left as-is in the IL to facilitate discovery of
2484 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2485 the alternate range representation is often useful within VRP. */
2486
2487 bool
2488 overflow_comparison_p (tree_code code, tree name, tree val,
2489 bool use_equiv_p, tree *new_cst)
2490 {
2491 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2492 return true;
2493 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2494 use_equiv_p, true, new_cst);
2495 }
2496
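/* The rewrite can be sanity-checked in isolation: for an unsigned
   wrapping X and a constant CST != 0, X > X + CST holds exactly when
   X + CST overflows, i.e. exactly when X > UINT_MAX - CST:

     #include <assert.h>
     #include <stdint.h>

     int
     main (void)
     {
       const uint32_t cst = 10;
       // The region around UINT32_MAX where x + cst wraps.
       for (uint32_t x = UINT32_MAX - 2 * cst; x != 0; x++)
         assert ((x > (uint32_t) (x + cst)) == (x > UINT32_MAX - cst));
       // A sparse sweep of the rest of the domain.
       for (uint64_t i = 0; i <= UINT32_MAX; i += 65537)
         {
           uint32_t x = (uint32_t) i;
           assert ((x > (uint32_t) (x + cst)) == (x > UINT32_MAX - cst));
         }
       return 0;
     }  */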
2497
2498 /* Try to register an edge assertion for SSA name NAME on edge E for
2499 the condition COND_OP0 COND_CODE COND_OP1, inverting it if INVERT
2500 is true. Queue the deduced assertions onto ASSERTS. */
2501
2502 static void
2503 register_edge_assert_for_2 (tree name, edge e,
2504 enum tree_code cond_code,
2505 tree cond_op0, tree cond_op1, bool invert,
2506 vec<assert_info> &asserts)
2507 {
2508 tree val;
2509 enum tree_code comp_code;
2510
2511 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2512 cond_op0,
2513 cond_op1,
2514 invert, &comp_code, &val))
2515 return;
2516
2517 /* Queue the assert. */
2518 tree x;
2519 if (overflow_comparison_p (comp_code, name, val, false, &x))
2520 {
2521 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2522 ? GT_EXPR : LE_EXPR);
2523 add_assert_info (asserts, name, name, new_code, x);
2524 }
2525 add_assert_info (asserts, name, name, comp_code, val);
2526
2527 /* In the case of NAME <= CST and NAME being defined as
2528 NAME = (unsigned) NAME2 + CST2, we can assert NAME2 >= -CST2
2529 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2530 This catches range and anti-range tests. */
2531 if ((comp_code == LE_EXPR
2532 || comp_code == GT_EXPR)
2533 && TREE_CODE (val) == INTEGER_CST
2534 && TYPE_UNSIGNED (TREE_TYPE (val)))
2535 {
2536 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2537 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2538
2539 /* Extract CST2 from the (optional) addition. */
2540 if (is_gimple_assign (def_stmt)
2541 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2542 {
2543 name2 = gimple_assign_rhs1 (def_stmt);
2544 cst2 = gimple_assign_rhs2 (def_stmt);
2545 if (TREE_CODE (name2) == SSA_NAME
2546 && TREE_CODE (cst2) == INTEGER_CST)
2547 def_stmt = SSA_NAME_DEF_STMT (name2);
2548 }
2549
2550 /* Extract NAME2 from the (optional) sign-changing cast. */
2551 if (gimple_assign_cast_p (def_stmt))
2552 {
2553 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2554 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2555 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2556 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2557 name3 = gimple_assign_rhs1 (def_stmt);
2558 }
2559
2560 /* If name3 is used later, create an ASSERT_EXPR for it. */
2561 if (name3 != NULL_TREE
2562 && TREE_CODE (name3) == SSA_NAME
2563 && (cst2 == NULL_TREE
2564 || TREE_CODE (cst2) == INTEGER_CST)
2565 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2566 {
2567 tree tmp;
2568
2569 /* Build an expression for the range test. */
2570 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2571 if (cst2 != NULL_TREE)
2572 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2573 add_assert_info (asserts, name3, tmp, comp_code, val);
2574 }
2575
2576 /* If name2 is used later, create an ASSERT_EXPR for it. */
2577 if (name2 != NULL_TREE
2578 && TREE_CODE (name2) == SSA_NAME
2579 && TREE_CODE (cst2) == INTEGER_CST
2580 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2581 {
2582 tree tmp;
2583
2584 /* Build an expression for the range test. */
2585 tmp = name2;
2586 if (TREE_TYPE (name) != TREE_TYPE (name2))
2587 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2588 if (cst2 != NULL_TREE)
2589 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2590 add_assert_info (asserts, name2, tmp, comp_code, val);
2591 }
2592 }
2593
2594 /* In the case of post-in/decrement tests like if (i++) ... with uses
2595 of the in/decremented value on the edge, the extra name we want to
2596 assert for is not on the def chain of the compared name. Instead
2597 it is in the set of use stmts.
2598 Similar cases happen for conversions that were simplified through
2599 fold_{sign_changed,widened}_comparison. */
2600 if ((comp_code == NE_EXPR
2601 || comp_code == EQ_EXPR)
2602 && TREE_CODE (val) == INTEGER_CST)
2603 {
2604 imm_use_iterator ui;
2605 gimple *use_stmt;
2606 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2607 {
2608 if (!is_gimple_assign (use_stmt))
2609 continue;
2610
2611 /* Only consider use stmts whose block dominates the predecessor. */
2612 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2613 continue;
2614
2615 tree name2 = gimple_assign_lhs (use_stmt);
2616 if (TREE_CODE (name2) != SSA_NAME)
2617 continue;
2618
2619 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2620 tree cst;
2621 if (code == PLUS_EXPR
2622 || code == MINUS_EXPR)
2623 {
2624 cst = gimple_assign_rhs2 (use_stmt);
2625 if (TREE_CODE (cst) != INTEGER_CST)
2626 continue;
2627 cst = int_const_binop (code, val, cst);
2628 }
2629 else if (CONVERT_EXPR_CODE_P (code))
2630 {
2631 /* For truncating conversions we cannot record
2632 an inequality. */
2633 if (comp_code == NE_EXPR
2634 && (TYPE_PRECISION (TREE_TYPE (name2))
2635 < TYPE_PRECISION (TREE_TYPE (name))))
2636 continue;
2637 cst = fold_convert (TREE_TYPE (name2), val);
2638 }
2639 else
2640 continue;
2641
2642 if (TREE_OVERFLOW_P (cst))
2643 cst = drop_tree_overflow (cst);
2644 add_assert_info (asserts, name2, name2, comp_code, cst);
2645 }
2646 }
2647
2648 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2649 && TREE_CODE (val) == INTEGER_CST)
2650 {
2651 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2652 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2653 tree val2 = NULL_TREE;
2654 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2655 wide_int mask = wi::zero (prec);
2656 unsigned int nprec = prec;
2657 enum tree_code rhs_code = ERROR_MARK;
2658
2659 if (is_gimple_assign (def_stmt))
2660 rhs_code = gimple_assign_rhs_code (def_stmt);
2661
2662 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2663 assert that A != CST1 -+ CST2. */
2664 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2665 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2666 {
2667 tree op0 = gimple_assign_rhs1 (def_stmt);
2668 tree op1 = gimple_assign_rhs2 (def_stmt);
2669 if (TREE_CODE (op0) == SSA_NAME
2670 && TREE_CODE (op1) == INTEGER_CST)
2671 {
2672 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2673 ? MINUS_EXPR : PLUS_EXPR);
2674 op1 = int_const_binop (reverse_op, val, op1);
2675 if (TREE_OVERFLOW (op1))
2676 op1 = drop_tree_overflow (op1);
2677 add_assert_info (asserts, op0, op0, comp_code, op1);
2678 }
2679 }
2680
2681 /* Add asserts for NAME cmp CST and NAME being defined
2682 as NAME = (int) NAME2. */
2683 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2684 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2685 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2686 && gimple_assign_cast_p (def_stmt))
2687 {
2688 name2 = gimple_assign_rhs1 (def_stmt);
2689 if (CONVERT_EXPR_CODE_P (rhs_code)
2690 && TREE_CODE (name2) == SSA_NAME
2691 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2692 && TYPE_UNSIGNED (TREE_TYPE (name2))
2693 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2694 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2695 || !tree_int_cst_equal (val,
2696 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2697 {
2698 tree tmp, cst;
2699 enum tree_code new_comp_code = comp_code;
2700
2701 cst = fold_convert (TREE_TYPE (name2),
2702 TYPE_MIN_VALUE (TREE_TYPE (val)));
2703 /* Build an expression for the range test. */
2704 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2705 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2706 fold_convert (TREE_TYPE (name2), val));
2707 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2708 {
2709 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2710 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2711 build_int_cst (TREE_TYPE (name2), 1));
2712 }
2713 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2714 }
2715 }
2716
2717 /* Add asserts for NAME cmp CST and NAME being defined as
2718 NAME = NAME2 >> CST2.
2719
2720 Extract CST2 from the right shift. */
2721 if (rhs_code == RSHIFT_EXPR)
2722 {
2723 name2 = gimple_assign_rhs1 (def_stmt);
2724 cst2 = gimple_assign_rhs2 (def_stmt);
2725 if (TREE_CODE (name2) == SSA_NAME
2726 && tree_fits_uhwi_p (cst2)
2727 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2728 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2729 && type_has_mode_precision_p (TREE_TYPE (val)))
2730 {
2731 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2732 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2733 }
2734 }
2735 if (val2 != NULL_TREE
2736 && TREE_CODE (val2) == INTEGER_CST
2737 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2738 TREE_TYPE (val),
2739 val2, cst2), val))
2740 {
2741 enum tree_code new_comp_code = comp_code;
2742 tree tmp, new_val;
2743
2744 tmp = name2;
2745 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2746 {
2747 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2748 {
2749 tree type = build_nonstandard_integer_type (prec, 1);
2750 tmp = build1 (NOP_EXPR, type, name2);
2751 val2 = fold_convert (type, val2);
2752 }
2753 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2754 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2755 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2756 }
2757 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2758 {
2759 wide_int minval
2760 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2761 new_val = val2;
2762 if (minval == wi::to_wide (new_val))
2763 new_val = NULL_TREE;
2764 }
2765 else
2766 {
2767 wide_int maxval
2768 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2769 mask |= wi::to_wide (val2);
2770 if (wi::eq_p (mask, maxval))
2771 new_val = NULL_TREE;
2772 else
2773 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2774 }
2775
2776 if (new_val)
2777 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2778 }
2779
2780 /* If we have a conversion that doesn't change the value of the source,
2781 simply register the same assert for its operand. */
2782 if (CONVERT_EXPR_CODE_P (rhs_code))
2783 {
2784 wide_int rmin, rmax;
2785 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2786 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2787 && TREE_CODE (rhs1) == SSA_NAME
2788 /* Make sure the relation preserves the upper/lower boundary of
2789 the range conservatively. */
2790 && (comp_code == NE_EXPR
2791 || comp_code == EQ_EXPR
2792 || (TYPE_SIGN (TREE_TYPE (name))
2793 == TYPE_SIGN (TREE_TYPE (rhs1)))
2794 || ((comp_code == LE_EXPR
2795 || comp_code == LT_EXPR)
2796 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
2797 || ((comp_code == GE_EXPR
2798 || comp_code == GT_EXPR)
2799 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
2800 /* And the conversion does not alter the value we compare
2801 against and all values in rhs1 can be represented in
2802 the converted to type. */
2803 && int_fits_type_p (val, TREE_TYPE (rhs1))
2804 && ((TYPE_PRECISION (TREE_TYPE (name))
2805 > TYPE_PRECISION (TREE_TYPE (rhs1)))
2806 || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
2807 && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
2808 && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
2809 add_assert_info (asserts, rhs1, rhs1,
2810 comp_code, fold_convert (TREE_TYPE (rhs1), val));
2811 }
2812
2813 /* Add asserts for NAME cmp CST and NAME being defined as
2814 NAME = NAME2 & CST2.
2815
2816 Extract CST2 from the and.
2817
2818 Also handle
2819 NAME = (unsigned) NAME2;
2820 casts where NAME's type is unsigned and has smaller precision
2821 than NAME2's type as if it was NAME = NAME2 & MASK. */
2822 names[0] = NULL_TREE;
2823 names[1] = NULL_TREE;
2824 cst2 = NULL_TREE;
2825 if (rhs_code == BIT_AND_EXPR
2826 || (CONVERT_EXPR_CODE_P (rhs_code)
2827 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2828 && TYPE_UNSIGNED (TREE_TYPE (val))
2829 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2830 > prec))
2831 {
2832 name2 = gimple_assign_rhs1 (def_stmt);
2833 if (rhs_code == BIT_AND_EXPR)
2834 cst2 = gimple_assign_rhs2 (def_stmt);
2835 else
2836 {
2837 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2838 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2839 }
2840 if (TREE_CODE (name2) == SSA_NAME
2841 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2842 && TREE_CODE (cst2) == INTEGER_CST
2843 && !integer_zerop (cst2)
2844 && (nprec > 1
2845 || TYPE_UNSIGNED (TREE_TYPE (val))))
2846 {
2847 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2848 if (gimple_assign_cast_p (def_stmt2))
2849 {
2850 names[1] = gimple_assign_rhs1 (def_stmt2);
2851 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2852 || TREE_CODE (names[1]) != SSA_NAME
2853 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2854 || (TYPE_PRECISION (TREE_TYPE (name2))
2855 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2856 names[1] = NULL_TREE;
2857 }
2858 names[0] = name2;
2859 }
2860 }
2861 if (names[0] || names[1])
2862 {
2863 wide_int minv, maxv, valv, cst2v;
2864 wide_int tem, sgnbit;
2865 bool valid_p = false, valn, cst2n;
2866 enum tree_code ccode = comp_code;
2867
2868 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2869 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2870 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2871 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2872 /* If CST2 doesn't have the most significant bit set
2873 but VAL is negative, we have a comparison like
2874 if ((x & 0x123) > -4), which is always true. Just give up. */
2875 if (!cst2n && valn)
2876 ccode = ERROR_MARK;
2877 if (cst2n)
2878 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2879 else
2880 sgnbit = wi::zero (nprec);
2881 minv = valv & cst2v;
2882 switch (ccode)
2883 {
2884 case EQ_EXPR:
2885 /* Minimum unsigned value for equality is VAL & CST2
2886 (should be equal to VAL, otherwise we probably should
2887 have folded the comparison into false) and
2888 maximum unsigned value is VAL | ~CST2. */
2889 maxv = valv | ~cst2v;
2890 valid_p = true;
2891 break;
2892
2893 case NE_EXPR:
2894 tem = valv | ~cst2v;
2895 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2896 if (valv == 0)
2897 {
2898 cst2n = false;
2899 sgnbit = wi::zero (nprec);
2900 goto gt_expr;
2901 }
2902 /* If (VAL | ~CST2) is all ones, handle it as
2903 (X & CST2) < VAL. */
2904 if (tem == -1)
2905 {
2906 cst2n = false;
2907 valn = false;
2908 sgnbit = wi::zero (nprec);
2909 goto lt_expr;
2910 }
2911 if (!cst2n && wi::neg_p (cst2v))
2912 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2913 if (sgnbit != 0)
2914 {
2915 if (valv == sgnbit)
2916 {
2917 cst2n = true;
2918 valn = true;
2919 goto gt_expr;
2920 }
2921 if (tem == wi::mask (nprec - 1, false, nprec))
2922 {
2923 cst2n = true;
2924 goto lt_expr;
2925 }
2926 if (!cst2n)
2927 sgnbit = wi::zero (nprec);
2928 }
2929 break;
2930
2931 case GE_EXPR:
2932 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2933 is VAL and maximum unsigned value is ~0. For signed
2934 comparison, if CST2 doesn't have most significant bit
2935 set, handle it similarly. If CST2 has MSB set,
2936 the minimum is the same, and maximum is ~0U/2. */
2937 if (minv != valv)
2938 {
2939 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2940 VAL. */
2941 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2942 if (minv == valv)
2943 break;
2944 }
2945 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2946 valid_p = true;
2947 break;
2948
2949 case GT_EXPR:
2950 gt_expr:
2951 /* Find out smallest MINV where MINV > VAL
2952 && (MINV & CST2) == MINV, if any. If VAL is signed and
2953 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2954 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2955 if (minv == valv)
2956 break;
2957 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2958 valid_p = true;
2959 break;
2960
2961 case LE_EXPR:
2962 /* Minimum unsigned value for <= is 0 and maximum
2963 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2964 Otherwise, find smallest VAL2 where VAL2 > VAL
2965 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2966 as maximum.
2967 For signed comparison, if CST2 doesn't have most
2968 significant bit set, handle it similarly. If CST2 has
2969 MSB set, the maximum is the same and minimum is INT_MIN. */
2970 if (minv == valv)
2971 maxv = valv;
2972 else
2973 {
2974 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2975 if (maxv == valv)
2976 break;
2977 maxv -= 1;
2978 }
2979 maxv |= ~cst2v;
2980 minv = sgnbit;
2981 valid_p = true;
2982 break;
2983
2984 case LT_EXPR:
2985 lt_expr:
2986 /* Minimum unsigned value for < is 0 and maximum
2987 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2988 Otherwise, find smallest VAL2 where VAL2 > VAL
2989 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2990 as maximum.
2991 For signed comparison, if CST2 doesn't have most
2992 significant bit set, handle it similarly. If CST2 has
2993 MSB set, the maximum is the same and minimum is INT_MIN. */
2994 if (minv == valv)
2995 {
2996 if (valv == sgnbit)
2997 break;
2998 maxv = valv;
2999 }
3000 else
3001 {
3002 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3003 if (maxv == valv)
3004 break;
3005 }
3006 maxv -= 1;
3007 maxv |= ~cst2v;
3008 minv = sgnbit;
3009 valid_p = true;
3010 break;
3011
3012 default:
3013 break;
3014 }
3015 if (valid_p
3016 && (maxv - minv) != -1)
3017 {
3018 tree tmp, new_val, type;
3019 int i;
3020
3021 for (i = 0; i < 2; i++)
3022 if (names[i])
3023 {
3024 wide_int maxv2 = maxv;
3025 tmp = names[i];
3026 type = TREE_TYPE (names[i]);
3027 if (!TYPE_UNSIGNED (type))
3028 {
3029 type = build_nonstandard_integer_type (nprec, 1);
3030 tmp = build1 (NOP_EXPR, type, names[i]);
3031 }
3032 if (minv != 0)
3033 {
3034 tmp = build2 (PLUS_EXPR, type, tmp,
3035 wide_int_to_tree (type, -minv));
3036 maxv2 = maxv - minv;
3037 }
3038 new_val = wide_int_to_tree (type, maxv2);
3039 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3040 }
3041 }
3042 }
3043 }
3044 }
3045
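/* One of the derivations above, in isolation: for NAME = NAME2 >> CST2
   and a test NAME == VAL where VAL << CST2 loses no bits, the
   recorded assertion is NAME2 - (VAL << CST2) <= (1 << CST2) - 1 in
   the unsigned type.  A standalone 8-bit check:

     #include <assert.h>
     #include <stdint.h>

     int
     main (void)
     {
       for (unsigned s = 1; s < 8; s++)
         for (unsigned v = 0; v < 256u >> s; v++)
           for (unsigned x = 0; x < 256; x++)
             {
               uint8_t lo = (uint8_t) (v << s);
               uint8_t width = (uint8_t) ((1u << s) - 1);
               assert (((x >> s) == v)
                       == ((uint8_t) (x - lo) <= width));
             }
       return 0;
     }  */
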
3046 /* OP is an operand of a truth value expression which is known to have
3047 a particular value. Register any asserts for OP and for any
3048 operands in OP's defining statement.
3049
3050 If CODE is EQ_EXPR, then we want to register OP is zero (false);
3051 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
3052
3053 static void
3054 register_edge_assert_for_1 (tree op, enum tree_code code,
3055 edge e, vec<assert_info> &asserts)
3056 {
3057 gimple *op_def;
3058 tree val;
3059 enum tree_code rhs_code;
3060
3061 /* We only care about SSA_NAMEs. */
3062 if (TREE_CODE (op) != SSA_NAME)
3063 return;
3064
3065 /* We know that OP will have a zero or nonzero value. */
3066 val = build_int_cst (TREE_TYPE (op), 0);
3067 add_assert_info (asserts, op, op, code, val);
3068
3069 /* Now look at how OP is set. If it's set from a comparison,
3070 a truth operation or some bit operations, then we may be able
3071 to register information about the operands of that assignment. */
3072 op_def = SSA_NAME_DEF_STMT (op);
3073 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3074 return;
3075
3076 rhs_code = gimple_assign_rhs_code (op_def);
3077
3078 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3079 {
3080 bool invert = (code == EQ_EXPR ? true : false);
3081 tree op0 = gimple_assign_rhs1 (op_def);
3082 tree op1 = gimple_assign_rhs2 (op_def);
3083
3084 if (TREE_CODE (op0) == SSA_NAME)
3085 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3086 if (TREE_CODE (op1) == SSA_NAME)
3087 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3088 }
3089 else if ((code == NE_EXPR
3090 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3091 || (code == EQ_EXPR
3092 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3093 {
3094 /* Recurse on each operand. */
3095 tree op0 = gimple_assign_rhs1 (op_def);
3096 tree op1 = gimple_assign_rhs2 (op_def);
3097 if (TREE_CODE (op0) == SSA_NAME
3098 && has_single_use (op0))
3099 register_edge_assert_for_1 (op0, code, e, asserts);
3100 if (TREE_CODE (op1) == SSA_NAME
3101 && has_single_use (op1))
3102 register_edge_assert_for_1 (op1, code, e, asserts);
3103 }
3104 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3105 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3106 {
3107 /* Recurse, flipping CODE. */
3108 code = invert_tree_comparison (code, false);
3109 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3110 }
3111 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3112 {
3113 /* Recurse through the copy. */
3114 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3115 }
3116 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3117 {
3118 /* Recurse through the type conversion, unless it is a narrowing
3119 conversion or conversion from non-integral type. */
3120 tree rhs = gimple_assign_rhs1 (op_def);
3121 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3122 && (TYPE_PRECISION (TREE_TYPE (rhs))
3123 <= TYPE_PRECISION (TREE_TYPE (op))))
3124 register_edge_assert_for_1 (rhs, code, e, asserts);
3125 }
3126 }
3127
3128 /* Check if the comparison
3129 NAME COND_OP INTEGER_CST
3130 has the form
3131 (X & 11...100..0) COND_OP XX...X00...0.
3132 Such a comparison can yield assertions like
3133 X >= XX...X00...0
3134 X <= XX...X11...1
3135 in case COND_OP is EQ_EXPR, or
3136 X < XX...X00...0
3137 X > XX...X11...1
3138 in case it is NE_EXPR. */
3139
3140 static bool
3141 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3142 tree *new_name, tree *low, enum tree_code *low_code,
3143 tree *high, enum tree_code *high_code)
3144 {
3145 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3146
3147 if (!is_gimple_assign (def_stmt)
3148 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3149 return false;
3150
3151 tree t = gimple_assign_rhs1 (def_stmt);
3152 tree maskt = gimple_assign_rhs2 (def_stmt);
3153 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3154 return false;
3155
3156 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3157 wide_int inv_mask = ~mask;
3158 /* A zero or all-ones mask should have been folded away by now. */
3159 if (mask == 0 || inv_mask == 0)
3160 return false;
3161
3162 /* Assume VALT is INTEGER_CST. */
3163 wi::tree_to_wide_ref val = wi::to_wide (valt);
3164
3165 if ((inv_mask & (inv_mask + 1)) != 0
3166 || (val & mask) != val)
3167 return false;
3168
3169 bool is_range = cond_code == EQ_EXPR;
3170
3171 tree type = TREE_TYPE (t);
3172 wide_int min = wi::min_value (type),
3173 max = wi::max_value (type);
3174
3175 if (is_range)
3176 {
3177 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3178 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3179 }
3180 else
3181 {
3182 /* We can still generate an assertion if one of the alternatives
3183 is known to always be false. */
3184 if (val == min)
3185 {
3186 *low_code = (enum tree_code) 0;
3187 *high_code = GT_EXPR;
3188 }
3189 else if ((val | inv_mask) == max)
3190 {
3191 *low_code = LT_EXPR;
3192 *high_code = (enum tree_code) 0;
3193 }
3194 else
3195 return false;
3196 }
3197
3198 *new_name = t;
3199 *low = wide_int_to_tree (type, val);
3200 *high = wide_int_to_tree (type, val | inv_mask);
3201
3202 return true;
3203 }
3204
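/* The bounds produced above can be checked in isolation: if
   (X & MASK) == VAL, with (VAL & MASK) == VAL and ~MASK a mask of
   low bits, then VAL <= X and X <= (VAL | ~MASK) as unsigned.  A
   standalone 8-bit check:

     #include <assert.h>
     #include <stdint.h>

     int
     main (void)
     {
       for (unsigned nbits = 0; nbits <= 8; nbits++)
         {
           uint8_t mask = (uint8_t) ~((1u << nbits) - 1);  // 11..100..0
           for (unsigned v = 0; v < 256; v++)
             {
               if ((v & mask) != v)
                 continue;
               for (unsigned x = 0; x < 256; x++)
                 if ((x & mask) == v)
                   assert (v <= x && x <= (v | (uint8_t) ~mask));
             }
         }
       return 0;
     }  */
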
3205 /* Try to register an edge assertion for SSA name NAME on edge E for
3206 the condition COND_OP0 COND_CODE COND_OP1 contributing to the
3207 conditional jump guarding E. Push the deduced assertions onto ASSERTS. */
3208
3209 void
3210 register_edge_assert_for (tree name, edge e,
3211 enum tree_code cond_code, tree cond_op0,
3212 tree cond_op1, vec<assert_info> &asserts)
3213 {
3214 tree val;
3215 enum tree_code comp_code;
3216 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3217
3218 /* Do not attempt to infer anything about names that flow through
3219 abnormal edges. */
3220 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3221 return;
3222
3223 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3224 cond_op0, cond_op1,
3225 is_else_edge,
3226 &comp_code, &val))
3227 return;
3228
3229 /* Register ASSERT_EXPRs for name. */
3230 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3231 cond_op1, is_else_edge, asserts);
3232
3233
3234 /* If COND is effectively an equality test of an SSA_NAME against
3235 the value zero or one, then we may be able to assert values
3236 for SSA_NAMEs which flow into COND. */
3237
3238 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3239 statement of NAME we can assert both operands of the BIT_AND_EXPR
3240 have nonzero value. */
3241 if (((comp_code == EQ_EXPR && integer_onep (val))
3242 || (comp_code == NE_EXPR && integer_zerop (val))))
3243 {
3244 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3245
3246 if (is_gimple_assign (def_stmt)
3247 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3248 {
3249 tree op0 = gimple_assign_rhs1 (def_stmt);
3250 tree op1 = gimple_assign_rhs2 (def_stmt);
3251 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3252 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3253 }
3254 }
3255
3256 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3257 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3258 have zero value. */
3259 if (((comp_code == EQ_EXPR && integer_zerop (val))
3260 || (comp_code == NE_EXPR && integer_onep (val))))
3261 {
3262 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3263
3264 /* For BIT_IOR_EXPR both operands are necessarily zero only
3265 if NAME == 0, or if the type precision is one. */
3266 if (is_gimple_assign (def_stmt)
3267 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3268 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3269 || comp_code == EQ_EXPR)))
3270 {
3271 tree op0 = gimple_assign_rhs1 (def_stmt);
3272 tree op1 = gimple_assign_rhs2 (def_stmt);
3273 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3274 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3275 }
3276 }
3277
3278 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3279 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3280 && TREE_CODE (val) == INTEGER_CST)
3281 {
3282 enum tree_code low_code, high_code;
3283 tree low, high;
3284 if (is_masked_range_test (name, val, comp_code, &name, &low,
3285 &low_code, &high, &high_code))
3286 {
3287 if (low_code != ERROR_MARK)
3288 register_edge_assert_for_2 (name, e, low_code, name,
3289 low, /*invert*/false, asserts);
3290 if (high_code != ERROR_MARK)
3291 register_edge_assert_for_2 (name, e, high_code, name,
3292 high, /*invert*/false, asserts);
3293 }
3294 }
3295 }
3296
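/* For instance, given

     _1 = a_2 & b_3;
     if (_1 != 0) ...

   the true edge allows asserting both a_2 != 0 and b_3 != 0, since a
   BIT_AND_EXPR result can only be nonzero if both operands are
   nonzero.  */
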
3297 /* Finish found ASSERTS for E and register them at GSI. */
3298
3299 static void
3300 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3301 vec<assert_info> &asserts)
3302 {
3303 for (unsigned i = 0; i < asserts.length (); ++i)
3304 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3305 reachable from E. */
3306 if (live_on_edge (e, asserts[i].name))
3307 register_new_assert_for (asserts[i].name, asserts[i].expr,
3308 asserts[i].comp_code, asserts[i].val,
3309 NULL, e, gsi);
3310 }
3311
3312
3313
3314 /* Determine whether the outgoing edges of BB should receive an
3315 ASSERT_EXPR for each of the operands of BB's LAST statement.
3316 The last statement of BB must be a COND_EXPR.
3317
3318 If any of the sub-graphs rooted at BB have an interesting use of
3319 the predicate operands, an assert location node is added to the
3320 list of assertions for the corresponding operands. */
3321
3322 static void
3323 find_conditional_asserts (basic_block bb, gcond *last)
3324 {
3325 gimple_stmt_iterator bsi;
3326 tree op;
3327 edge_iterator ei;
3328 edge e;
3329 ssa_op_iter iter;
3330
3331 bsi = gsi_for_stmt (last);
3332
3333 /* Look for uses of the operands in each of the sub-graphs
3334 rooted at BB. We need to check each of the outgoing edges
3335 separately, so that we know what kind of ASSERT_EXPR to
3336 insert. */
3337 FOR_EACH_EDGE (e, ei, bb->succs)
3338 {
3339 if (e->dest == bb)
3340 continue;
3341
3342 /* Register the necessary assertions for each operand in the
3343 conditional predicate. */
3344 auto_vec<assert_info, 8> asserts;
3345 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3346 register_edge_assert_for (op, e,
3347 gimple_cond_code (last),
3348 gimple_cond_lhs (last),
3349 gimple_cond_rhs (last), asserts);
3350 finish_register_edge_assert_for (e, bsi, asserts);
3351 }
3352 }
3353
3354 struct case_info
3355 {
3356 tree expr;
3357 basic_block bb;
3358 };
3359
3360 /* Compare two case labels sorting first by the destination bb index
3361 and then by the case value. */
3362
3363 static int
3364 compare_case_labels (const void *p1, const void *p2)
3365 {
3366 const struct case_info *ci1 = (const struct case_info *) p1;
3367 const struct case_info *ci2 = (const struct case_info *) p2;
3368 int idx1 = ci1->bb->index;
3369 int idx2 = ci2->bb->index;
3370
3371 if (idx1 < idx2)
3372 return -1;
3373 else if (idx1 == idx2)
3374 {
3375 /* Make sure the default label is first in a group. */
3376 if (!CASE_LOW (ci1->expr))
3377 return -1;
3378 else if (!CASE_LOW (ci2->expr))
3379 return 1;
3380 else
3381 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3382 CASE_LOW (ci2->expr));
3383 }
3384 else
3385 return 1;
3386 }
3387
3388 /* Determine whether the outgoing edges of BB should receive an
3389 ASSERT_EXPR for each of the operands of BB's LAST statement.
3390 The last statement of BB must be a SWITCH_EXPR.
3391
3392 If any of the sub-graphs rooted at BB have an interesting use of
3393 the predicate operands, an assert location node is added to the
3394 list of assertions for the corresponding operands. */
3395
3396 static void
3397 find_switch_asserts (basic_block bb, gswitch *last)
3398 {
3399 gimple_stmt_iterator bsi;
3400 tree op;
3401 edge e;
3402 struct case_info *ci;
3403 size_t n = gimple_switch_num_labels (last);
3404 #if GCC_VERSION >= 4000
3405 unsigned int idx;
3406 #else
3407 /* Work around GCC 3.4 bug (PR 37086). */
3408 volatile unsigned int idx;
3409 #endif
3410
3411 bsi = gsi_for_stmt (last);
3412 op = gimple_switch_index (last);
3413 if (TREE_CODE (op) != SSA_NAME)
3414 return;
3415
3416 /* Build a vector of case labels sorted by destination label. */
3417 ci = XNEWVEC (struct case_info, n);
3418 for (idx = 0; idx < n; ++idx)
3419 {
3420 ci[idx].expr = gimple_switch_label (last, idx);
3421 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3422 }
3423 edge default_edge = find_edge (bb, ci[0].bb);
3424 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3425
3426 for (idx = 0; idx < n; ++idx)
3427 {
3428 tree min, max;
3429 tree cl = ci[idx].expr;
3430 basic_block cbb = ci[idx].bb;
3431
3432 min = CASE_LOW (cl);
3433 max = CASE_HIGH (cl);
3434
3435 /* If there are multiple case labels with the same destination,
3436 we need to combine them into a single value range for the edge. */
3437 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3438 {
3439 /* Skip labels until the last of the group. */
3440 do {
3441 ++idx;
3442 } while (idx < n && cbb == ci[idx].bb);
3443 --idx;
3444
3445 /* Pick up the maximum of the case label range. */
3446 if (CASE_HIGH (ci[idx].expr))
3447 max = CASE_HIGH (ci[idx].expr);
3448 else
3449 max = CASE_LOW (ci[idx].expr);
3450 }
3451
3452 /* Can't extract a useful assertion out of a range that includes the
3453 default label. */
3454 if (min == NULL_TREE)
3455 continue;
3456
3457 /* Find the edge to register the assert expr on. */
3458 e = find_edge (bb, cbb);
3459
3460 /* Register the necessary assertions for the operand in the
3461 SWITCH_EXPR. */
3462 auto_vec<assert_info, 8> asserts;
3463 register_edge_assert_for (op, e,
3464 max ? GE_EXPR : EQ_EXPR,
3465 op, fold_convert (TREE_TYPE (op), min),
3466 asserts);
3467 if (max)
3468 register_edge_assert_for (op, e, LE_EXPR, op,
3469 fold_convert (TREE_TYPE (op), max),
3470 asserts);
3471 finish_register_edge_assert_for (e, bsi, asserts);
3472 }
3473
3474 XDELETEVEC (ci);
3475
3476 if (!live_on_edge (default_edge, op))
3477 return;
3478
3479 /* Now register, along the default edge, assertions that correspond to
3480 the anti-range of each label. */
3481 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3482 if (insertion_limit == 0)
3483 return;
3484
3485 /* We can't do this if the default case shares a label with another case. */
3486 tree default_cl = gimple_switch_default_label (last);
3487 for (idx = 1; idx < n; idx++)
3488 {
3489 tree min, max;
3490 tree cl = gimple_switch_label (last, idx);
3491 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3492 continue;
3493
3494 min = CASE_LOW (cl);
3495 max = CASE_HIGH (cl);
3496
3497 /* Combine contiguous case ranges to reduce the number of assertions
3498 to insert. */
3499 for (idx = idx + 1; idx < n; idx++)
3500 {
3501 tree next_min, next_max;
3502 tree next_cl = gimple_switch_label (last, idx);
3503 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3504 break;
3505
3506 next_min = CASE_LOW (next_cl);
3507 next_max = CASE_HIGH (next_cl);
3508
3509 wide_int difference = (wi::to_wide (next_min)
3510 - wi::to_wide (max ? max : min));
3511 if (wi::eq_p (difference, 1))
3512 max = next_max ? next_max : next_min;
3513 else
3514 break;
3515 }
3516 idx--;
3517
3518 if (max == NULL_TREE)
3519 {
3520 /* Register the assertion OP != MIN. */
3521 auto_vec<assert_info, 8> asserts;
3522 min = fold_convert (TREE_TYPE (op), min);
3523 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3524 asserts);
3525 finish_register_edge_assert_for (default_edge, bsi, asserts);
3526 }
3527 else
3528 {
3529 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3530 which will give OP the anti-range ~[MIN,MAX]. */
3531 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3532 min = fold_convert (TREE_TYPE (uop), min);
3533 max = fold_convert (TREE_TYPE (uop), max);
3534
3535 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3536 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3537 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3538 NULL, default_edge, bsi);
3539 }
3540
3541 if (--insertion_limit == 0)
3542 break;
3543 }
3544 }
3545
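/* The encoding used just above is the classic unsigned range-check
   trick: for LO <= HI in the unsigned type,

     (unsigned) OP - LO > HI - LO   <==>   OP < LO || OP > HI.

   A standalone 8-bit check:

     #include <assert.h>
     #include <stdint.h>

     int
     main (void)
     {
       for (unsigned lo = 0; lo < 256; lo++)
         for (unsigned hi = lo; hi < 256; hi++)
           for (unsigned op = 0; op < 256; op++)
             assert (((uint8_t) (op - lo) > (uint8_t) (hi - lo))
                     == (op < lo || op > hi));
       return 0;
     }  */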
3546
3547 /* Traverse all the statements in block BB looking for statements that
3548 may generate useful assertions for the SSA names in their operand.
3549 If a statement produces a useful assertion A for name N_i, then the
3550 list of assertions already generated for N_i is scanned to
3551 determine if A is actually needed.
3552
3553 If N_i already had the assertion A at a location dominating the
3554 current location, then nothing needs to be done. Otherwise, the
3555 new location for A is recorded instead.
3556
3557 1- For every statement S in BB, all the variables used by S are
3558 added to bitmap FOUND_IN_SUBGRAPH.
3559
3560 2- If statement S uses an operand N in a way that exposes a known
3561 value range for N, then if N was not already generated by an
3562 ASSERT_EXPR, create a new assert location for N. For instance,
3563 if N is a pointer and the statement dereferences it, we can
3564 assume that N is not NULL.
3565
3566 3- COND_EXPRs are a special case of #2. We can derive range
3567 information from the predicate but need to insert different
3568 ASSERT_EXPRs for each of the sub-graphs rooted at the
3569 conditional block. If the last statement of BB is a conditional
3570 expression of the form 'X op Y', then
3571
3572 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3573
3574 b) If the conditional is the only entry point to the sub-graph
3575 corresponding to the THEN_CLAUSE, recurse into it. On
3576 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3577 an ASSERT_EXPR is added for the corresponding variable.
3578
3579 c) Repeat step (b) on the ELSE_CLAUSE.
3580
3581 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3582
3583 For instance,
3584
3585 if (a == 9)
3586 b = a;
3587 else
3588 b = c + 1;
3589
3590 In this case, an assertion on the THEN clause is useful to
3591 determine that 'a' is always 9 on that edge. However, an assertion
3592 on the ELSE clause would be unnecessary.
3593
3594 4- If BB does not end in a conditional expression, then we recurse
3595 into BB's dominator children.
3596
3597 At the end of the recursive traversal, every SSA name will have a
3598 list of locations where ASSERT_EXPRs should be added. When a new
3599 location for name N is found, it is registered by calling
3600 register_new_assert_for. That function keeps track of all the
3601 registered assertions to prevent adding unnecessary assertions.
3602 For instance, if a pointer P_4 is dereferenced more than once in a
3603 dominator tree, only the location dominating all the dereferences of
3604 P_4 will receive an ASSERT_EXPR. */
3605
3606 static void
3607 find_assert_locations_1 (basic_block bb, sbitmap live)
3608 {
3609 gimple *last;
3610
3611 last = last_stmt (bb);
3612
3613 /* If BB's last statement is a conditional statement involving integer
3614 operands, determine if we need to add ASSERT_EXPRs. */
3615 if (last
3616 && gimple_code (last) == GIMPLE_COND
3617 && !fp_predicate (last)
3618 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3619 find_conditional_asserts (bb, as_a <gcond *> (last));
3620
3621 /* If BB's last statement is a switch statement involving integer
3622 operands, determine if we need to add ASSERT_EXPRs. */
3623 if (last
3624 && gimple_code (last) == GIMPLE_SWITCH
3625 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3626 find_switch_asserts (bb, as_a <gswitch *> (last));
3627
3628 /* Traverse all the statements in BB marking used names and looking
3629 for statements that may infer assertions for their used operands. */
3630 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3631 gsi_prev (&si))
3632 {
3633 gimple *stmt;
3634 tree op;
3635 ssa_op_iter i;
3636
3637 stmt = gsi_stmt (si);
3638
3639 if (is_gimple_debug (stmt))
3640 continue;
3641
3642 /* See if we can derive an assertion for any of STMT's operands. */
3643 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3644 {
3645 tree value;
3646 enum tree_code comp_code;
3647
3648 /* If op is not live beyond this stmt, do not bother to insert
3649 asserts for it. */
3650 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3651 continue;
3652
3653 /* If OP is used in such a way that we can infer a value
3654 range for it, and we don't find a previous assertion for
3655 it, create a new assertion location node for OP. */
3656 if (infer_value_range (stmt, op, &comp_code, &value))
3657 {
3658 /* If we are able to infer a nonzero value range for OP,
3659 then walk backwards through the use-def chain to see if OP
3660 was set via a typecast.
3661
3662 If so, then we can also infer a nonzero value range
3663 for the operand of the NOP_EXPR. */
3664 if (comp_code == NE_EXPR && integer_zerop (value))
3665 {
3666 tree t = op;
3667 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3668
3669 while (is_gimple_assign (def_stmt)
3670 && CONVERT_EXPR_CODE_P
3671 (gimple_assign_rhs_code (def_stmt))
3672 && TREE_CODE
3673 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3674 && POINTER_TYPE_P
3675 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3676 {
3677 t = gimple_assign_rhs1 (def_stmt);
3678 def_stmt = SSA_NAME_DEF_STMT (t);
3679
3680 /* Note we want to register the assert for the
3681 operand of the NOP_EXPR after SI, not after the
3682 conversion. */
3683 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3684 register_new_assert_for (t, t, comp_code, value,
3685 bb, NULL, si);
3686 }
3687 }
3688
3689 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3690 }
3691 }
3692
3693 /* Update live. */
3694 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3695 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3696 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3697 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3698 }
3699
3700 /* Traverse all PHI nodes in BB, updating live. */
3701 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3702 gsi_next (&si))
3703 {
3704 use_operand_p arg_p;
3705 ssa_op_iter i;
3706 gphi *phi = si.phi ();
3707 tree res = gimple_phi_result (phi);
3708
3709 if (virtual_operand_p (res))
3710 continue;
3711
3712 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3713 {
3714 tree arg = USE_FROM_PTR (arg_p);
3715 if (TREE_CODE (arg) == SSA_NAME)
3716 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3717 }
3718
3719 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3720 }
3721 }
3722
3723 /* Do an RPO walk over the function computing SSA name liveness
3724 on-the-fly and deciding on assert expressions to insert. */
3725
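/* Informally, the walk below computes the backward liveness

     live (BB) = uses (BB) U (union over S in succs (BB) of live (S))
                 - defs (BB)

   in a single sweep over the blocks in reverse of the RPO order,
   skipping back edges; their contribution is pre-seeded from the
   loop-header PHIs at the start of the function.  This is only a
   sketch of what the code computes, not a separate algorithm.  */
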
3726 static void
3727 find_assert_locations (void)
3728 {
3729 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3730 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3731 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3732 int rpo_cnt, i;
3733
3734 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3735 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3736 for (i = 0; i < rpo_cnt; ++i)
3737 bb_rpo[rpo[i]] = i;
3738
3739 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3740 the order in which we compute liveness and insert asserts, we
3741 would otherwise fail to insert asserts into the loop latch. */
3742 loop_p loop;
3743 FOR_EACH_LOOP (loop, 0)
3744 {
3745 i = loop->latch->index;
3746 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3747 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3748 !gsi_end_p (gsi); gsi_next (&gsi))
3749 {
3750 gphi *phi = gsi.phi ();
3751 if (virtual_operand_p (gimple_phi_result (phi)))
3752 continue;
3753 tree arg = gimple_phi_arg_def (phi, j);
3754 if (TREE_CODE (arg) == SSA_NAME)
3755 {
3756 if (live[i] == NULL)
3757 {
3758 live[i] = sbitmap_alloc (num_ssa_names);
3759 bitmap_clear (live[i]);
3760 }
3761 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3762 }
3763 }
3764 }
3765
3766 for (i = rpo_cnt - 1; i >= 0; --i)
3767 {
3768 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3769 edge e;
3770 edge_iterator ei;
3771
3772 if (!live[rpo[i]])
3773 {
3774 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3775 bitmap_clear (live[rpo[i]]);
3776 }
3777
3778 /* Process BB and update the live information with uses in
3779 this block. */
3780 find_assert_locations_1 (bb, live[rpo[i]]);
3781
3782 /* Merge liveness into the predecessor blocks and free it. */
3783 if (!bitmap_empty_p (live[rpo[i]]))
3784 {
3785 int pred_rpo = i;
3786 FOR_EACH_EDGE (e, ei, bb->preds)
3787 {
3788 int pred = e->src->index;
3789 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3790 continue;
3791
3792 if (!live[pred])
3793 {
3794 live[pred] = sbitmap_alloc (num_ssa_names);
3795 bitmap_clear (live[pred]);
3796 }
3797 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3798
3799 if (bb_rpo[pred] < pred_rpo)
3800 pred_rpo = bb_rpo[pred];
3801 }
3802
3803 /* Record the RPO number of the last visited block that needs
3804 live information from this block. */
3805 last_rpo[rpo[i]] = pred_rpo;
3806 }
3807 else
3808 {
3809 sbitmap_free (live[rpo[i]]);
3810 live[rpo[i]] = NULL;
3811 }
3812
3813 /* We can free all successors' live bitmaps if all their
3814 predecessors have been visited already. */
3815 FOR_EACH_EDGE (e, ei, bb->succs)
3816 if (last_rpo[e->dest->index] == i
3817 && live[e->dest->index])
3818 {
3819 sbitmap_free (live[e->dest->index]);
3820 live[e->dest->index] = NULL;
3821 }
3822 }
3823
3824 XDELETEVEC (rpo);
3825 XDELETEVEC (bb_rpo);
3826 XDELETEVEC (last_rpo);
3827 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3828 if (live[i])
3829 sbitmap_free (live[i]);
3830 XDELETEVEC (live);
3831 }
3832
3833 /* Create an ASSERT_EXPR for NAME and insert it in the location
3834 indicated by LOC. Return true if we made any edge insertions. */
3835
3836 static bool
3837 process_assert_insertions_for (tree name, assert_locus *loc)
3838 {
3839 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3840 gimple *stmt;
3841 tree cond;
3842 gimple *assert_stmt;
3843 edge_iterator ei;
3844 edge e;
3845
3846 /* If we have X <=> X, do not insert an assert expr for it. */
3847 if (loc->expr == loc->val)
3848 return false;
3849
3850 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3851 assert_stmt = build_assert_expr_for (cond, name);
3852 if (loc->e)
3853 {
3854 /* We have been asked to insert the assertion on an edge. This
3855 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3856 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3857 || (gimple_code (gsi_stmt (loc->si))
3858 == GIMPLE_SWITCH));
3859
3860 gsi_insert_on_edge (loc->e, assert_stmt);
3861 return true;
3862 }
3863
3864 /* If the stmt iterator points at the end then this is an insertion
3865 at the beginning of a block. */
3866 if (gsi_end_p (loc->si))
3867 {
3868 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3869 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3870 return false;
3871
3872 }
3873 /* Otherwise, we can insert right after LOC->SI, provided the
3874 statement is not the last statement in the block. */
3875 stmt = gsi_stmt (loc->si);
3876 if (!stmt_ends_bb_p (stmt))
3877 {
3878 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3879 return false;
3880 }
3881
3882 /* If STMT must be the last statement in BB, we can only insert new
3883 assertions on the non-abnormal edge out of BB. Note that since
3884 STMT is not control flow, there may only be one non-abnormal/eh edge
3885 out of BB. */
3886 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3887 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3888 {
3889 gsi_insert_on_edge (e, assert_stmt);
3890 return true;
3891 }
3892
3893 gcc_unreachable ();
3894 }
3895
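/* For example (an illustrative sketch): an assertion registered for
   the true edge of

     if (x_3 > 10)

   is queued on that edge here and materialized by
   gsi_commit_edge_inserts later, yielding roughly

     <new bb>:
     x_4 = ASSERT_EXPR <x_3, x_3 > 10>;

   on the path where the condition holds.  */
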
3896 /* Qsort helper for sorting assert locations. If STABLE is true, don't
3897 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3898 on the other hand, some pointers might be NULL. */
3899
3900 template <bool stable>
3901 static int
3902 compare_assert_loc (const void *pa, const void *pb)
3903 {
3904 assert_locus * const a = *(assert_locus * const *)pa;
3905 assert_locus * const b = *(assert_locus * const *)pb;
3906
3907 /* If STABLE, some asserts might already have been optimized away;
3908 sort them last. */
3909 if (stable)
3910 {
3911 if (a == NULL)
3912 return b != NULL;
3913 else if (b == NULL)
3914 return -1;
3915 }
3916
3917 if (a->e == NULL && b->e != NULL)
3918 return 1;
3919 else if (a->e != NULL && b->e == NULL)
3920 return -1;
3921
3922 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3923 no need to test both a->e and b->e. */
3924
3925 /* Sort by destination index. */
3926 if (a->e == NULL)
3927 ;
3928 else if (a->e->dest->index > b->e->dest->index)
3929 return 1;
3930 else if (a->e->dest->index < b->e->dest->index)
3931 return -1;
3932
3933 /* Sort by comp_code. */
3934 if (a->comp_code > b->comp_code)
3935 return 1;
3936 else if (a->comp_code < b->comp_code)
3937 return -1;
3938
3939 hashval_t ha, hb;
3940
3941 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3942 uses DECL_UID of the VAR_DECL, so sorting might differ between
3943 -g and -g0. When doing the removal of redundant assert exprs
3944 and commonization to successors, this does not matter, but
3945 the final sort needs to be stable. */
3946 if (stable)
3947 {
3948 ha = 0;
3949 hb = 0;
3950 }
3951 else
3952 {
3953 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3954 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3955 }
3956
3957 /* Break the tie using hashing and source/bb index. */
3958 if (ha == hb)
3959 return (a->e != NULL
3960 ? a->e->src->index - b->e->src->index
3961 : a->bb->index - b->bb->index);
3962 return ha > hb ? 1 : -1;
3963 }
3964
3965 /* Process all the insertions registered for every name N_i in
3966 NEED_ASSERT_FOR. The list of assertions to be inserted is
3967 found in ASSERTS_FOR[i]. */
3968
3969 static void
3970 process_assert_insertions (void)
3971 {
3972 unsigned i;
3973 bitmap_iterator bi;
3974 bool update_edges_p = false;
3975 int num_asserts = 0;
3976
3977 if (dump_file && (dump_flags & TDF_DETAILS))
3978 dump_all_asserts (dump_file);
3979
3980 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3981 {
3982 assert_locus *loc = asserts_for[i];
3983 gcc_assert (loc);
3984
3985 auto_vec<assert_locus *, 16> asserts;
3986 for (; loc; loc = loc->next)
3987 asserts.safe_push (loc);
3988 asserts.qsort (compare_assert_loc<false>);
3989
3990 /* Push down common asserts to successors and remove redundant ones. */
3991 unsigned ecnt = 0;
3992 assert_locus *common = NULL;
3993 unsigned commonj = 0;
3994 for (unsigned j = 0; j < asserts.length (); ++j)
3995 {
3996 loc = asserts[j];
3997 if (! loc->e)
3998 common = NULL;
3999 else if (! common
4000 || loc->e->dest != common->e->dest
4001 || loc->comp_code != common->comp_code
4002 || ! operand_equal_p (loc->val, common->val, 0)
4003 || ! operand_equal_p (loc->expr, common->expr, 0))
4004 {
4005 commonj = j;
4006 common = loc;
4007 ecnt = 1;
4008 }
4009 else if (loc->e == asserts[j-1]->e)
4010 {
4011 /* Remove duplicate asserts. */
4012 if (commonj == j - 1)
4013 {
4014 commonj = j;
4015 common = loc;
4016 }
4017 free (asserts[j-1]);
4018 asserts[j-1] = NULL;
4019 }
4020 else
4021 {
4022 ecnt++;
4023 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4024 {
4025 /* We have the same assertion on all incoming edges of a BB.
4026 Insert it at the beginning of that block. */
4027 loc->bb = loc->e->dest;
4028 loc->e = NULL;
4029 loc->si = gsi_none ();
4030 common = NULL;
4031 /* Clear asserts commoned. */
4032 for (; commonj != j; ++commonj)
4033 if (asserts[commonj])
4034 {
4035 free (asserts[commonj]);
4036 asserts[commonj] = NULL;
4037 }
4038 }
4039 }
4040 }
4041
4042 /* The asserts vector sorting above might be unstable for
4043 -fcompare-debug; sort again to ensure a stable sort. */
4044 asserts.qsort (compare_assert_loc<true>);
4045 for (unsigned j = 0; j < asserts.length (); ++j)
4046 {
4047 loc = asserts[j];
4048 if (! loc)
4049 break;
4050 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4051 num_asserts++;
4052 free (loc);
4053 }
4054 }
4055
4056 if (update_edges_p)
4057 gsi_commit_edge_inserts ();
4058
4059 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4060 num_asserts);
4061 }
4062
4063
4064 /* Traverse the flowgraph looking for conditional jumps to insert range
4065 expressions. These range expressions are meant to provide information
4066 to optimizations that need to reason in terms of value ranges. They
4067 will not be expanded into RTL. For instance, given:
4068
4069 x = ...
4070 y = ...
4071 if (x < y)
4072 y = x - 2;
4073 else
4074 x = y + 3;
4075
4076 this pass will transform the code into:
4077
4078 x = ...
4079 y = ...
4080 if (x < y)
4081 {
4082 x = ASSERT_EXPR <x, x < y>
4083 y = x - 2
4084 }
4085 else
4086 {
4087 y = ASSERT_EXPR <y, x >= y>
4088 x = y + 3
4089 }
4090
4091 The idea is that once copy and constant propagation have run, other
4092 optimizations will be able to determine what ranges of values can 'x'
4093 take in different paths of the code, simply by checking the reaching
4094 definition of 'x'. */
4095
4096 static void
4097 insert_range_assertions (void)
4098 {
4099 need_assert_for = BITMAP_ALLOC (NULL);
4100 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4101
4102 calculate_dominance_info (CDI_DOMINATORS);
4103
4104 find_assert_locations ();
4105 if (!bitmap_empty_p (need_assert_for))
4106 {
4107 process_assert_insertions ();
4108 update_ssa (TODO_update_ssa_no_phi);
4109 }
4110
4111 if (dump_file && (dump_flags & TDF_DETAILS))
4112 {
4113 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4114 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4115 }
4116
4117 free (asserts_for);
4118 BITMAP_FREE (need_assert_for);
4119 }
4120
4121 class vrp_prop : public ssa_propagation_engine
4122 {
4123 public:
4124 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4125 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4126
4127 void vrp_initialize (void);
4128 void vrp_finalize (bool);
4129 void check_all_array_refs (void);
4130 bool check_array_ref (location_t, tree, bool);
4131 bool check_mem_ref (location_t, tree, bool);
4132 void search_for_addr_array (tree, location_t);
4133
4134 class vr_values vr_values;
4135 /* Temporary delegator to minimize code churn. */
4136 const value_range *get_value_range (const_tree op)
4137 { return vr_values.get_value_range (op); }
4138 void set_def_to_varying (const_tree def)
4139 { vr_values.set_def_to_varying (def); }
4140 void set_defs_to_varying (gimple *stmt)
4141 { vr_values.set_defs_to_varying (stmt); }
4142 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4143 tree *output_p, value_range *vr)
4144 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4145 bool update_value_range (const_tree op, value_range *vr)
4146 { return vr_values.update_value_range (op, vr); }
4147 void extract_range_basic (value_range *vr, gimple *stmt)
4148 { vr_values.extract_range_basic (vr, stmt); }
4149 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4150 { vr_values.extract_range_from_phi_node (phi, vr); }
4151 };
4152 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible
4153 arrays and "struct" hacks. If VRP can determine that the
4154 array subscript is a constant, check if it is outside the valid
4155 range. If the array subscript is a RANGE, warn if it is
4156 non-overlapping with the valid range.
4157 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.
4158 Returns true if a warning has been issued. */
4159
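/* A minimal user-level example (illustrative; the exact wording and
   the optimization level at which it triggers may vary):

     int a[4];
     int f (void) { return a[5]; }

   is diagnosed as "array subscript 5 is above array bounds of
   'int[4]'".  */
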
4160 bool
4161 vrp_prop::check_array_ref (location_t location, tree ref,
4162 bool ignore_off_by_one)
4163 {
4164 tree low_sub, up_sub;
4165 tree low_bound, up_bound, up_bound_p1;
4166
4167 if (TREE_NO_WARNING (ref))
4168 return false;
4169
4170 low_sub = up_sub = TREE_OPERAND (ref, 1);
4171 up_bound = array_ref_up_bound (ref);
4172
4173 /* Set for accesses to interior zero-length arrays. */
4174 bool interior_zero_len = false;
4175
4176 if (!up_bound
4177 || TREE_CODE (up_bound) != INTEGER_CST
4178 || (warn_array_bounds < 2
4179 && array_at_struct_end_p (ref)))
4180 {
4181 /* Accesses to trailing arrays via pointers may access storage
4182 beyond the type's array bounds. For such arrays, or for flexible
4183 array members, as well as for other arrays of an unknown size,
4184 replace the upper bound with a more permissive one that assumes
4185 the size of the largest object is PTRDIFF_MAX. */
4186 tree eltsize = array_ref_element_size (ref);
4187
4188 if (TREE_CODE (eltsize) != INTEGER_CST
4189 || integer_zerop (eltsize))
4190 {
4191 up_bound = NULL_TREE;
4192 up_bound_p1 = NULL_TREE;
4193 }
4194 else
4195 {
4196 tree ptrdiff_max = TYPE_MAX_VALUE (ptrdiff_type_node);
4197 tree maxbound = ptrdiff_max;
4198 tree arg = TREE_OPERAND (ref, 0);
4199 poly_int64 off;
4200
4201 if (TREE_CODE (arg) == COMPONENT_REF)
4202 {
4203 /* Try to determine the size of the trailing array from
4204 its initializer (if it has one). */
4205 if (tree refsize = component_ref_size (arg, &interior_zero_len))
4206 if (TREE_CODE (refsize) == INTEGER_CST)
4207 maxbound = refsize;
4208 }
4209
4210 if (maxbound == ptrdiff_max
4211 && get_addr_base_and_unit_offset (arg, &off)
4212 && known_gt (off, 0))
4213 maxbound = wide_int_to_tree (sizetype,
4214 wi::sub (wi::to_wide (maxbound),
4215 off));
4216 else
4217 maxbound = fold_convert (sizetype, maxbound);
4218
4219 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4220
4221 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4222 build_int_cst (ptrdiff_type_node, 1));
4223 }
4224 }
4225 else
4226 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4227 build_int_cst (TREE_TYPE (up_bound), 1));
4228
4229 low_bound = array_ref_low_bound (ref);
4230
4231 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4232
4233 bool warned = false;
4234
4235 /* Empty array. */
4236 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4237 warned = warning_at (location, OPT_Warray_bounds,
4238 "array subscript %E is above array bounds of %qT",
4239 low_bound, artype);
4240
4241 const value_range *vr = NULL;
4242 if (TREE_CODE (low_sub) == SSA_NAME)
4243 {
4244 vr = get_value_range (low_sub);
4245 if (!vr->undefined_p () && !vr->varying_p ())
4246 {
4247 low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
4248 up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
4249 }
4250 }
4251
4252 if (warned)
4253 ; /* Do nothing. */
4254 else if (vr && vr->kind () == VR_ANTI_RANGE)
4255 {
4256 if (up_bound
4257 && TREE_CODE (up_sub) == INTEGER_CST
4258 && (ignore_off_by_one
4259 ? tree_int_cst_lt (up_bound, up_sub)
4260 : tree_int_cst_le (up_bound, up_sub))
4261 && TREE_CODE (low_sub) == INTEGER_CST
4262 && tree_int_cst_le (low_sub, low_bound))
4263 warned = warning_at (location, OPT_Warray_bounds,
4264 "array subscript [%E, %E] is outside "
4265 "array bounds of %qT",
4266 low_sub, up_sub, artype);
4267 }
4268 else if (up_bound
4269 && TREE_CODE (up_sub) == INTEGER_CST
4270 && (ignore_off_by_one
4271 ? !tree_int_cst_le (up_sub, up_bound_p1)
4272 : !tree_int_cst_le (up_sub, up_bound)))
4273 warned = warning_at (location, OPT_Warray_bounds,
4274 "array subscript %E is above array bounds of %qT",
4275 up_sub, artype);
4276 else if (TREE_CODE (low_sub) == INTEGER_CST
4277 && tree_int_cst_lt (low_sub, low_bound))
4278 warned = warning_at (location, OPT_Warray_bounds,
4279 "array subscript %E is below array bounds of %qT",
4280 low_sub, artype);
4281
4282 if (!warned && interior_zero_len)
4283 warned = warning_at (location, OPT_Wzero_length_bounds,
4284 (TREE_CODE (low_sub) == INTEGER_CST
4285 ? G_("array subscript %E is outside the bounds "
4286 "of an interior zero-length array %qT")
4287 : G_("array subscript %qE is outside the bounds "
4288 "of an interior zero-length array %qT")),
4289 low_sub, artype);
4290
4291 if (warned)
4292 {
4293 if (dump_file && (dump_flags & TDF_DETAILS))
4294 {
4295 fprintf (dump_file, "Array bound warning for ");
4296 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4297 fprintf (dump_file, "\n");
4298 }
4299
4300 ref = TREE_OPERAND (ref, 0);
4301
4302 tree rec = NULL_TREE;
4303 if (TREE_CODE (ref) == COMPONENT_REF)
4304 {
4305 /* For a reference to a member of a struct object also mention
4306 the object if it's known. It may be defined in a different
4307 function than the out-of-bounds access. */
4308 rec = TREE_OPERAND (ref, 0);
4309 if (!VAR_P (rec))
4310 rec = NULL_TREE;
4311 ref = TREE_OPERAND (ref, 1);
4312 }
4313
4314 if (DECL_P (ref))
4315 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4316 if (rec && DECL_P (rec))
4317 inform (DECL_SOURCE_LOCATION (rec), "defined here %qD", rec);
4318
4319 TREE_NO_WARNING (ref) = 1;
4320 }
4321
4322 return warned;
4323 }
4324
4325 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4326 references to string constants. If VRP can determine that the array
4327 subscript is a constant, check if it is outside valid range.
4328 If the array subscript is a RANGE, warn if it is non-overlapping
4329 with valid range.
4330 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4331 (used to allow one-past-the-end indices for code that takes
4332 the address of the just-past-the-end element of an array).
4333 Returns true if a warning has been issued. */
4334
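/* An illustrative sketch of the kind of access this aims to
   diagnose, assuming VRP has a range for the variable offset
   (details may vary):

     char a[4];
     char f (int i)
     {
       char *p = a + (i ? 6 : 7);   <-- offset range [6, 7]
       return *p;                   <-- outside [0, 3]
     }
*/
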
4335 bool
4336 vrp_prop::check_mem_ref (location_t location, tree ref,
4337 bool ignore_off_by_one)
4338 {
4339 if (TREE_NO_WARNING (ref))
4340 return false;
4341
4342 tree arg = TREE_OPERAND (ref, 0);
4343 /* The constant and variable offset of the reference. */
4344 tree cstoff = TREE_OPERAND (ref, 1);
4345 tree varoff = NULL_TREE;
4346
4347 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4348
4349 /* The array or string constant bounds in bytes. Initially set
4350 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4351 determined. */
4352 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4353
4354 /* The minimum and maximum intermediate offset. For a reference
4355 to be valid, not only must the final offset/subscript be
4356 in bounds but all intermediate offsets should be as well.
4357 GCC may be able to deal gracefully with such out-of-bounds
4358 offsets so the checking is only enabled at -Warray-bounds=2
4359 where it may help detect bugs in uses of the intermediate
4360 offsets that could otherwise not be detectable. */
4361 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4362 offset_int extrema[2] = { 0, wi::abs (ioff) };
4363
4364 /* The range of the byte offset into the reference. */
4365 offset_int offrange[2] = { 0, 0 };
4366
4367 const value_range *vr = NULL;
4368
4369 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4370 The loop computes the range of the final offset for expressions such
4371 as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
4372 some range. */
4373 const unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
4374 for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
4375 {
4376 gimple *def = SSA_NAME_DEF_STMT (arg);
4377 if (!is_gimple_assign (def))
4378 break;
4379
4380 tree_code code = gimple_assign_rhs_code (def);
4381 if (code == POINTER_PLUS_EXPR)
4382 {
4383 arg = gimple_assign_rhs1 (def);
4384 varoff = gimple_assign_rhs2 (def);
4385 }
4386 else if (code == ASSERT_EXPR)
4387 {
4388 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4389 continue;
4390 }
4391 else
4392 return false;
4393
4394 /* VAROFF should always be an SSA_NAME here (and not even
4395 INTEGER_CST) but there's no point in taking chances. */
4396 if (TREE_CODE (varoff) != SSA_NAME)
4397 break;
4398
4399 vr = get_value_range (varoff);
4400 if (!vr || vr->undefined_p () || vr->varying_p ())
4401 break;
4402
4403 if (!vr->constant_p ())
4404 break;
4405
4406 if (vr->kind () == VR_RANGE)
4407 {
4408 offset_int min
4409 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
4410 offset_int max
4411 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
4412 if (min < max)
4413 {
4414 offrange[0] += min;
4415 offrange[1] += max;
4416 }
4417 else
4418 {
4419 /* When MIN >= MAX, the offset is effectively in a union
4420 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
4421 Since there is no way to represent such a range across
4422 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4423 to OFFRANGE. */
4424 offrange[0] += arrbounds[0];
4425 offrange[1] += arrbounds[1];
4426 }
4427 }
4428 else
4429 {
4430 /* For an anti-range, analogously to the above, conservatively
4431 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4432 offrange[0] += arrbounds[0];
4433 offrange[1] += arrbounds[1];
4434 }
4435
4436 /* Keep track of the minimum and maximum offset. */
4437 if (offrange[1] < 0 && offrange[1] < extrema[0])
4438 extrema[0] = offrange[1];
4439 if (offrange[0] > 0 && offrange[0] > extrema[1])
4440 extrema[1] = offrange[0];
4441
4442 if (offrange[0] < arrbounds[0])
4443 offrange[0] = arrbounds[0];
4444
4445 if (offrange[1] > arrbounds[1])
4446 offrange[1] = arrbounds[1];
4447 }
4448
4449 if (TREE_CODE (arg) == ADDR_EXPR)
4450 {
4451 arg = TREE_OPERAND (arg, 0);
4452 if (TREE_CODE (arg) != STRING_CST
4453 && TREE_CODE (arg) != VAR_DECL)
4454 return false;
4455 }
4456 else
4457 return false;
4458
4459 /* The type of the object being referred to. It can be an array,
4460 string literal, or a non-array type when the MEM_REF represents
4461 a reference/subscript via a pointer to an object that is not
4462 an element of an array. Incomplete types are excluded as well
4463 because their size is not known. */
4464 tree reftype = TREE_TYPE (arg);
4465 if (POINTER_TYPE_P (reftype)
4466 || !COMPLETE_TYPE_P (reftype)
4467 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST)
4468 return false;
4469
4470 /* Except in declared objects, references to trailing array members
4471 of structs and union objects are excluded because MEM_REF doesn't
4472 make it possible to identify the member where the reference
4473 originated. */
4474 if (RECORD_OR_UNION_TYPE_P (reftype)
4475 && (!VAR_P (arg)
4476 || (DECL_EXTERNAL (arg) && array_at_struct_end_p (ref))))
4477 return false;
4478
4479 arrbounds[0] = 0;
4480
4481 offset_int eltsize;
4482 if (TREE_CODE (reftype) == ARRAY_TYPE)
4483 {
4484 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4485 if (tree dom = TYPE_DOMAIN (reftype))
4486 {
4487 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4488 if (TREE_CODE (arg) == COMPONENT_REF)
4489 {
4490 offset_int size = maxobjsize;
4491 if (tree fldsize = component_ref_size (arg))
4492 size = wi::to_offset (fldsize);
4493 arrbounds[1] = wi::lrshift (size, wi::floor_log2 (eltsize));
4494 }
4495 else if (array_at_struct_end_p (arg) || !bnds[0] || !bnds[1])
4496 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4497 else
4498 arrbounds[1] = (wi::to_offset (bnds[1]) - wi::to_offset (bnds[0])
4499 + 1) * eltsize;
4500 }
4501 else
4502 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4503
4504 if (TREE_CODE (ref) == MEM_REF)
4505 {
4506 /* For MEM_REF determine a tighter bound of the non-array
4507 element type. */
4508 tree eltype = TREE_TYPE (reftype);
4509 while (TREE_CODE (eltype) == ARRAY_TYPE)
4510 eltype = TREE_TYPE (eltype);
4511 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4512 }
4513 }
4514 else
4515 {
4516 eltsize = 1;
4517 tree size = TYPE_SIZE_UNIT (reftype);
4518 if (VAR_P (arg))
4519 if (tree initsize = DECL_SIZE_UNIT (arg))
4520 if (tree_int_cst_lt (size, initsize))
4521 size = initsize;
4522
4523 arrbounds[1] = wi::to_offset (size);
4524 }
4525
4526 offrange[0] += ioff;
4527 offrange[1] += ioff;
4528
4529 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4530 is set (when taking the address of the one-past-last element
4531 of an array) but always use the stricter bound in diagnostics. */
4532 offset_int ubound = arrbounds[1];
4533 if (ignore_off_by_one)
4534 ubound += 1;
4535
4536 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4537 {
4538 /* Treat a reference to a non-array object as one to an array
4539 of a single element. */
4540 if (TREE_CODE (reftype) != ARRAY_TYPE)
4541 reftype = build_array_type_nelts (reftype, 1);
4542
4543 if (TREE_CODE (ref) == MEM_REF)
4544 {
4545 /* Extract the element type out of MEM_REF and use its size
4546 to compute the index to print in the diagnostic; arrays
4547 in MEM_REF don't mean anything. A type with no size like
4548 void is as good as having a size of 1. */
4549 tree type = TREE_TYPE (ref);
4550 while (TREE_CODE (type) == ARRAY_TYPE)
4551 type = TREE_TYPE (type);
4552 if (tree size = TYPE_SIZE_UNIT (type))
4553 {
4554 offrange[0] = offrange[0] / wi::to_offset (size);
4555 offrange[1] = offrange[1] / wi::to_offset (size);
4556 }
4557 }
4558 else
4559 {
4560 /* For anything other than MEM_REF, compute the index to
4561 print in the diagnostic as the offset over element size. */
4562 offrange[0] = offrange[0] / eltsize;
4563 offrange[1] = offrange[1] / eltsize;
4564 }
4565
4566 bool warned;
4567 if (offrange[0] == offrange[1])
4568 warned = warning_at (location, OPT_Warray_bounds,
4569 "array subscript %wi is outside array bounds "
4570 "of %qT",
4571 offrange[0].to_shwi (), reftype);
4572 else
4573 warned = warning_at (location, OPT_Warray_bounds,
4574 "array subscript [%wi, %wi] is outside "
4575 "array bounds of %qT",
4576 offrange[0].to_shwi (),
4577 offrange[1].to_shwi (), reftype);
4578 if (warned && DECL_P (arg))
4579 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4580
4581 if (warned)
4582 TREE_NO_WARNING (ref) = 1;
4583 return warned;
4584 }
4585
4586 if (warn_array_bounds < 2)
4587 return false;
4588
4589 /* At level 2 check also intermediate offsets. */
4590 int i = 0;
4591 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4592 {
4593 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4594
4595 if (warning_at (location, OPT_Warray_bounds,
4596 "intermediate array offset %wi is outside array bounds "
4597 "of %qT", tmpidx, reftype))
4598 {
4599 TREE_NO_WARNING (ref) = 1;
4600 return true;
4601 }
4602 }
4603
4604 return false;
4605 }
4606
4607 /* Search the expression T, located at LOCATION, for ARRAY_REFs and
4608 MEM_REFs, and call check_array_ref/check_mem_ref on each. */
4609
4610 void
4611 vrp_prop::search_for_addr_array (tree t, location_t location)
4612 {
4613 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4614 do
4615 {
4616 bool warned = false;
4617 if (TREE_CODE (t) == ARRAY_REF)
4618 warned = check_array_ref (location, t, true /*ignore_off_by_one*/);
4619 else if (TREE_CODE (t) == MEM_REF)
4620 warned = check_mem_ref (location, t, true /*ignore_off_by_one*/);
4621
4622 if (warned)
4623 TREE_NO_WARNING (t) = true;
4624
4625 t = TREE_OPERAND (t, 0);
4626 }
4627 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4628
4629 if (TREE_CODE (t) != MEM_REF
4630 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4631 || TREE_NO_WARNING (t))
4632 return;
4633
4634 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4635 tree low_bound, up_bound, el_sz;
4636 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4637 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4638 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4639 return;
4640
4641 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4642 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4643 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4644 if (!low_bound
4645 || TREE_CODE (low_bound) != INTEGER_CST
4646 || !up_bound
4647 || TREE_CODE (up_bound) != INTEGER_CST
4648 || !el_sz
4649 || TREE_CODE (el_sz) != INTEGER_CST)
4650 return;
4651
4652 offset_int idx;
4653 if (!mem_ref_offset (t).is_constant (&idx))
4654 return;
4655
4656 bool warned = false;
4657 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4658 if (idx < 0)
4659 {
4660 if (dump_file && (dump_flags & TDF_DETAILS))
4661 {
4662 fprintf (dump_file, "Array bound warning for ");
4663 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4664 fprintf (dump_file, "\n");
4665 }
4666 warned = warning_at (location, OPT_Warray_bounds,
4667 "array subscript %wi is below "
4668 "array bounds of %qT",
4669 idx.to_shwi (), TREE_TYPE (tem));
4670 }
4671 else if (idx > (wi::to_offset (up_bound)
4672 - wi::to_offset (low_bound) + 1))
4673 {
4674 if (dump_file && (dump_flags & TDF_DETAILS))
4675 {
4676 fprintf (dump_file, "Array bound warning for ");
4677 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4678 fprintf (dump_file, "\n");
4679 }
4680 warned = warning_at (location, OPT_Warray_bounds,
4681 "array subscript %wu is above "
4682 "array bounds of %qT",
4683 idx.to_uhwi (), TREE_TYPE (tem));
4684 }
4685
4686 if (warned)
4687 {
4688 if (DECL_P (t))
4689 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4690
4691 TREE_NO_WARNING (t) = 1;
4692 }
4693 }
4694
4695 /* walk_tree() callback that checks if *TP is
4696 an ARRAY_REF inside an ADDR_EXPR (in which case an array
4697 subscript one past the valid range is allowed). Call
4698 check_array_ref for each ARRAY_REF found. The location is
4699 passed in DATA. */
4700
4701 static tree
4702 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4703 {
4704 tree t = *tp;
4705 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4706 location_t location;
4707
4708 if (EXPR_HAS_LOCATION (t))
4709 location = EXPR_LOCATION (t);
4710 else
4711 location = gimple_location (wi->stmt);
4712
4713 *walk_subtree = TRUE;
4714
4715 bool warned = false;
4716 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4717 if (TREE_CODE (t) == ARRAY_REF)
4718 warned = vrp_prop->check_array_ref (location, t, false/*ignore_off_by_one*/);
4719 else if (TREE_CODE (t) == MEM_REF)
4720 warned = vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4721 else if (TREE_CODE (t) == ADDR_EXPR)
4722 {
4723 vrp_prop->search_for_addr_array (t, location);
4724 *walk_subtree = FALSE;
4725 }
4726 /* Propagate the no-warning bit to the outer expression. */
4727 if (warned)
4728 TREE_NO_WARNING (t) = true;
4729
4730 return NULL_TREE;
4731 }
4732
4733 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4734 to walk over all statements of all reachable BBs and call
4735 check_array_bounds on them. */
4736
4737 class check_array_bounds_dom_walker : public dom_walker
4738 {
4739 public:
4740 check_array_bounds_dom_walker (vrp_prop *prop)
4741 : dom_walker (CDI_DOMINATORS,
4742 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4743 flags, so that we can merge in information on
4744 non-executable edges from vrp_folder. */
4745 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4746 m_prop (prop) {}
4747 ~check_array_bounds_dom_walker () {}
4748
4749 edge before_dom_children (basic_block) FINAL OVERRIDE;
4750
4751 private:
4752 vrp_prop *m_prop;
4753 };
4754
4755 /* Implementation of dom_walker::before_dom_children.
4756
4757 Walk over all statements of BB and call check_array_bounds on them,
4758 and determine if there's a unique successor edge. */
4759
4760 edge
4761 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4762 {
4763 gimple_stmt_iterator si;
4764 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4765 {
4766 gimple *stmt = gsi_stmt (si);
4767 struct walk_stmt_info wi;
4768 if (!gimple_has_location (stmt)
4769 || is_gimple_debug (stmt))
4770 continue;
4771
4772 memset (&wi, 0, sizeof (wi));
4773
4774 wi.info = m_prop;
4775
4776 walk_gimple_op (stmt, check_array_bounds, &wi);
4777 }
4778
4779 /* Determine if there's a unique successor edge, and if so, return
4780 that back to dom_walker, ensuring that we don't visit blocks that
4781 became unreachable during the VRP propagation
4782 (PR tree-optimization/83312). */
4783 return find_taken_edge (bb, NULL_TREE);
4784 }
4785
4786 /* Walk over all statements of all reachable BBs and call check_array_bounds
4787 on them. */
4788
4789 void
4790 vrp_prop::check_all_array_refs ()
4791 {
4792 check_array_bounds_dom_walker w (this);
4793 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4794 }
4795
4796 /* Return true if all imm uses of VAR are either in STMT, or
4797 feed (optionally through a chain of single imm uses) GIMPLE_COND
4798 in basic block COND_BB. */
4799
4800 static bool
4801 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4802 {
4803 use_operand_p use_p, use2_p;
4804 imm_use_iterator iter;
4805
4806 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4807 if (USE_STMT (use_p) != stmt)
4808 {
4809 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4810 if (is_gimple_debug (use_stmt))
4811 continue;
4812 while (is_gimple_assign (use_stmt)
4813 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4814 && single_imm_use (gimple_assign_lhs (use_stmt),
4815 &use2_p, &use_stmt2))
4816 use_stmt = use_stmt2;
4817 if (gimple_code (use_stmt) != GIMPLE_COND
4818 || gimple_bb (use_stmt) != cond_bb)
4819 return false;
4820 }
4821 return true;
4822 }
4823
4824 /* Handle
4825 _4 = x_3 & 31;
4826 if (_4 != 0)
4827 goto <bb 6>;
4828 else
4829 goto <bb 7>;
4830 <bb 6>:
4831 __builtin_unreachable ();
4832 <bb 7>:
4833 x_5 = ASSERT_EXPR <x_3, ...>;
4834 If x_3 has no other immediate uses (checked by the caller) and
4835 VAR is the x_3 var from the ASSERT_EXPR, we can clear the low
4836 5 bits from the non-zero bitmask. */
4837
4838 void
4839 maybe_set_nonzero_bits (edge e, tree var)
4840 {
4841 basic_block cond_bb = e->src;
4842 gimple *stmt = last_stmt (cond_bb);
4843 tree cst;
4844
4845 if (stmt == NULL
4846 || gimple_code (stmt) != GIMPLE_COND
4847 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4848 ? EQ_EXPR : NE_EXPR)
4849 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4850 || !integer_zerop (gimple_cond_rhs (stmt)))
4851 return;
4852
4853 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4854 if (!is_gimple_assign (stmt)
4855 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4856 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4857 return;
4858 if (gimple_assign_rhs1 (stmt) != var)
4859 {
4860 gimple *stmt2;
4861
4862 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4863 return;
4864 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4865 if (!gimple_assign_cast_p (stmt2)
4866 || gimple_assign_rhs1 (stmt2) != var
4867 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4868 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4869 != TYPE_PRECISION (TREE_TYPE (var))))
4870 return;
4871 }
4872 cst = gimple_assign_rhs2 (stmt);
4873 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4874 wi::to_wide (cst)));
4875 }
4876
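/* A source-level sketch that gives rise to the GIMPLE above
   (illustrative only):

     void g (unsigned);
     void f (unsigned x)
     {
       if (x & 31)
         __builtin_unreachable ();
       g (x);   <-- here the low 5 bits of x are known to be zero
     }
*/
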
4877 /* Convert range assertion expressions into the implied copies and
4878 copy propagate away the copies. Doing the trivial copy propagation
4879 here avoids the need to run the full copy propagation pass after
4880 VRP.
4881
4882 FIXME, this will eventually lead to copy propagation removing the
4883 names that had useful range information attached to them. For
4884 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4885 then N_i will have the range [3, +INF].
4886
4887 However, by converting the assertion into the implied copy
4888 operation N_i = N_j, we will then copy-propagate N_j into the uses
4889 of N_i and lose the range information. We may want to hold on to
4890 ASSERT_EXPRs a little while longer as the ranges could be used in
4891 things like jump threading.
4892
4893 The problem with keeping ASSERT_EXPRs around is that passes after
4894 VRP need to handle them appropriately.
4895
4896 Another approach would be to make the range information a first
4897 class property of the SSA_NAME so that it can be queried from
4898 any pass. This is made somewhat more complex by the need for
4899 multiple ranges to be associated with one SSA_NAME. */
4900
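/* Before/after sketch (illustrative):

     x_5 = ASSERT_EXPR <x_3, x_3 > 10>;   <-- removed
     _6 = x_5 + 1;                        <-- becomes _6 = x_3 + 1;

   i.e. every use of x_5 is rewritten to x_3 and the assertion
   deleted.  */
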
4901 static void
4902 remove_range_assertions (void)
4903 {
4904 basic_block bb;
4905 gimple_stmt_iterator si;
4906 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4907 a basic block preceded by a GIMPLE_COND branching to it and
4908 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4909 int is_unreachable;
4910
4911 /* Note that the BSI iterator bump happens at the bottom of the
4912 loop and no bump is necessary if we're removing the statement
4913 referenced by the current BSI. */
4914 FOR_EACH_BB_FN (bb, cfun)
4915 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4916 {
4917 gimple *stmt = gsi_stmt (si);
4918
4919 if (is_gimple_assign (stmt)
4920 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4921 {
4922 tree lhs = gimple_assign_lhs (stmt);
4923 tree rhs = gimple_assign_rhs1 (stmt);
4924 tree var;
4925
4926 var = ASSERT_EXPR_VAR (rhs);
4927
4928 if (TREE_CODE (var) == SSA_NAME
4929 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4930 && SSA_NAME_RANGE_INFO (lhs))
4931 {
4932 if (is_unreachable == -1)
4933 {
4934 is_unreachable = 0;
4935 if (single_pred_p (bb)
4936 && assert_unreachable_fallthru_edge_p
4937 (single_pred_edge (bb)))
4938 is_unreachable = 1;
4939 }
4940 /* Handle
4941 if (x_7 >= 10 && x_7 < 20)
4942 __builtin_unreachable ();
4943 x_8 = ASSERT_EXPR <x_7, ...>;
4944 if the only uses of x_7 are in the ASSERT_EXPR and
4945 in the condition. In that case, we can copy the
4946 range info from x_8 computed in this pass also
4947 for x_7. */
4948 if (is_unreachable
4949 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4950 single_pred (bb)))
4951 {
4952 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4953 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4954 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4955 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4956 }
4957 }
4958
4959 /* Propagate the RHS into every use of the LHS. For SSA names
4960 also propagate abnormals as it merely restores the original
4961 IL in this case (a replace_uses_by would assert). */
4962 if (TREE_CODE (var) == SSA_NAME)
4963 {
4964 imm_use_iterator iter;
4965 use_operand_p use_p;
4966 gimple *use_stmt;
4967 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4968 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4969 SET_USE (use_p, var);
4970 }
4971 else
4972 replace_uses_by (lhs, var);
4973
4974 /* And finally, remove the copy, it is not needed. */
4975 gsi_remove (&si, true);
4976 release_defs (stmt);
4977 }
4978 else
4979 {
4980 if (!is_gimple_debug (gsi_stmt (si)))
4981 is_unreachable = 0;
4982 gsi_next (&si);
4983 }
4984 }
4985 }
4986
4987 /* Return true if STMT is interesting for VRP. */
4988
4989 bool
4990 stmt_interesting_for_vrp (gimple *stmt)
4991 {
4992 if (gimple_code (stmt) == GIMPLE_PHI)
4993 {
4994 tree res = gimple_phi_result (stmt);
4995 return (!virtual_operand_p (res)
4996 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4997 || POINTER_TYPE_P (TREE_TYPE (res))));
4998 }
4999 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5000 {
5001 tree lhs = gimple_get_lhs (stmt);
5002
5003 /* In general, assignments with virtual operands are not useful
5004 for deriving ranges, with the obvious exception of calls to
5005 builtin functions. */
5006 if (lhs && TREE_CODE (lhs) == SSA_NAME
5007 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5008 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5009 && (is_gimple_call (stmt)
5010 || !gimple_vuse (stmt)))
5011 return true;
5012 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5013 switch (gimple_call_internal_fn (stmt))
5014 {
5015 case IFN_ADD_OVERFLOW:
5016 case IFN_SUB_OVERFLOW:
5017 case IFN_MUL_OVERFLOW:
5018 case IFN_ATOMIC_COMPARE_EXCHANGE:
5019 /* These internal calls return a _Complex integer type,
5020 but are interesting to VRP nevertheless. */
5021 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5022 return true;
5023 break;
5024 default:
5025 break;
5026 }
5027 }
5028 else if (gimple_code (stmt) == GIMPLE_COND
5029 || gimple_code (stmt) == GIMPLE_SWITCH)
5030 return true;
5031
5032 return false;
5033 }
5034
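/* E.g. (a GIMPLE sketch), the overflow builtins lower to an internal
   call whose _Complex result is still interesting here:

     _5 = .ADD_OVERFLOW (x_2, y_3);
     _6 = REALPART_EXPR <_5>;     <-- the value
     _7 = IMAGPART_EXPR <_5>;     <-- the overflow flag
*/
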
5035 /* Initialization required by ssa_propagate engine. */
5036
5037 void
5038 vrp_prop::vrp_initialize ()
5039 {
5040 basic_block bb;
5041
5042 FOR_EACH_BB_FN (bb, cfun)
5043 {
5044 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5045 gsi_next (&si))
5046 {
5047 gphi *phi = si.phi ();
5048 if (!stmt_interesting_for_vrp (phi))
5049 {
5050 tree lhs = PHI_RESULT (phi);
5051 set_def_to_varying (lhs);
5052 prop_set_simulate_again (phi, false);
5053 }
5054 else
5055 prop_set_simulate_again (phi, true);
5056 }
5057
5058 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5059 gsi_next (&si))
5060 {
5061 gimple *stmt = gsi_stmt (si);
5062
5063 /* If the statement is a control insn, then we must
5064 simulate it at least once; failing to do so means
5065 that its outgoing edges will never get added. */
5066 if (stmt_ends_bb_p (stmt))
5067 prop_set_simulate_again (stmt, true);
5068 else if (!stmt_interesting_for_vrp (stmt))
5069 {
5070 set_defs_to_varying (stmt);
5071 prop_set_simulate_again (stmt, false);
5072 }
5073 else
5074 prop_set_simulate_again (stmt, true);
5075 }
5076 }
5077 }
5078
5079 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5080 that includes the value VAL. The search is restricted to the range
5081 [START_IDX, n - 1] where n is the size of VEC.
5082
5083 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5084 returned.
5085
5086 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5087 it is placed in IDX and false is returned.
5088
5089 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5090 returned. */
5091
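/* A worked instance of the contract above (illustrative): with the
   non-default labels

     [1] case 1:   [2] case 5 ... 7:   [3] case 9:

   searching for VAL 6 from START_IDX 1 stores 2 in *IDX and returns
   true (6 lies within 5 ... 7), while searching for VAL 8 stores 3
   in *IDX and returns false (the next larger label).  */
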
5092 bool
5093 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5094 {
5095 size_t n = gimple_switch_num_labels (stmt);
5096 size_t low, high;
5097
5098 /* Find case label for minimum of the value range or the next one.
5099 At each iteration we are searching in [low, high - 1]. */
5100
5101 for (low = start_idx, high = n; high != low; )
5102 {
5103 tree t;
5104 int cmp;
5105 /* Note that i != high, so we never ask for n. */
5106 size_t i = (high + low) / 2;
5107 t = gimple_switch_label (stmt, i);
5108
5109 /* Cache the result of comparing CASE_LOW and val. */
5110 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5111
5112 if (cmp == 0)
5113 {
5114 /* Ranges cannot be empty. */
5115 *idx = i;
5116 return true;
5117 }
5118 else if (cmp > 0)
5119 high = i;
5120 else
5121 {
5122 low = i + 1;
5123 if (CASE_HIGH (t) != NULL
5124 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5125 {
5126 *idx = i;
5127 return true;
5128 }
5129 }
5130 }
5131
5132 *idx = high;
5133 return false;
5134 }
5135
5136 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5137 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5138 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5139 then MAX_IDX < MIN_IDX.
5140 Returns true if the default label is not needed. */
5141
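/* For instance (illustrative): with labels case 1:, case 2:, case 3:
   and [MIN, MAX] = [1, 3], the labels form one contiguous run
   covering the whole range, so *MIN_IDX and *MAX_IDX bracket all
   three labels and true is returned; the default label cannot be
   reached for values in [1, 3].  */
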
5142 bool
5143 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5144 size_t *max_idx)
5145 {
5146 size_t i, j;
5147 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5148 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5149
5150 if (i == j
5151 && min_take_default
5152 && max_take_default)
5153 {
5154 /* Only the default case label is reached.
5155 Return an empty range. */
5156 *min_idx = 1;
5157 *max_idx = 0;
5158 return false;
5159 }
5160 else
5161 {
5162 bool take_default = min_take_default || max_take_default;
5163 tree low, high;
5164 size_t k;
5165
5166 if (max_take_default)
5167 j--;
5168
5169 /* If the case label range is contiguous, we do not need
5170 the default case label. Verify that. */
5171 high = CASE_LOW (gimple_switch_label (stmt, i));
5172 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5173 high = CASE_HIGH (gimple_switch_label (stmt, i));
5174 for (k = i + 1; k <= j; ++k)
5175 {
5176 low = CASE_LOW (gimple_switch_label (stmt, k));
5177 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5178 {
5179 take_default = true;
5180 break;
5181 }
5182 high = low;
5183 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5184 high = CASE_HIGH (gimple_switch_label (stmt, k));
5185 }
5186
5187 *min_idx = i;
5188 *max_idx = j;
5189 return !take_default;
5190 }
5191 }
5192
5193 /* Evaluate statement STMT. If the statement produces a useful range,
5194 return SSA_PROP_INTERESTING and record the SSA name with the
5195 interesting range into *OUTPUT_P.
5196
5197 If STMT is a conditional branch and we can determine its truth
5198 value, the taken edge is recorded in *TAKEN_EDGE_P.
5199
5200 If STMT produces a varying value, return SSA_PROP_VARYING. */
5201
5202 enum ssa_prop_result
5203 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5204 {
5205 tree lhs = gimple_get_lhs (stmt);
5206 value_range vr;
5207 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5208
5209 if (*output_p)
5210 {
5211 if (update_value_range (*output_p, &vr))
5212 {
5213 if (dump_file && (dump_flags & TDF_DETAILS))
5214 {
5215 fprintf (dump_file, "Found new range for ");
5216 print_generic_expr (dump_file, *output_p);
5217 fprintf (dump_file, ": ");
5218 dump_value_range (dump_file, &vr);
5219 fprintf (dump_file, "\n");
5220 }
5221
5222 if (vr.varying_p ())
5223 return SSA_PROP_VARYING;
5224
5225 return SSA_PROP_INTERESTING;
5226 }
5227 return SSA_PROP_NOT_INTERESTING;
5228 }
5229
5230 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5231 switch (gimple_call_internal_fn (stmt))
5232 {
5233 case IFN_ADD_OVERFLOW:
5234 case IFN_SUB_OVERFLOW:
5235 case IFN_MUL_OVERFLOW:
5236 case IFN_ATOMIC_COMPARE_EXCHANGE:
5237 /* These internal calls return a _Complex integer type,
5238 which VRP does not track, but the immediate uses
5239 thereof might be interesting. */
5240 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5241 {
5242 imm_use_iterator iter;
5243 use_operand_p use_p;
5244 enum ssa_prop_result res = SSA_PROP_VARYING;
5245
5246 set_def_to_varying (lhs);
5247
5248 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5249 {
5250 gimple *use_stmt = USE_STMT (use_p);
5251 if (!is_gimple_assign (use_stmt))
5252 continue;
5253 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5254 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5255 continue;
5256 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5257 tree use_lhs = gimple_assign_lhs (use_stmt);
5258 if (TREE_CODE (rhs1) != rhs_code
5259 || TREE_OPERAND (rhs1, 0) != lhs
5260 || TREE_CODE (use_lhs) != SSA_NAME
5261 || !stmt_interesting_for_vrp (use_stmt)
5262 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5263 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5264 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5265 continue;
5266
5267 /* If there is a change in the value range for any of the
5268 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5269 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5270 or IMAGPART_EXPR immediate uses, but none of them have
5271 a change in their value ranges, return
5272 SSA_PROP_NOT_INTERESTING. If there are no
5273 {REAL,IMAG}PART_EXPR uses at all,
5274 return SSA_PROP_VARYING. */
5275 value_range new_vr;
5276 extract_range_basic (&new_vr, use_stmt);
5277 const value_range *old_vr = get_value_range (use_lhs);
5278 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
5279 res = SSA_PROP_INTERESTING;
5280 else
5281 res = SSA_PROP_NOT_INTERESTING;
5282 new_vr.equiv_clear ();
5283 if (res == SSA_PROP_INTERESTING)
5284 {
5285 *output_p = lhs;
5286 return res;
5287 }
5288 }
5289
5290 return res;
5291 }
5292 break;
5293 default:
5294 break;
5295 }
5296
5297 /* All other statements produce nothing of interest for VRP, so mark
5298 their outputs varying and prevent further simulation. */
5299 set_defs_to_varying (stmt);
5300
5301 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5302 }
5303
5304 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5305 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5306 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5307 possible such range. The resulting range is not canonicalized. */
5308
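/* A worked instance (illustrative): for unsigned char, unioning
   [0, 3] with [10, 255] yields the anti-range ~[4, 9], because the
   low bound is the type minimum and the high bound the type maximum;
   without that, e.g. for [0, 3] union [10, 20], the result is the
   convex hull [0, 20].  */
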
5309 static void
5310 union_ranges (enum value_range_kind *vr0type,
5311 tree *vr0min, tree *vr0max,
5312 enum value_range_kind vr1type,
5313 tree vr1min, tree vr1max)
5314 {
5315 int cmpmin = compare_values (*vr0min, vr1min);
5316 int cmpmax = compare_values (*vr0max, vr1max);
5317 bool mineq = cmpmin == 0;
5318 bool maxeq = cmpmax == 0;
5319
5320 /* [] is vr0, () is vr1 in the following classification comments. */
5321 if (mineq && maxeq)
5322 {
5323 /* [( )] */
5324 if (*vr0type == vr1type)
5325 /* Nothing to do for equal ranges. */
5326 ;
5327 else if ((*vr0type == VR_RANGE
5328 && vr1type == VR_ANTI_RANGE)
5329 || (*vr0type == VR_ANTI_RANGE
5330 && vr1type == VR_RANGE))
5331 {
5332 /* For anti-range with range union the result is varying. */
5333 goto give_up;
5334 }
5335 else
5336 gcc_unreachable ();
5337 }
5338 else if (operand_less_p (*vr0max, vr1min) == 1
5339 || operand_less_p (vr1max, *vr0min) == 1)
5340 {
5341 /* [ ] ( ) or ( ) [ ]
5342 If the ranges have an empty intersection, the result of the
5343 union operation is the anti-range, or, if both are
5344 anti-ranges, it covers everything.
5345 if (*vr0type == VR_ANTI_RANGE
5346 && vr1type == VR_ANTI_RANGE)
5347 goto give_up;
5348 else if (*vr0type == VR_ANTI_RANGE
5349 && vr1type == VR_RANGE)
5350 ;
5351 else if (*vr0type == VR_RANGE
5352 && vr1type == VR_ANTI_RANGE)
5353 {
5354 *vr0type = vr1type;
5355 *vr0min = vr1min;
5356 *vr0max = vr1max;
5357 }
5358 else if (*vr0type == VR_RANGE
5359 && vr1type == VR_RANGE)
5360 {
5361 /* The result is the convex hull of both ranges. */
5362 if (operand_less_p (*vr0max, vr1min) == 1)
5363 {
5364 /* If the result can be an anti-range, create one. */
5365 if (TREE_CODE (*vr0max) == INTEGER_CST
5366 && TREE_CODE (vr1min) == INTEGER_CST
5367 && vrp_val_is_min (*vr0min)
5368 && vrp_val_is_max (vr1max))
5369 {
5370 tree min = int_const_binop (PLUS_EXPR,
5371 *vr0max,
5372 build_int_cst (TREE_TYPE (*vr0max), 1));
5373 tree max = int_const_binop (MINUS_EXPR,
5374 vr1min,
5375 build_int_cst (TREE_TYPE (vr1min), 1));
5376 if (!operand_less_p (max, min))
5377 {
5378 *vr0type = VR_ANTI_RANGE;
5379 *vr0min = min;
5380 *vr0max = max;
5381 }
5382 else
5383 *vr0max = vr1max;
5384 }
5385 else
5386 *vr0max = vr1max;
5387 }
5388 else
5389 {
5390 /* If the result can be an anti-range, create one. */
5391 if (TREE_CODE (vr1max) == INTEGER_CST
5392 && TREE_CODE (*vr0min) == INTEGER_CST
5393 && vrp_val_is_min (vr1min)
5394 && vrp_val_is_max (*vr0max))
5395 {
5396 tree min = int_const_binop (PLUS_EXPR,
5397 vr1max,
5398 build_int_cst (TREE_TYPE (vr1max), 1));
5399 tree max = int_const_binop (MINUS_EXPR,
5400 *vr0min,
5401 build_int_cst (TREE_TYPE (*vr0min), 1));
5402 if (!operand_less_p (max, min))
5403 {
5404 *vr0type = VR_ANTI_RANGE;
5405 *vr0min = min;
5406 *vr0max = max;
5407 }
5408 else
5409 *vr0min = vr1min;
5410 }
5411 else
5412 *vr0min = vr1min;
5413 }
5414 }
5415 else
5416 gcc_unreachable ();
5417 }
5418 else if ((maxeq || cmpmax == 1)
5419 && (mineq || cmpmin == -1))
5420 {
5421 /* [ ( ) ] or [( ) ] or [ ( )] */
5422 if (*vr0type == VR_RANGE
5423 && vr1type == VR_RANGE)
5424 ;
5425 else if (*vr0type == VR_ANTI_RANGE
5426 && vr1type == VR_ANTI_RANGE)
5427 {
5428 *vr0type = vr1type;
5429 *vr0min = vr1min;
5430 *vr0max = vr1max;
5431 }
5432 else if (*vr0type == VR_ANTI_RANGE
5433 && vr1type == VR_RANGE)
5434 {
5435 /* Arbitrarily choose the right or left gap. */
5436 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5437 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5438 build_int_cst (TREE_TYPE (vr1min), 1));
5439 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5440 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5441 build_int_cst (TREE_TYPE (vr1max), 1));
5442 else
5443 goto give_up;
5444 }
5445 else if (*vr0type == VR_RANGE
5446 && vr1type == VR_ANTI_RANGE)
5447 /* The result covers everything. */
5448 goto give_up;
5449 else
5450 gcc_unreachable ();
5451 }
5452 else if ((maxeq || cmpmax == -1)
5453 && (mineq || cmpmin == 1))
5454 {
5455 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5456 if (*vr0type == VR_RANGE
5457 && vr1type == VR_RANGE)
5458 {
5459 *vr0type = vr1type;
5460 *vr0min = vr1min;
5461 *vr0max = vr1max;
5462 }
5463 else if (*vr0type == VR_ANTI_RANGE
5464 && vr1type == VR_ANTI_RANGE)
5465 ;
5466 else if (*vr0type == VR_RANGE
5467 && vr1type == VR_ANTI_RANGE)
5468 {
5469 *vr0type = VR_ANTI_RANGE;
5470 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5471 {
5472 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5473 build_int_cst (TREE_TYPE (*vr0min), 1));
5474 *vr0min = vr1min;
5475 }
5476 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5477 {
5478 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5479 build_int_cst (TREE_TYPE (*vr0max), 1));
5480 *vr0max = vr1max;
5481 }
5482 else
5483 goto give_up;
5484 }
5485 else if (*vr0type == VR_ANTI_RANGE
5486 && vr1type == VR_RANGE)
5487 /* The result covers everything. */
5488 goto give_up;
5489 else
5490 gcc_unreachable ();
5491 }
5492 else if (cmpmin == -1
5493 && cmpmax == -1
5494 && (operand_less_p (vr1min, *vr0max) == 1
5495 || operand_equal_p (vr1min, *vr0max, 0)))
5496 {
5497 /* [ ( ] ) or [ ]( ) */
5498 if (*vr0type == VR_RANGE
5499 && vr1type == VR_RANGE)
5500 *vr0max = vr1max;
5501 else if (*vr0type == VR_ANTI_RANGE
5502 && vr1type == VR_ANTI_RANGE)
5503 *vr0min = vr1min;
5504 else if (*vr0type == VR_ANTI_RANGE
5505 && vr1type == VR_RANGE)
5506 {
5507 if (TREE_CODE (vr1min) == INTEGER_CST)
5508 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5509 build_int_cst (TREE_TYPE (vr1min), 1));
5510 else
5511 goto give_up;
5512 }
5513 else if (*vr0type == VR_RANGE
5514 && vr1type == VR_ANTI_RANGE)
5515 {
5516 if (TREE_CODE (*vr0max) == INTEGER_CST)
5517 {
5518 *vr0type = vr1type;
5519 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5520 build_int_cst (TREE_TYPE (*vr0max), 1));
5521 *vr0max = vr1max;
5522 }
5523 else
5524 goto give_up;
5525 }
5526 else
5527 gcc_unreachable ();
5528 }
5529 else if (cmpmin == 1
5530 && cmpmax == 1
5531 && (operand_less_p (*vr0min, vr1max) == 1
5532 || operand_equal_p (*vr0min, vr1max, 0)))
5533 {
5534 /* ( [ ) ] or ( )[ ] */
5535 if (*vr0type == VR_RANGE
5536 && vr1type == VR_RANGE)
5537 *vr0min = vr1min;
5538 else if (*vr0type == VR_ANTI_RANGE
5539 && vr1type == VR_ANTI_RANGE)
5540 *vr0max = vr1max;
5541 else if (*vr0type == VR_ANTI_RANGE
5542 && vr1type == VR_RANGE)
5543 {
5544 if (TREE_CODE (vr1max) == INTEGER_CST)
5545 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5546 build_int_cst (TREE_TYPE (vr1max), 1));
5547 else
5548 goto give_up;
5549 }
5550 else if (*vr0type == VR_RANGE
5551 && vr1type == VR_ANTI_RANGE)
5552 {
5553 if (TREE_CODE (*vr0min) == INTEGER_CST)
5554 {
5555 *vr0type = vr1type;
5556 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5557 build_int_cst (TREE_TYPE (*vr0min), 1));
5558 *vr0min = vr1min;
5559 }
5560 else
5561 goto give_up;
5562 }
5563 else
5564 gcc_unreachable ();
5565 }
5566 else
5567 goto give_up;
5568
5569 return;
5570
5571 give_up:
5572 *vr0type = VR_VARYING;
5573 *vr0min = NULL_TREE;
5574 *vr0max = NULL_TREE;
5575 }
5576
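/* An illustrative sketch, not part of the pass (function name invented,
   constants arbitrary): unioning a range with an anti-range whose
   excluded interval lies strictly inside it covers every value, so
   union_ranges takes the give_up path above and the result degrades to
   VR_VARYING.  Callable from a debugger.  */

DEBUG_FUNCTION void
debug_union_ranges_example ()
{
  tree t = integer_type_node;
  value_range_kind kind = VR_RANGE;
  tree min = build_int_cst (t, 0);
  tree max = build_int_cst (t, 100);
  /* [0, 100] union ~[20, 30] covers every value.  */
  union_ranges (&kind, &min, &max,
		VR_ANTI_RANGE, build_int_cst (t, 20), build_int_cst (t, 30));
  gcc_assert (kind == VR_VARYING && min == NULL_TREE && max == NULL_TREE);
}
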
5577 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5578 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5579 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5580 possible such range. The resulting range is not canonicalized. */
5581
5582 static void
5583 intersect_ranges (enum value_range_kind *vr0type,
5584 tree *vr0min, tree *vr0max,
5585 enum value_range_kind vr1type,
5586 tree vr1min, tree vr1max)
5587 {
5588 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5589 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5590
5591 /* [] is vr0, () is vr1 in the following classification comments. */
5592 if (mineq && maxeq)
5593 {
5594 /* [( )] */
5595 if (*vr0type == vr1type)
5596 /* Nothing to do for equal ranges. */
5597 ;
5598 else if ((*vr0type == VR_RANGE
5599 && vr1type == VR_ANTI_RANGE)
5600 || (*vr0type == VR_ANTI_RANGE
5601 && vr1type == VR_RANGE))
5602 {
5603 /* Intersecting a range with its matching anti-range yields an empty range. */
5604 *vr0type = VR_UNDEFINED;
5605 *vr0min = NULL_TREE;
5606 *vr0max = NULL_TREE;
5607 }
5608 else
5609 gcc_unreachable ();
5610 }
5611 else if (operand_less_p (*vr0max, vr1min) == 1
5612 || operand_less_p (vr1max, *vr0min) == 1)
5613 {
5614 /* [ ] ( ) or ( ) [ ]
5615 If the ranges have an empty intersection, intersecting an
5616 anti-range with a range yields the range, and intersecting
5617 two ranges yields the empty set. */
5618 if (*vr0type == VR_RANGE
5619 && vr1type == VR_ANTI_RANGE)
5620 ;
5621 else if (*vr0type == VR_ANTI_RANGE
5622 && vr1type == VR_RANGE)
5623 {
5624 *vr0type = vr1type;
5625 *vr0min = vr1min;
5626 *vr0max = vr1max;
5627 }
5628 else if (*vr0type == VR_RANGE
5629 && vr1type == VR_RANGE)
5630 {
5631 *vr0type = VR_UNDEFINED;
5632 *vr0min = NULL_TREE;
5633 *vr0max = NULL_TREE;
5634 }
5635 else if (*vr0type == VR_ANTI_RANGE
5636 && vr1type == VR_ANTI_RANGE)
5637 {
5638 /* If the anti-ranges are adjacent to each other merge them. */
5639 if (TREE_CODE (*vr0max) == INTEGER_CST
5640 && TREE_CODE (vr1min) == INTEGER_CST
5641 && operand_less_p (*vr0max, vr1min) == 1
5642 && integer_onep (int_const_binop (MINUS_EXPR,
5643 vr1min, *vr0max)))
5644 *vr0max = vr1max;
5645 else if (TREE_CODE (vr1max) == INTEGER_CST
5646 && TREE_CODE (*vr0min) == INTEGER_CST
5647 && operand_less_p (vr1max, *vr0min) == 1
5648 && integer_onep (int_const_binop (MINUS_EXPR,
5649 *vr0min, vr1max)))
5650 *vr0min = vr1min;
5651 /* Else arbitrarily take VR0. */
5652 }
5653 }
5654 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5655 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5656 {
5657 /* [ ( ) ] or [( ) ] or [ ( )] */
5658 if (*vr0type == VR_RANGE
5659 && vr1type == VR_RANGE)
5660 {
5661 /* If both are ranges the result is the inner one. */
5662 *vr0type = vr1type;
5663 *vr0min = vr1min;
5664 *vr0max = vr1max;
5665 }
5666 else if (*vr0type == VR_RANGE
5667 && vr1type == VR_ANTI_RANGE)
5668 {
5669 /* Choose the right gap if the left one is empty. */
5670 if (mineq)
5671 {
5672 if (TREE_CODE (vr1max) != INTEGER_CST)
5673 *vr0min = vr1max;
5674 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5675 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5676 *vr0min
5677 = int_const_binop (MINUS_EXPR, vr1max,
5678 build_int_cst (TREE_TYPE (vr1max), -1));
5679 else
5680 *vr0min
5681 = int_const_binop (PLUS_EXPR, vr1max,
5682 build_int_cst (TREE_TYPE (vr1max), 1));
5683 }
5684 /* Choose the left gap if the right one is empty. */
5685 else if (maxeq)
5686 {
5687 if (TREE_CODE (vr1min) != INTEGER_CST)
5688 *vr0max = vr1min;
5689 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5690 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5691 *vr0max
5692 = int_const_binop (PLUS_EXPR, vr1min,
5693 build_int_cst (TREE_TYPE (vr1min), -1));
5694 else
5695 *vr0max
5696 = int_const_binop (MINUS_EXPR, vr1min,
5697 build_int_cst (TREE_TYPE (vr1min), 1));
5698 }
5699 /* Choose the anti-range if the range is effectively varying. */
5700 else if (vrp_val_is_min (*vr0min)
5701 && vrp_val_is_max (*vr0max))
5702 {
5703 *vr0type = vr1type;
5704 *vr0min = vr1min;
5705 *vr0max = vr1max;
5706 }
5707 /* Else choose the range. */
5708 }
5709 else if (*vr0type == VR_ANTI_RANGE
5710 && vr1type == VR_ANTI_RANGE)
5711 /* If both are anti-ranges the result is the outer one. */
5712 ;
5713 else if (*vr0type == VR_ANTI_RANGE
5714 && vr1type == VR_RANGE)
5715 {
5716 /* The intersection is empty. */
5717 *vr0type = VR_UNDEFINED;
5718 *vr0min = NULL_TREE;
5719 *vr0max = NULL_TREE;
5720 }
5721 else
5722 gcc_unreachable ();
5723 }
5724 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5725 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5726 {
5727 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5728 if (*vr0type == VR_RANGE
5729 && vr1type == VR_RANGE)
5730 /* Choose the inner range. */
5731 ;
5732 else if (*vr0type == VR_ANTI_RANGE
5733 && vr1type == VR_RANGE)
5734 {
5735 /* Choose the right gap if the left is empty. */
5736 if (mineq)
5737 {
5738 *vr0type = VR_RANGE;
5739 if (TREE_CODE (*vr0max) != INTEGER_CST)
5740 *vr0min = *vr0max;
5741 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5742 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5743 *vr0min
5744 = int_const_binop (MINUS_EXPR, *vr0max,
5745 build_int_cst (TREE_TYPE (*vr0max), -1));
5746 else
5747 *vr0min
5748 = int_const_binop (PLUS_EXPR, *vr0max,
5749 build_int_cst (TREE_TYPE (*vr0max), 1));
5750 *vr0max = vr1max;
5751 }
5752 /* Choose the left gap if the right is empty. */
5753 else if (maxeq)
5754 {
5755 *vr0type = VR_RANGE;
5756 if (TREE_CODE (*vr0min) != INTEGER_CST)
5757 *vr0max = *vr0min;
5758 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5759 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5760 *vr0max
5761 = int_const_binop (PLUS_EXPR, *vr0min,
5762 build_int_cst (TREE_TYPE (*vr0min), -1));
5763 else
5764 *vr0max
5765 = int_const_binop (MINUS_EXPR, *vr0min,
5766 build_int_cst (TREE_TYPE (*vr0min), 1));
5767 *vr0min = vr1min;
5768 }
5769 /* Choose the anti-range if the range is effectively varying. */
5770 else if (vrp_val_is_min (vr1min)
5771 && vrp_val_is_max (vr1max))
5772 ;
5773 /* Choose the anti-range if it is ~[0,0]; that range is special
5774 enough to warrant keeping when vr1's range is relatively wide,
5775 at least for types as wide as int. This covers pointers and
5776 arguments to functions like ctz. */
5777 else if (*vr0min == *vr0max
5778 && integer_zerop (*vr0min)
5779 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5780 >= TYPE_PRECISION (integer_type_node))
5781 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5782 && TREE_CODE (vr1max) == INTEGER_CST
5783 && TREE_CODE (vr1min) == INTEGER_CST
5784 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5785 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5786 ;
5787 /* Else choose the range. */
5788 else
5789 {
5790 *vr0type = vr1type;
5791 *vr0min = vr1min;
5792 *vr0max = vr1max;
5793 }
5794 }
5795 else if (*vr0type == VR_ANTI_RANGE
5796 && vr1type == VR_ANTI_RANGE)
5797 {
5798 /* If both are anti-ranges the result is the outer one. */
5799 *vr0type = vr1type;
5800 *vr0min = vr1min;
5801 *vr0max = vr1max;
5802 }
5803 else if (vr1type == VR_ANTI_RANGE
5804 && *vr0type == VR_RANGE)
5805 {
5806 /* The intersection is empty. */
5807 *vr0type = VR_UNDEFINED;
5808 *vr0min = NULL_TREE;
5809 *vr0max = NULL_TREE;
5810 }
5811 else
5812 gcc_unreachable ();
5813 }
5814 else if ((operand_less_p (vr1min, *vr0max) == 1
5815 || operand_equal_p (vr1min, *vr0max, 0))
5816 && operand_less_p (*vr0min, vr1min) == 1)
5817 {
5818 /* [ ( ] ) or [ ]( ) */
5819 if (*vr0type == VR_ANTI_RANGE
5820 && vr1type == VR_ANTI_RANGE)
5821 *vr0max = vr1max;
5822 else if (*vr0type == VR_RANGE
5823 && vr1type == VR_RANGE)
5824 *vr0min = vr1min;
5825 else if (*vr0type == VR_RANGE
5826 && vr1type == VR_ANTI_RANGE)
5827 {
5828 if (TREE_CODE (vr1min) == INTEGER_CST)
5829 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5830 build_int_cst (TREE_TYPE (vr1min), 1));
5831 else
5832 *vr0max = vr1min;
5833 }
5834 else if (*vr0type == VR_ANTI_RANGE
5835 && vr1type == VR_RANGE)
5836 {
5837 *vr0type = VR_RANGE;
5838 if (TREE_CODE (*vr0max) == INTEGER_CST)
5839 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5840 build_int_cst (TREE_TYPE (*vr0max), 1));
5841 else
5842 *vr0min = *vr0max;
5843 *vr0max = vr1max;
5844 }
5845 else
5846 gcc_unreachable ();
5847 }
5848 else if ((operand_less_p (*vr0min, vr1max) == 1
5849 || operand_equal_p (*vr0min, vr1max, 0))
5850 && operand_less_p (vr1min, *vr0min) == 1)
5851 {
5852 /* ( [ ) ] or ( )[ ] */
5853 if (*vr0type == VR_ANTI_RANGE
5854 && vr1type == VR_ANTI_RANGE)
5855 *vr0min = vr1min;
5856 else if (*vr0type == VR_RANGE
5857 && vr1type == VR_RANGE)
5858 *vr0max = vr1max;
5859 else if (*vr0type == VR_RANGE
5860 && vr1type == VR_ANTI_RANGE)
5861 {
5862 if (TREE_CODE (vr1max) == INTEGER_CST)
5863 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5864 build_int_cst (TREE_TYPE (vr1max), 1));
5865 else
5866 *vr0min = vr1max;
5867 }
5868 else if (*vr0type == VR_ANTI_RANGE
5869 && vr1type == VR_RANGE)
5870 {
5871 *vr0type = VR_RANGE;
5872 if (TREE_CODE (*vr0min) == INTEGER_CST)
5873 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5874 build_int_cst (TREE_TYPE (*vr0min), 1));
5875 else
5876 *vr0max = *vr0min;
5877 *vr0min = vr1min;
5878 }
5879 else
5880 gcc_unreachable ();
5881 }
5882
5883 /* If we know the intersection is empty, there's no need to
5884 conservatively add anything else to the set. */
5885 if (*vr0type == VR_UNDEFINED)
5886 return;
5887
5888 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
5889 result for the intersection. That's always a conservative
5890 correct estimate unless VR1 is a constant singleton range
5891 in which case we choose that. */
5892 if (vr1type == VR_RANGE
5893 && is_gimple_min_invariant (vr1min)
5894 && vrp_operand_equal_p (vr1min, vr1max))
5895 {
5896 *vr0type = vr1type;
5897 *vr0min = vr1min;
5898 *vr0max = vr1max;
5899 }
5900 }
5901
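/* An illustrative sketch, not part of the pass (function name invented,
   constants arbitrary): when a range and an anti-range share their
   minimum, intersect_ranges keeps the gap to the right of the excluded
   interval, e.g. [5, 10] intersected with ~[5, 5] becomes [6, 10].  */

DEBUG_FUNCTION void
debug_intersect_ranges_example ()
{
  tree t = integer_type_node;
  value_range_kind kind = VR_RANGE;
  tree min = build_int_cst (t, 5);
  tree max = build_int_cst (t, 10);
  intersect_ranges (&kind, &min, &max,
		    VR_ANTI_RANGE, build_int_cst (t, 5), build_int_cst (t, 5));
  /* Expect [6, 10].  */
  gcc_assert (kind == VR_RANGE
	      && tree_int_cst_equal (min, build_int_cst (t, 6))
	      && tree_int_cst_equal (max, build_int_cst (t, 10)));
}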
5902
5903 /* Helper for the intersection operation for value ranges. Given two
5904 value ranges VR0 and VR1, return the intersection of the two
5905 ranges. This may not be the smallest possible such range. */
5906
5907 value_range_base
5908 value_range_base::intersect_helper (const value_range_base *vr0,
5909 const value_range_base *vr1)
5910 {
5911 /* If either range is VR_VARYING the other one wins. */
5912 if (vr1->varying_p ())
5913 return *vr0;
5914 if (vr0->varying_p ())
5915 return *vr1;
5916
5917 /* When either range is VR_UNDEFINED the resulting range is
5918 VR_UNDEFINED, too. */
5919 if (vr0->undefined_p ())
5920 return *vr0;
5921 if (vr1->undefined_p ())
5922 return *vr1;
5923
5924 value_range_kind vr0type = vr0->kind ();
5925 tree vr0min = vr0->min ();
5926 tree vr0max = vr0->max ();
5927 intersect_ranges (&vr0type, &vr0min, &vr0max,
5928 vr1->kind (), vr1->min (), vr1->max ());
5929 /* Make sure to canonicalize the result though as the inversion of a
5930 VR_RANGE can still be a VR_RANGE. Work on a temporary so we can
5931 fall back to vr0 when this turns things to varying. */
5932 value_range_base tem;
5933 if (vr0type == VR_UNDEFINED)
5934 tem.set_undefined ();
5935 else if (vr0type == VR_VARYING)
5936 tem.set_varying (vr0->type ());
5937 else
5938 tem.set (vr0type, vr0min, vr0max);
5939 /* If that failed, use the saved original VR0. */
5940 if (tem.varying_p ())
5941 return *vr0;
5942
5943 return tem;
5944 }
5945
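/* A minimal sketch of the common case, for exposition only (function
   name invented, constants arbitrary): intersecting two overlapping
   plain ranges keeps the overlap, so [0, 10] ^ [5, 20] is [5, 10].  */

DEBUG_FUNCTION void
debug_intersect_helper_example ()
{
  tree t = integer_type_node;
  value_range_base a (VR_RANGE, build_int_cst (t, 0), build_int_cst (t, 10));
  value_range_base b (VR_RANGE, build_int_cst (t, 5), build_int_cst (t, 20));
  a.intersect (&b);
  gcc_assert (a.kind () == VR_RANGE
	      && tree_int_cst_equal (a.min (), build_int_cst (t, 5))
	      && tree_int_cst_equal (a.max (), build_int_cst (t, 10)));
}
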
5946 void
5947 value_range_base::intersect (const value_range_base *other)
5948 {
5949 if (dump_file && (dump_flags & TDF_DETAILS))
5950 {
5951 fprintf (dump_file, "Intersecting\n ");
5952 dump_value_range (dump_file, this);
5953 fprintf (dump_file, "\nand\n ");
5954 dump_value_range (dump_file, other);
5955 fprintf (dump_file, "\n");
5956 }
5957
5958 *this = intersect_helper (this, other);
5959
5960 if (dump_file && (dump_flags & TDF_DETAILS))
5961 {
5962 fprintf (dump_file, "to\n ");
5963 dump_value_range (dump_file, this);
5964 fprintf (dump_file, "\n");
5965 }
5966 }
5967
5968 void
5969 value_range::intersect (const value_range *other)
5970 {
5971 if (dump_file && (dump_flags & TDF_DETAILS))
5972 {
5973 fprintf (dump_file, "Intersecting\n ");
5974 dump_value_range (dump_file, this);
5975 fprintf (dump_file, "\nand\n ");
5976 dump_value_range (dump_file, other);
5977 fprintf (dump_file, "\n");
5978 }
5979
5980 /* If THIS is varying we want to pick up equivalences from OTHER.
5981 Just special-case this here rather than trying to fixup after the
5982 fact. */
5983 if (this->varying_p ())
5984 this->deep_copy (other);
5985 else
5986 {
5987 value_range_base tem = intersect_helper (this, other);
5988 this->update (tem.kind (), tem.min (), tem.max ());
5989
5990 /* If the result is VR_UNDEFINED there is no need to mess with
5991 equivalencies. */
5992 if (!undefined_p ())
5993 {
5994 /* The resulting set of equivalences for range intersection
5995 is the union of the two sets. */
5996 if (m_equiv && other->m_equiv && m_equiv != other->m_equiv)
5997 bitmap_ior_into (m_equiv, other->m_equiv);
5998 else if (other->m_equiv && !m_equiv)
5999 {
6000 /* All equivalence bitmaps are allocated from the same
6001 obstack. So we can use the obstack associated with
6002 VR to allocate this->m_equiv. */
6003 m_equiv = BITMAP_ALLOC (other->m_equiv->obstack);
6004 bitmap_copy (m_equiv, other->m_equiv);
6005 }
6006 }
6007 }
6008
6009 if (dump_file && (dump_flags & TDF_DETAILS))
6010 {
6011 fprintf (dump_file, "to\n ");
6012 dump_value_range (dump_file, this);
6013 fprintf (dump_file, "\n");
6014 }
6015 }
6016
6017 /* Helper for meet operation for value ranges. Given two value ranges VR0 and
6018 VR1, return a range that contains both VR0 and VR1. This may not be the
6019 smallest possible such range. */
6020
6021 value_range_base
6022 value_range_base::union_helper (const value_range_base *vr0,
6023 const value_range_base *vr1)
6024 {
6025 /* VR0 has the resulting range if VR1 is undefined or VR0 is varying. */
6026 if (vr1->undefined_p ()
6027 || vr0->varying_p ())
6028 return *vr0;
6029
6030 /* VR1 has the resulting range if VR0 is undefined or VR1 is varying. */
6031 if (vr0->undefined_p ()
6032 || vr1->varying_p ())
6033 return *vr1;
6034
6035 value_range_kind vr0type = vr0->kind ();
6036 tree vr0min = vr0->min ();
6037 tree vr0max = vr0->max ();
6038 union_ranges (&vr0type, &vr0min, &vr0max,
6039 vr1->kind (), vr1->min (), vr1->max ());
6040
6041 /* Work on a temporary so we can still use vr0 when union returns varying. */
6042 value_range_base tem;
6043 if (vr0type == VR_UNDEFINED)
6044 tem.set_undefined ();
6045 else if (vr0type == VR_VARYING)
6046 tem.set_varying (vr0->type ());
6047 else
6048 tem.set (vr0type, vr0min, vr0max);
6049
6050 /* Failed to find an efficient meet. Before giving up and setting
6051 the result to VARYING, see if we can at least derive a useful
6052 anti-range. */
6053 if (tem.varying_p ()
6054 && range_includes_zero_p (vr0) == 0
6055 && range_includes_zero_p (vr1) == 0)
6056 {
6057 tem.set_nonzero (vr0->type ());
6058 return tem;
6059 }
6060
6061 return tem;
6062 }
6063
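/* A minimal sketch, for exposition only, of the early-outs above
   (function name invented, constants arbitrary): an UNDEFINED operand
   is the identity for the union and a VARYING operand absorbs it.  */

DEBUG_FUNCTION void
debug_union_identity ()
{
  tree t = integer_type_node;
  value_range_base r (VR_RANGE, build_int_cst (t, 1), build_int_cst (t, 2));
  value_range_base u;
  u.set_undefined ();
  r.union_ (&u);
  gcc_assert (r.kind () == VR_RANGE);	/* Unchanged.  */
  value_range_base v;
  v.set_varying (t);
  r.union_ (&v);
  gcc_assert (r.varying_p ());		/* Absorbed.  */
}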
6064
6065 /* Meet operation for value ranges. Given two value ranges VR0 and
6066 VR1, store in VR0 a range that contains both VR0 and VR1. This
6067 may not be the smallest possible such range. */
6068
6069 void
6070 value_range_base::union_ (const value_range_base *other)
6071 {
6072 if (dump_file && (dump_flags & TDF_DETAILS))
6073 {
6074 fprintf (dump_file, "Meeting\n ");
6075 dump_value_range (dump_file, this);
6076 fprintf (dump_file, "\nand\n ");
6077 dump_value_range (dump_file, other);
6078 fprintf (dump_file, "\n");
6079 }
6080
6081 *this = union_helper (this, other);
6082
6083 if (dump_file && (dump_flags & TDF_DETAILS))
6084 {
6085 fprintf (dump_file, "to\n ");
6086 dump_value_range (dump_file, this);
6087 fprintf (dump_file, "\n");
6088 }
6089 }
6090
6091 void
6092 value_range::union_ (const value_range *other)
6093 {
6094 if (dump_file && (dump_flags & TDF_DETAILS))
6095 {
6096 fprintf (dump_file, "Meeting\n ");
6097 dump_value_range (dump_file, this);
6098 fprintf (dump_file, "\nand\n ");
6099 dump_value_range (dump_file, other);
6100 fprintf (dump_file, "\n");
6101 }
6102
6103 /* If THIS is undefined we want to pick up equivalences from OTHER.
6104 Just special-case this here rather than trying to fixup after the fact. */
6105 if (this->undefined_p ())
6106 this->deep_copy (other);
6107 else
6108 {
6109 value_range_base tem = union_helper (this, other);
6110 this->update (tem.kind (), tem.min (), tem.max ());
6111
6112 /* The resulting set of equivalences is always the intersection of
6113 the two sets. */
6114 if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
6115 bitmap_and_into (this->m_equiv, other->m_equiv);
6116 else if (this->m_equiv && !other->m_equiv)
6117 bitmap_clear (this->m_equiv);
6118 }
6119
6120 if (dump_file && (dump_flags & TDF_DETAILS))
6121 {
6122 fprintf (dump_file, "to\n ");
6123 dump_value_range (dump_file, this);
6124 fprintf (dump_file, "\n");
6125 }
6126 }
6127
6128 /* Normalize addresses into constants. */
6129
6130 value_range_base
6131 value_range_base::normalize_addresses () const
6132 {
6133 if (!POINTER_TYPE_P (type ()) || range_has_numeric_bounds_p (this))
6134 return *this;
6135
6136 if (!range_includes_zero_p (this))
6137 {
6138 gcc_checking_assert (TREE_CODE (m_min) == ADDR_EXPR
6139 || TREE_CODE (m_max) == ADDR_EXPR);
6140 return range_nonzero (type ());
6141 }
6142 return value_range_base (type ());
6143 }
6144
6145 /* Normalize symbolics and addresses into constants. */
6146
6147 value_range_base
6148 value_range_base::normalize_symbolics () const
6149 {
6150 if (varying_p () || undefined_p ())
6151 return *this;
6152 tree ttype = type ();
6153 bool min_symbolic = !is_gimple_min_invariant (min ());
6154 bool max_symbolic = !is_gimple_min_invariant (max ());
6155 if (!min_symbolic && !max_symbolic)
6156 return normalize_addresses ();
6157
6158 // [SYM, SYM] -> VARYING
6159 if (min_symbolic && max_symbolic)
6160 {
6161 value_range_base var;
6162 var.set_varying (ttype);
6163 return var;
6164 }
6165 if (kind () == VR_RANGE)
6166 {
6167 // [SYM, NUM] -> [-MIN, NUM]
6168 if (min_symbolic)
6169 return value_range_base (VR_RANGE, vrp_val_min (ttype, true), max ());
6170 // [NUM, SYM] -> [NUM, +MAX]
6171 return value_range_base (VR_RANGE, min (), vrp_val_max (ttype, true));
6172 }
6173 gcc_checking_assert (kind () == VR_ANTI_RANGE);
6174 // ~[SYM, NUM] -> [NUM + 1, +MAX]
6175 if (min_symbolic)
6176 {
6177 if (!vrp_val_is_max (max ()))
6178 {
6179 tree n = wide_int_to_tree (ttype, wi::to_wide (max ()) + 1);
6180 return value_range_base (VR_RANGE, n, vrp_val_max (ttype, true));
6181 }
6182 value_range_base var;
6183 var.set_varying (ttype);
6184 return var;
6185 }
6186 // ~[NUM, SYM] -> [-MIN, NUM - 1]
6187 if (!vrp_val_is_min (min ()))
6188 {
6189 tree n = wide_int_to_tree (ttype, wi::to_wide (min ()) - 1);
6190 return value_range_base (VR_RANGE, vrp_val_min (ttype, true), n);
6191 }
6192 value_range_base var;
6193 var.set_varying (ttype);
6194 return var;
6195 }
6196
6197 /* Return the number of sub-ranges in a range. */
6198
6199 unsigned
6200 value_range_base::num_pairs () const
6201 {
6202 if (undefined_p ())
6203 return 0;
6204 if (varying_p ())
6205 return 1;
6206 if (symbolic_p ())
6207 return normalize_symbolics ().num_pairs ();
6208 if (m_kind == VR_ANTI_RANGE)
6209 {
6210 // ~[MIN, X] has one sub-range of [X+1, MAX], and
6211 // ~[X, MAX] has one sub-range of [MIN, X-1].
6212 if (vrp_val_is_min (m_min, true) || vrp_val_is_max (m_max, true))
6213 return 1;
6214 return 2;
6215 }
6216 return 1;
6217 }
6218
6219 /* Return the lower bound for a sub-range. PAIR is the sub-range in
6220 question. */
6221
6222 wide_int
6223 value_range_base::lower_bound (unsigned pair) const
6224 {
6225 if (symbolic_p ())
6226 return normalize_symbolics ().lower_bound (pair);
6227
6228 gcc_checking_assert (!undefined_p ());
6229 gcc_checking_assert (pair + 1 <= num_pairs ());
6230 tree t = NULL;
6231 if (m_kind == VR_ANTI_RANGE)
6232 {
6233 tree typ = type ();
6234 if (pair == 1 || vrp_val_is_min (m_min, true))
6235 t = wide_int_to_tree (typ, wi::to_wide (m_max) + 1);
6236 else
6237 t = vrp_val_min (typ, true);
6238 }
6239 else
6240 t = m_min;
6241 return wi::to_wide (t);
6242 }
6243
6244 /* Return the upper bound for a sub-range. PAIR is the sub-range in
6245 question. */
6246
6247 wide_int
6248 value_range_base::upper_bound (unsigned pair) const
6249 {
6250 if (symbolic_p ())
6251 return normalize_symbolics ().upper_bound (pair);
6252
6253 gcc_checking_assert (!undefined_p ());
6254 gcc_checking_assert (pair + 1 <= num_pairs ());
6255 tree t = NULL;
6256 if (m_kind == VR_ANTI_RANGE)
6257 {
6258 tree typ = type ();
6259 if (pair == 1 || vrp_val_is_min (m_min, true))
6260 t = vrp_val_max (typ, true);
6261 else
6262 t = wide_int_to_tree (typ, wi::to_wide (m_min) - 1);
6263 }
6264 else
6265 t = m_max;
6266 return wi::to_wide (t);
6267 }
6268
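/* Illustration only (function name invented, constants arbitrary): for
   int, the anti-range ~[5, 10] decomposes into the two sub-ranges
   [INT_MIN, 4] and [11, INT_MAX], which is what the pair accessors
   above hand back.  */

DEBUG_FUNCTION void
debug_anti_range_pairs ()
{
  tree t = integer_type_node;
  value_range_base r (VR_ANTI_RANGE,
		      build_int_cst (t, 5), build_int_cst (t, 10));
  gcc_assert (r.num_pairs () == 2);
  /* Pair 0 ends just below the excluded interval...  */
  gcc_assert (wi::eq_p (r.upper_bound (0), wi::to_wide (build_int_cst (t, 4))));
  /* ...and pair 1 starts just above it.  */
  gcc_assert (wi::eq_p (r.lower_bound (1), wi::to_wide (build_int_cst (t, 11))));
}
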
6269 /* Return the highest bound in a range. */
6270
6271 wide_int
6272 value_range_base::upper_bound () const
6273 {
6274 unsigned pairs = num_pairs ();
6275 gcc_checking_assert (pairs > 0);
6276 return upper_bound (pairs - 1);
6277 }
6278
6279 /* Return TRUE if the range contains the INTEGER_CST CST. */
6280
6281 bool
6282 value_range_base::contains_p (tree cst) const
6283 {
6284 gcc_checking_assert (TREE_CODE (cst) == INTEGER_CST);
6285 if (symbolic_p ())
6286 return normalize_symbolics ().contains_p (cst);
6287 return value_inside_range (cst) == 1;
6288 }
6289
6290 /* Invert the range in place: VR_RANGE becomes VR_ANTI_RANGE and vice versa. */
6291
6292 void
6293 value_range_base::invert ()
6294 {
6295 if (m_kind == VR_RANGE)
6296 m_kind = VR_ANTI_RANGE;
6297 else if (m_kind == VR_ANTI_RANGE)
6298 m_kind = VR_RANGE;
6299 else
6300 gcc_unreachable ();
6301 }
6302
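/* A minimal illustration, not used by the pass (function name invented,
   constants arbitrary): inversion flips the kind in place and leaves
   the bounds alone, so membership flips with it.  */

DEBUG_FUNCTION void
debug_invert_example ()
{
  tree t = integer_type_node;
  value_range_base r (VR_RANGE, build_int_cst (t, 5), build_int_cst (t, 10));
  gcc_assert (r.contains_p (build_int_cst (t, 7)));
  r.invert ();
  gcc_assert (r.kind () == VR_ANTI_RANGE
	      && !r.contains_p (build_int_cst (t, 7)));
}
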
6303 /* Range union, but for references. */
6304
6305 void
6306 value_range_base::union_ (const value_range_base &r)
6307 {
6308 /* Disable details for now, because it makes the ranger dump
6309 unnecessarily verbose. */
6310 bool details = dump_flags & TDF_DETAILS;
6311 if (details)
6312 dump_flags &= ~TDF_DETAILS;
6313 union_ (&r);
6314 if (details)
6315 dump_flags |= TDF_DETAILS;
6316 }
6317
6318 /* Range intersect, but for references. */
6319
6320 void
6321 value_range_base::intersect (const value_range_base &r)
6322 {
6323 /* Disable details for now, because it makes the ranger dump
6324 unnecessarily verbose. */
6325 bool details = dump_flags & TDF_DETAILS;
6326 if (details)
6327 dump_flags &= ~TDF_DETAILS;
6328 intersect (&r);
6329 if (details)
6330 dump_flags |= TDF_DETAILS;
6331 }
6332
6333 /* Return TRUE if two types are compatible for range operations. */
6334
6335 static bool
6336 range_compatible_p (tree t1, tree t2)
6337 {
6338 if (POINTER_TYPE_P (t1) && POINTER_TYPE_P (t2))
6339 return true;
6340
6341 return types_compatible_p (t1, t2);
6342 }
6343
6344 bool
6345 value_range_base::operator== (const value_range_base &r) const
6346 {
6347 if (undefined_p ())
6348 return r.undefined_p ();
6349
6350 if (num_pairs () != r.num_pairs ()
6351 || !range_compatible_p (type (), r.type ()))
6352 return false;
6353
6354 for (unsigned p = 0; p < num_pairs (); p++)
6355 if (wi::ne_p (lower_bound (p), r.lower_bound (p))
6356 || wi::ne_p (upper_bound (p), r.upper_bound (p)))
6357 return false;
6358
6359 return true;
6360 }
6361
6362 /* Visit all arguments for PHI node PHI that flow through executable
6363 edges. If a valid value range can be derived from all the incoming
6364 value ranges, set a new range for the LHS of PHI. */
6365
6366 enum ssa_prop_result
6367 vrp_prop::visit_phi (gphi *phi)
6368 {
6369 tree lhs = PHI_RESULT (phi);
6370 value_range vr_result;
6371 extract_range_from_phi_node (phi, &vr_result);
6372 if (update_value_range (lhs, &vr_result))
6373 {
6374 if (dump_file && (dump_flags & TDF_DETAILS))
6375 {
6376 fprintf (dump_file, "Found new range for ");
6377 print_generic_expr (dump_file, lhs);
6378 fprintf (dump_file, ": ");
6379 dump_value_range (dump_file, &vr_result);
6380 fprintf (dump_file, "\n");
6381 }
6382
6383 if (vr_result.varying_p ())
6384 return SSA_PROP_VARYING;
6385
6386 return SSA_PROP_INTERESTING;
6387 }
6388
6389 /* Nothing changed, don't add outgoing edges. */
6390 return SSA_PROP_NOT_INTERESTING;
6391 }
6392
6393 class vrp_folder : public substitute_and_fold_engine
6394 {
6395 public:
6396 vrp_folder () : substitute_and_fold_engine (/* Fold all stmts. */ true) { }
6397 tree get_value (tree) FINAL OVERRIDE;
6398 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6399 bool fold_predicate_in (gimple_stmt_iterator *);
6400
6401 class vr_values *vr_values;
6402
6403 /* Delegators. */
6404 tree vrp_evaluate_conditional (tree_code code, tree op0,
6405 tree op1, gimple *stmt)
6406 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6407 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6408 { return vr_values->simplify_stmt_using_ranges (gsi); }
6409 tree op_with_constant_singleton_value_range (tree op)
6410 { return vr_values->op_with_constant_singleton_value_range (op); }
6411 };
6412
6413 /* If the statement pointed to by SI has a predicate whose value can be
6414 computed using the value range information computed by VRP, compute
6415 its value and return true. Otherwise, return false. */
6416
6417 bool
6418 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6419 {
6420 bool assignment_p = false;
6421 tree val;
6422 gimple *stmt = gsi_stmt (*si);
6423
6424 if (is_gimple_assign (stmt)
6425 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6426 {
6427 assignment_p = true;
6428 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6429 gimple_assign_rhs1 (stmt),
6430 gimple_assign_rhs2 (stmt),
6431 stmt);
6432 }
6433 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6434 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6435 gimple_cond_lhs (cond_stmt),
6436 gimple_cond_rhs (cond_stmt),
6437 stmt);
6438 else
6439 return false;
6440
6441 if (val)
6442 {
6443 if (assignment_p)
6444 val = fold_convert (gimple_expr_type (stmt), val);
6445
6446 if (dump_file)
6447 {
6448 fprintf (dump_file, "Folding predicate ");
6449 print_gimple_expr (dump_file, stmt, 0);
6450 fprintf (dump_file, " to ");
6451 print_generic_expr (dump_file, val);
6452 fprintf (dump_file, "\n");
6453 }
6454
6455 if (is_gimple_assign (stmt))
6456 gimple_assign_set_rhs_from_tree (si, val);
6457 else
6458 {
6459 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6460 gcond *cond_stmt = as_a <gcond *> (stmt);
6461 if (integer_zerop (val))
6462 gimple_cond_make_false (cond_stmt);
6463 else if (integer_onep (val))
6464 gimple_cond_make_true (cond_stmt);
6465 else
6466 gcc_unreachable ();
6467 }
6468
6469 return true;
6470 }
6471
6472 return false;
6473 }
6474
6475 /* Callback for substitute_and_fold folding the stmt at *SI. */
6476
6477 bool
6478 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6479 {
6480 if (fold_predicate_in (si))
6481 return true;
6482
6483 return simplify_stmt_using_ranges (si);
6484 }
6485
6486 /* If OP has a value range with a single constant value return that,
6487 otherwise return NULL_TREE. This returns OP itself if OP is a
6488 constant.
6489
6490 Implemented as a pure wrapper right now, but this will change. */
6491
6492 tree
6493 vrp_folder::get_value (tree op)
6494 {
6495 return op_with_constant_singleton_value_range (op);
6496 }
6497
6498 /* Return the LHS of any ASSERT_EXPR whose first argument is OP and
6499 whose defining statement dominates BB. If no such ASSERT_EXPR is
6500 found, return OP. */
6501
6502 static tree
6503 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6504 {
6505 imm_use_iterator imm_iter;
6506 gimple *use_stmt;
6507 use_operand_p use_p;
6508
6509 if (TREE_CODE (op) == SSA_NAME)
6510 {
6511 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6512 {
6513 use_stmt = USE_STMT (use_p);
6514 if (use_stmt != stmt
6515 && gimple_assign_single_p (use_stmt)
6516 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6517 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6518 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6519 return gimple_assign_lhs (use_stmt);
6520 }
6521 }
6522 return op;
6523 }
6524
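/* For example, given the hypothetical GIMPLE below (the SSA names are
   invented for illustration):

     x_7 = ASSERT_EXPR <x_3, x_3 > 0>;
     ...
     if (x_3 > 10)    <-- stmt, in a block dominated by the assert

   lhs_of_dominating_assert (x_3, bb, stmt) returns x_7, whose value
   range reflects the asserted x_3 > 0 predicate.  */
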
6525 /* A hack: a global used to pass the current vr_values to the jump threading simplifier, which cannot receive it through the callback API. */
6526 static class vr_values *x_vr_values;
6527
6528 /* A trivial wrapper so that we can present the generic jump threading
6529 code with a simple API for simplifying statements. STMT is the
6530 statement we want to simplify, WITHIN_STMT provides the location
6531 for any overflow warnings. */
6532
6533 static tree
6534 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6535 class avail_exprs_stack *avail_exprs_stack,
6536 basic_block bb)
6537 {
6538 /* First see if the conditional is in the hash table. */
6539 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6540 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6541 return cached_lhs;
6542
6543 vr_values *vr_values = x_vr_values;
6544 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6545 {
6546 tree op0 = gimple_cond_lhs (cond_stmt);
6547 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6548
6549 tree op1 = gimple_cond_rhs (cond_stmt);
6550 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6551
6552 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6553 op0, op1, within_stmt);
6554 }
6555
6556 /* We simplify a switch statement by trying to determine which case label
6557 will be taken. If we are successful then we return the corresponding
6558 CASE_LABEL_EXPR. */
6559 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6560 {
6561 tree op = gimple_switch_index (switch_stmt);
6562 if (TREE_CODE (op) != SSA_NAME)
6563 return NULL_TREE;
6564
6565 op = lhs_of_dominating_assert (op, bb, stmt);
6566
6567 const value_range *vr = vr_values->get_value_range (op);
6568 if (vr->undefined_p ()
6569 || vr->varying_p ()
6570 || vr->symbolic_p ())
6571 return NULL_TREE;
6572
6573 if (vr->kind () == VR_RANGE)
6574 {
6575 size_t i, j;
6576 /* Get the range of labels that contain a part of the operand's
6577 value range. */
6578 find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
6579
6580 /* Is there only one such label? */
6581 if (i == j)
6582 {
6583 tree label = gimple_switch_label (switch_stmt, i);
6584
6585 /* The i'th label will be taken only if the value range of the
6586 operand is entirely within the bounds of this label. */
6587 if (CASE_HIGH (label) != NULL_TREE
6588 ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
6589 && tree_int_cst_compare (CASE_HIGH (label),
6590 vr->max ()) >= 0)
6591 : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
6592 && tree_int_cst_equal (vr->min (), vr->max ())))
6593 return label;
6594 }
6595
6596 /* If there are no such labels then the default label will be
6597 taken. */
6598 if (i > j)
6599 return gimple_switch_label (switch_stmt, 0);
6600 }
6601
6602 if (vr->kind () == VR_ANTI_RANGE)
6603 {
6604 unsigned n = gimple_switch_num_labels (switch_stmt);
6605 tree min_label = gimple_switch_label (switch_stmt, 1);
6606 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6607
6608 /* The default label will be taken only if the anti-range of the
6609 operand is entirely outside the bounds of all the (non-default)
6610 case labels. */
6611 if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
6612 && (CASE_HIGH (max_label) != NULL_TREE
6613 ? tree_int_cst_compare (vr->max (),
6614 CASE_HIGH (max_label)) >= 0
6615 : tree_int_cst_compare (vr->max (),
6616 CASE_LOW (max_label)) >= 0))
6617 return gimple_switch_label (switch_stmt, 0);
6618 }
6619
6620 return NULL_TREE;
6621 }
6622
6623 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6624 {
6625 tree lhs = gimple_assign_lhs (assign_stmt);
6626 if (TREE_CODE (lhs) == SSA_NAME
6627 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6628 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6629 && stmt_interesting_for_vrp (stmt))
6630 {
6631 edge dummy_e;
6632 tree dummy_tree;
6633 value_range new_vr;
6634 vr_values->extract_range_from_stmt (stmt, &dummy_e,
6635 &dummy_tree, &new_vr);
6636 tree singleton;
6637 if (new_vr.singleton_p (&singleton))
6638 return singleton;
6639 }
6640 }
6641
6642 return NULL_TREE;
6643 }
6644
6645 class vrp_dom_walker : public dom_walker
6646 {
6647 public:
6648 vrp_dom_walker (cdi_direction direction,
6649 class const_and_copies *const_and_copies,
6650 class avail_exprs_stack *avail_exprs_stack)
6651 : dom_walker (direction, REACHABLE_BLOCKS),
6652 m_const_and_copies (const_and_copies),
6653 m_avail_exprs_stack (avail_exprs_stack),
6654 m_dummy_cond (NULL) {}
6655
6656 virtual edge before_dom_children (basic_block);
6657 virtual void after_dom_children (basic_block);
6658
6659 class vr_values *vr_values;
6660
6661 private:
6662 class const_and_copies *m_const_and_copies;
6663 class avail_exprs_stack *m_avail_exprs_stack;
6664
6665 gcond *m_dummy_cond;
6666
6667 };
6668
6669 /* Called before processing dominator children of BB. We want to look
6670 at ASSERT_EXPRs and record information from them in the appropriate
6671 tables.
6672
6673 We could look at other statements here, but doing so is unlikely
6674 to significantly increase the number of jump threads we discover. */
6675
6676 edge
6677 vrp_dom_walker::before_dom_children (basic_block bb)
6678 {
6679 gimple_stmt_iterator gsi;
6680
6681 m_avail_exprs_stack->push_marker ();
6682 m_const_and_copies->push_marker ();
6683 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6684 {
6685 gimple *stmt = gsi_stmt (gsi);
6686 if (gimple_assign_single_p (stmt)
6687 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6688 {
6689 tree rhs1 = gimple_assign_rhs1 (stmt);
6690 tree cond = TREE_OPERAND (rhs1, 1);
6691 tree inverted = invert_truthvalue (cond);
6692 vec<cond_equivalence> p;
6693 p.create (3);
6694 record_conditions (&p, cond, inverted);
6695 for (unsigned int i = 0; i < p.length (); i++)
6696 m_avail_exprs_stack->record_cond (&p[i]);
6697
6698 tree lhs = gimple_assign_lhs (stmt);
6699 m_const_and_copies->record_const_or_copy (lhs,
6700 TREE_OPERAND (rhs1, 0));
6701 p.release ();
6702 continue;
6703 }
6704 break;
6705 }
6706 return NULL;
6707 }
6708
6709 /* Called after processing dominator children of BB. This is where we
6710 actually call into the threader. */
6711 void
6712 vrp_dom_walker::after_dom_children (basic_block bb)
6713 {
6714 if (!m_dummy_cond)
6715 m_dummy_cond = gimple_build_cond (NE_EXPR,
6716 integer_zero_node, integer_zero_node,
6717 NULL, NULL);
6718
6719 x_vr_values = vr_values;
6720 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6721 m_avail_exprs_stack, NULL,
6722 simplify_stmt_for_jump_threading);
6723 x_vr_values = NULL;
6724
6725 m_avail_exprs_stack->pop_to_marker ();
6726 m_const_and_copies->pop_to_marker ();
6727 }
6728
6729 /* Blocks which have more than one predecessor and more than
6730 one successor present jump threading opportunities, i.e.,
6731 when the block is reached from a specific predecessor, we
6732 may be able to determine which of the outgoing edges will
6733 be traversed. When this optimization applies, we are able
6734 to avoid conditionals at runtime and we may expose secondary
6735 optimization opportunities.
6736
6737 This routine is effectively a driver for the generic jump
6738 threading code. It basically just presents the generic code
6739 with edges that may be suitable for jump threading.
6740
6741 Unlike DOM, we do not iterate VRP if jump threading was successful.
6742 While iterating may expose new opportunities for VRP, it is expected
6743 those opportunities would be very limited and the compile time cost
6744 to expose those opportunities would be significant.
6745
6746 As jump threading opportunities are discovered, they are registered
6747 for later realization. */
6748
6749 static void
6750 identify_jump_threads (class vr_values *vr_values)
6751 {
6752 /* Ugh. When substituting values earlier in this pass we can
6753 wipe the dominance information. So rebuild the dominator
6754 information as we need it within the jump threading code. */
6755 calculate_dominance_info (CDI_DOMINATORS);
6756
6757 /* We do not allow VRP information to be used for jump threading
6758 across a back edge in the CFG. Otherwise it becomes too
6759 difficult to avoid eliminating loop exit tests. Of course
6760 EDGE_DFS_BACK is not accurate at this time so we have to
6761 recompute it. */
6762 mark_dfs_back_edges ();
6763
6764 /* Allocate our unwinder stack to unwind any temporary equivalences
6765 that might be recorded. */
6766 const_and_copies *equiv_stack = new const_and_copies ();
6767
6768 hash_table<expr_elt_hasher> *avail_exprs
6769 = new hash_table<expr_elt_hasher> (1024);
6770 avail_exprs_stack *avail_exprs_stack
6771 = new class avail_exprs_stack (avail_exprs);
6772
6773 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6774 walker.vr_values = vr_values;
6775 walker.walk (cfun->cfg->x_entry_block_ptr);
6776
6777 /* We do not actually update the CFG or SSA graphs at this point as
6778 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6779 handle ASSERT_EXPRs gracefully. */
6780 delete equiv_stack;
6781 delete avail_exprs;
6782 delete avail_exprs_stack;
6783 }
6784
6785 /* Traverse all the blocks folding conditionals with known ranges. */
6786
6787 void
6788 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6789 {
6790 size_t i;
6791
6792 /* We have completed propagating through the lattice. */
6793 vr_values.set_lattice_propagation_complete ();
6794
6795 if (dump_file)
6796 {
6797 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6798 vr_values.dump_all_value_ranges (dump_file);
6799 fprintf (dump_file, "\n");
6800 }
6801
6802 /* Record computed ranges: set non-null for pointer SSA_NAMEs whose range excludes zero, and attach range info to non-pointer SSA_NAMEs. */
6803 for (i = 0; i < num_ssa_names; i++)
6804 {
6805 tree name = ssa_name (i);
6806 if (!name)
6807 continue;
6808
6809 const value_range *vr = get_value_range (name);
6810 if (!vr->constant_p ())
6811 continue;
6812
6813 if (POINTER_TYPE_P (TREE_TYPE (name))
6814 && range_includes_zero_p (vr) == 0)
6815 set_ptr_nonnull (name);
6816 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6817 set_range_info (name, *vr);
6818 }
6819
6820 /* If we're checking array refs, we want to merge information on
6821 the executability of each edge between vrp_folder and the
6822 check_array_bounds_dom_walker: each can clear the
6823 EDGE_EXECUTABLE flag on edges, in different ways.
6824
6825 Hence, if we're going to call check_all_array_refs, set
6826 the flag on every edge now, rather than in
6827 check_array_bounds_dom_walker's ctor; vrp_folder may clear
6828 it from some edges. */
6829 if (warn_array_bounds && warn_array_bounds_p)
6830 set_all_edges_as_executable (cfun);
6831
6832 class vrp_folder vrp_folder;
6833 vrp_folder.vr_values = &vr_values;
6834 vrp_folder.substitute_and_fold ();
6835
6836 if (warn_array_bounds && warn_array_bounds_p)
6837 check_all_array_refs ();
6838 }
6839
6840 /* Main entry point to VRP (Value Range Propagation). This pass is
6841 loosely based on J. R. C. Patterson, ``Accurate Static Branch
6842 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6843 Programming Language Design and Implementation, pp. 67-78, 1995.
6844 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6845
6846 This is essentially an SSA-CCP pass modified to deal with ranges
6847 instead of constants.
6848
6849 While propagating ranges, we may find that two or more SSA names
6850 have equivalent, though distinct ranges. For instance,
6851
6852 1 x_9 = p_3->a;
6853 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6854 3 if (p_4 == q_2)
6855 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6856 5 endif
6857 6 if (q_2)
6858
6859 In the code above, pointer p_5 has range [q_2, q_2], but from the
6860 code we can also determine that p_5 cannot be NULL and, if q_2 had
6861 a non-varying range, p_5's range should also be compatible with it.
6862
6863 These equivalences are created by two expressions: ASSERT_EXPR and
6864 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
6865 result of another assertion, we can use the fact that p_5 and
6866 p_4 are equivalent when evaluating p_5's range.
6867
6868 Together with value ranges, we also propagate these equivalences
6869 between names so that we can take advantage of information from
6870 multiple ranges when doing final replacement. Note that this
6871 equivalency relation is transitive but not symmetric.
6872
6873 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6874 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6875 in contexts where that assertion does not hold (e.g., in line 6).
6876
6877 TODO: the main difference between this pass and Patterson's is that
6878 we do not propagate edge probabilities. We only compute whether
6879 edges can be taken or not. That is, instead of having a spectrum
6880 of jump probabilities between 0 and 1, we only deal with 0, 1 and
6881 DON'T KNOW. In the future, it may be worthwhile to propagate
6882 probabilities to aid branch prediction. */
6883
6884 static unsigned int
6885 execute_vrp (bool warn_array_bounds_p)
6886 {
6887
6888 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6889 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6890 scev_initialize ();
6891
6892 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
6893 Inserting assertions may split edges which will invalidate
6894 EDGE_DFS_BACK. */
6895 insert_range_assertions ();
6896
6897 threadedge_initialize_values ();
6898
6899 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
6900 mark_dfs_back_edges ();
6901
6902 class vrp_prop vrp_prop;
6903 vrp_prop.vrp_initialize ();
6904 vrp_prop.ssa_propagate ();
6905 vrp_prop.vrp_finalize (warn_array_bounds_p);
6906
6907 /* We must identify jump threading opportunities before we release
6908 the datastructures built by VRP. */
6909 identify_jump_threads (&vrp_prop.vr_values);
6910
6911 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6912 was set by a type conversion can often be rewritten to use the
6913 RHS of the type conversion.
6914
6915 However, doing so inhibits jump threading through the comparison.
6916 So that transformation is not performed until after jump threading
6917 is complete. */
6918 basic_block bb;
6919 FOR_EACH_BB_FN (bb, cfun)
6920 {
6921 gimple *last = last_stmt (bb);
6922 if (last && gimple_code (last) == GIMPLE_COND)
6923 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6924 }
6925
6926 free_numbers_of_iterations_estimates (cfun);
6927
6928 /* ASSERT_EXPRs must be removed before finalizing jump threads
6929 as finalizing jump threads calls the CFG cleanup code which
6930 does not properly handle ASSERT_EXPRs. */
6931 remove_range_assertions ();
6932
6933 /* If we exposed any new variables, go ahead and put them into
6934 SSA form now, before we handle jump threading. This simplifies
6935 interactions between rewriting of _DECL nodes into SSA form
6936 and rewriting SSA_NAME nodes into SSA form after block
6937 duplication and CFG manipulation. */
6938 update_ssa (TODO_update_ssa);
6939
6940 /* We identified all the jump threading opportunities earlier, but could
6941 not transform the CFG at that time. This routine transforms the
6942 CFG and arranges for the dominator tree to be rebuilt if necessary.
6943
6944 Note the SSA graph update will occur during the normal TODO
6945 processing by the pass manager. */
6946 thread_through_all_blocks (false);
6947
6948 vrp_prop.vr_values.cleanup_edges_and_switches ();
6949 threadedge_finalize_values ();
6950
6951 scev_finalize ();
6952 loop_optimizer_finalize ();
6953 return 0;
6954 }
6955
6956 namespace {
6957
6958 const pass_data pass_data_vrp =
6959 {
6960 GIMPLE_PASS, /* type */
6961 "vrp", /* name */
6962 OPTGROUP_NONE, /* optinfo_flags */
6963 TV_TREE_VRP, /* tv_id */
6964 PROP_ssa, /* properties_required */
6965 0, /* properties_provided */
6966 0, /* properties_destroyed */
6967 0, /* todo_flags_start */
6968 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6969 };
6970
6971 class pass_vrp : public gimple_opt_pass
6972 {
6973 public:
6974 pass_vrp (gcc::context *ctxt)
6975 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6976 {}
6977
6978 /* opt_pass methods: */
6979 opt_pass * clone () { return new pass_vrp (m_ctxt); }
6980 void set_pass_param (unsigned int n, bool param)
6981 {
6982 gcc_assert (n == 0);
6983 warn_array_bounds_p = param;
6984 }
6985 virtual bool gate (function *) { return flag_tree_vrp != 0; }
6986 virtual unsigned int execute (function *)
6987 { return execute_vrp (warn_array_bounds_p); }
6988
6989 private:
6990 bool warn_array_bounds_p;
6991 }; // class pass_vrp
6992
6993 } // anon namespace
6994
6995 gimple_opt_pass *
6996 make_pass_vrp (gcc::context *ctxt)
6997 {
6998 return new pass_vrp (ctxt);
6999 }
7000
7001
7002 /* Worker for determine_value_range. */
7003
7004 static void
7005 determine_value_range_1 (value_range_base *vr, tree expr)
7006 {
7007 if (BINARY_CLASS_P (expr))
7008 {
7009 value_range_base vr0, vr1;
7010 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
7011 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
7012 range_fold_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
7013 &vr0, &vr1);
7014 }
7015 else if (UNARY_CLASS_P (expr))
7016 {
7017 value_range_base vr0;
7018 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
7019 range_fold_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
7020 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
7021 }
7022 else if (TREE_CODE (expr) == INTEGER_CST)
7023 vr->set (expr);
7024 else
7025 {
7026 value_range_kind kind;
7027 wide_int min, max;
7028 /* For SSA names try to extract range info computed by VRP. Otherwise
7029 fall back to varying. */
7030 if (TREE_CODE (expr) == SSA_NAME
7031 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
7032 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
7033 vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
7034 wide_int_to_tree (TREE_TYPE (expr), max));
7035 else
7036 vr->set_varying (TREE_TYPE (expr));
7037 }
7038 }
7039
7040 /* Compute a value-range for EXPR; if it has constant bounds, store
7041 them in *MIN and *MAX and return the range kind, else return VR_VARYING. */
7042
7043 value_range_kind
7044 determine_value_range (tree expr, wide_int *min, wide_int *max)
7045 {
7046 value_range_base vr;
7047 determine_value_range_1 (&vr, expr);
7048 if (vr.constant_p ())
7049 {
7050 *min = wi::to_wide (vr.min ());
7051 *max = wi::to_wide (vr.max ());
7052 return vr.kind ();
7053 }
7054
7055 return VR_VARYING;
7056 }
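
/* A debugging sketch built on the above (function name invented); it
   assumes it is called from a debugger with a GENERIC integer
   expression and is not used anywhere in the compiler.  Prints the
   range determined for EXPR to stderr.  */

DEBUG_FUNCTION void
debug_determine_value_range (tree expr)
{
  wide_int min, max;
  value_range_kind kind = determine_value_range (expr, &min, &max);
  if (kind == VR_VARYING)
    fprintf (stderr, "VARYING\n");
  else
    {
      /* For VR_ANTI_RANGE, MIN and MAX bound the excluded interval.  */
      fprintf (stderr, "%s[", kind == VR_ANTI_RANGE ? "~" : "");
      print_generic_expr (stderr, wide_int_to_tree (TREE_TYPE (expr), min));
      fprintf (stderr, ", ");
      print_generic_expr (stderr, wide_int_to_tree (TREE_TYPE (expr), max));
      fprintf (stderr, "]\n");
    }
}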