[Ada] Replace low-level Ekind membership tests with high-level Is_Formal
[gcc.git] / gcc / gimple-ssa-evrp-analyze.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2020 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "tree-pass.h"
27 #include "ssa.h"
28 #include "gimple-pretty-print.h"
29 #include "cfganal.h"
30 #include "gimple-fold.h"
31 #include "tree-eh.h"
32 #include "gimple-iterator.h"
33 #include "tree-cfg.h"
34 #include "tree-ssa-loop-manip.h"
35 #include "tree-ssa-loop.h"
36 #include "cfgloop.h"
37 #include "tree-scalar-evolution.h"
38 #include "tree-ssa-propagate.h"
39 #include "alloc-pool.h"
40 #include "domwalk.h"
41 #include "tree-cfgcleanup.h"
42 #include "vr-values.h"
43 #include "gimple-ssa-evrp-analyze.h"
44
45 evrp_range_analyzer::evrp_range_analyzer (bool update_global_ranges)
46 : stack (10), m_update_global_ranges (update_global_ranges)
47 {
48 edge e;
49 edge_iterator ei;
50 basic_block bb;
51 FOR_EACH_BB_FN (bb, cfun)
52 {
53 bb->flags &= ~BB_VISITED;
54 FOR_EACH_EDGE (e, ei, bb->preds)
55 e->flags |= EDGE_EXECUTABLE;
56 }
57 }
58
59 /* Push an unwinding marker onto the unwinding stack. */
60
61 void
62 evrp_range_analyzer::push_marker ()
63 {
64 stack.safe_push (std::make_pair (NULL_TREE, (value_range_equiv *)NULL));
65 }
66
67 /* Analyze ranges as we enter basic block BB. */
68
69 void
70 evrp_range_analyzer::enter (basic_block bb)
71 {
72 if (!optimize)
73 return;
74 push_marker ();
75 record_ranges_from_incoming_edge (bb);
76 record_ranges_from_phis (bb);
77 bb->flags |= BB_VISITED;
78 }
79
80 /* Find new range for NAME such that (OP CODE LIMIT) is true. */
81 value_range_equiv *
82 evrp_range_analyzer::try_find_new_range (tree name,
83 tree op, tree_code code, tree limit)
84 {
85 value_range_equiv vr;
86 const value_range_equiv *old_vr = get_value_range (name);
87
88 /* Discover VR when condition is true. */
89 extract_range_for_var_from_comparison_expr (name, code, op, limit, &vr);
90 /* If we found any usable VR, set the VR to ssa_name and create a
91 PUSH old value in the stack with the old VR. */
92 if (!vr.undefined_p () && !vr.varying_p ())
93 {
94 if (old_vr->equal_p (vr, /*ignore_equivs=*/true))
95 return NULL;
96 value_range_equiv *new_vr = allocate_value_range_equiv ();
97 new_vr->move (&vr);
98 return new_vr;
99 }
100 return NULL;
101 }
102
103 /* For LHS record VR in the SSA info. */
104 void
105 evrp_range_analyzer::set_ssa_range_info (tree lhs, value_range_equiv *vr)
106 {
107 gcc_assert (m_update_global_ranges);
108
109 /* Set the SSA with the value range. */
110 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
111 {
112 if (vr->constant_p ())
113 set_range_info (lhs, vr->kind (),
114 wi::to_wide (vr->min ()),
115 wi::to_wide (vr->max ()));
116 }
117 else if (POINTER_TYPE_P (TREE_TYPE (lhs))
118 && range_includes_zero_p (vr) == 0)
119 set_ptr_nonnull (lhs);
120 }
121
/* Return true if all uses of NAME are dominated by STMT or feed STMT
   via a chain of single immediate uses.  */

static bool
all_uses_feed_or_dominated_by_stmt (tree name, gimple *stmt)
{
  use_operand_p use_p, use2_p;
  imm_use_iterator iter;
  basic_block stmt_bb = gimple_bb (stmt);

  FOR_EACH_IMM_USE_FAST (use_p, iter, name)
    {
      gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
      /* Uses at STMT itself, debug uses, and uses in blocks strictly
	 dominated by STMT's block are acceptable as-is.  */
      if (use_stmt == stmt
	  || is_gimple_debug (use_stmt)
	  || (gimple_bb (use_stmt) != stmt_bb
	      && dominated_by_p (CDI_DOMINATORS,
				 gimple_bb (use_stmt), stmt_bb)))
	continue;
      /* Otherwise follow the chain of single immediate uses through
	 SSA assignments, hoping to eventually arrive at STMT.  */
      while (use_stmt != stmt
	     && is_gimple_assign (use_stmt)
	     && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	     && single_imm_use (gimple_assign_lhs (use_stmt),
				&use2_p, &use_stmt2))
	use_stmt = use_stmt2;
      /* The chain ended somewhere other than STMT: this use neither
	 feeds STMT nor is dominated by it.  */
      if (use_stmt != stmt)
	return false;
    }
  return true;
}
152
/* Record ranges implied by the predicate controlling entry to BB via
   its single (loop backedges ignored) predecessor edge.  New ranges
   are pushed onto the unwind stack; when the edge is effectively a
   fallthru they may also be reflected into the global SSA info.  */

void
evrp_range_analyzer::record_ranges_from_incoming_edge (basic_block bb)
{
  edge pred_e = single_pred_edge_ignoring_loop_edges (bb, false);
  if (pred_e)
    {
      gimple *stmt = last_stmt (pred_e->src);
      tree op0 = NULL_TREE;

      /* Only a GIMPLE_COND testing an integral or pointer SSA name
	 can give us usable ranges.  */
      if (stmt
	  && gimple_code (stmt) == GIMPLE_COND
	  && (op0 = gimple_cond_lhs (stmt))
	  && TREE_CODE (op0) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
	      || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Visiting controlling predicate ");
	      print_gimple_stmt (dump_file, stmt, 0);
	    }
	  /* Entering a new scope.  Try to see if we can find a VR
	     here.  */
	  tree op1 = gimple_cond_rhs (stmt);
	  if (TREE_OVERFLOW_P (op1))
	    op1 = drop_tree_overflow (op1);
	  tree_code code = gimple_cond_code (stmt);

	  /* Collect the assertions the predicate implies for both
	     condition operands.  */
	  auto_vec<assert_info, 8> asserts;
	  register_edge_assert_for (op0, pred_e, code, op0, op1, asserts);
	  if (TREE_CODE (op1) == SSA_NAME)
	    register_edge_assert_for (op1, pred_e, code, op0, op1, asserts);

	  /* Turn each assertion into a (name, improved range) pair.  */
	  auto_vec<std::pair<tree, value_range_equiv *>, 8> vrs;
	  for (unsigned i = 0; i < asserts.length (); ++i)
	    {
	      value_range_equiv *vr
		= try_find_new_range (asserts[i].name,
				      asserts[i].expr,
				      asserts[i].comp_code,
				      asserts[i].val);
	      if (vr)
		vrs.safe_push (std::make_pair (asserts[i].name, vr));
	    }

	  /* If pred_e is really a fallthru we can record value ranges
	     in SSA names as well.  */
	  bool is_fallthru = assert_unreachable_fallthru_edge_p (pred_e);

	  /* Push updated ranges only after finding all of them to avoid
	     ordering issues that can lead to worse ranges.  */
	  for (unsigned i = 0; i < vrs.length (); ++i)
	    {
	      /* But make sure we do not weaken ranges like when
		 getting first [64, +INF] and then ~[0, 0] from
		 conditions like (s & 0x3cc0) == 0).  */
	      const value_range_equiv *old_vr
		= get_value_range (vrs[i].first);
	      value_range tem (*old_vr);
	      tem.intersect (vrs[i].second);
	      if (tem.equal_p (*old_vr))
		{
		  /* Intersection changed nothing: drop the new range.  */
		  free_value_range (vrs[i].second);
		  continue;
		}
	      push_value_range (vrs[i].first, vrs[i].second);
	      if (is_fallthru
		  && m_update_global_ranges
		  && all_uses_feed_or_dominated_by_stmt (vrs[i].first, stmt))
		{
		  set_ssa_range_info (vrs[i].first, vrs[i].second);
		  maybe_set_nonzero_bits (pred_e, vrs[i].first);
		}
	    }
	}
    }
}
230
/* Record ranges for the PHI results in BB.  PHI argument ranges are
   only trusted when every executable predecessor has been visited;
   otherwise the result is VARYING, possibly refined by SCEV for loop
   header PHIs.  */

void
evrp_range_analyzer::record_ranges_from_phis (basic_block bb)
{
  /* Visit PHI stmts and discover any new VRs possible.  */
  bool has_unvisited_preds = false;
  edge_iterator ei;
  edge e;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_EXECUTABLE
	&& !(e->src->flags & BB_VISITED))
      {
	has_unvisited_preds = true;
	break;
      }

  for (gphi_iterator gpi = gsi_start_phis (bb);
       !gsi_end_p (gpi); gsi_next (&gpi))
    {
      gphi *phi = gpi.phi ();
      tree lhs = PHI_RESULT (phi);
      /* Virtual operands carry no value range.  */
      if (virtual_operand_p (lhs))
	continue;

      /* Skips floats and other things we can't represent in a
	 range.  */
      if (!value_range::supports_type_p (TREE_TYPE (lhs)))
	continue;

      value_range_equiv vr_result;
      bool interesting = stmt_interesting_for_vrp (phi);
      if (!has_unvisited_preds && interesting)
	extract_range_from_phi_node (phi, &vr_result);
      else
	{
	  vr_result.set_varying (TREE_TYPE (lhs));
	  /* When we have an unvisited executable predecessor we can't
	     use PHI arg ranges which may be still UNDEFINED but have
	     to use VARYING for them.  But we can still resort to
	     SCEV for loop header PHIs.  */
	  class loop *l;
	  if (scev_initialized_p ()
	      && interesting
	      && (l = loop_containing_stmt (phi))
	      && l->header == gimple_bb (phi))
	    adjust_range_with_scev (&vr_result, l, phi, lhs);
	}
      update_value_range (lhs, &vr_result);

      /* Set the SSA with the value range.  */
      if (m_update_global_ranges)
	set_ssa_range_info (lhs, &vr_result);
    }
}
284
/* Record ranges from STMT into our VR_VALUES class.  If TEMPORARY is
   true, then this is a temporary equivalence and should be recorded
   into the unwind table.  Otherwise record the equivalence into the
   global table.  */

void
evrp_range_analyzer::record_ranges_from_stmt (gimple *stmt, bool temporary)
{
  tree output = NULL_TREE;

  if (!optimize)
    return;

  /* Conditionals are handled by record_ranges_from_incoming_edge.  */
  if (dyn_cast <gcond *> (stmt))
    ;
  else if (stmt_interesting_for_vrp (stmt))
    {
      edge taken_edge;
      value_range_equiv vr;
      extract_range_from_stmt (stmt, &taken_edge, &output, &vr);
      if (output)
	{
	  /* Set the SSA with the value range.  There are two cases to
	     consider.  First (the most common) is we are processing
	     STMT in a context where its resulting range globally holds
	     and thus it can be reflected into the global ranges and need
	     not be unwound as we leave scope.

	     The second case occurs if we are processing a statement in
	     a context where the resulting range must not be reflected
	     into the global tables and must be unwound as we leave
	     the current context.  This happens in jump threading for
	     example.  */
	  if (!temporary)
	    {
	      /* Case one.  We can just update the underlying range
		 information as well as the global information.  */
	      update_value_range (output, &vr);
	      if (m_update_global_ranges)
		set_ssa_range_info (output, &vr);
	    }
	  else
	    {
	      /* We're going to need to unwind this range.  We cannot
		 use VR as that's a stack object.  We have to allocate
		 a new range and push the old range onto the stack.  We
		 also have to be very careful about sharing the underlying
		 bitmaps.  Ugh.  */
	      value_range_equiv *new_vr = allocate_value_range_equiv ();
	      new_vr->set (vr.min (), vr.max (), NULL, vr.kind ());
	      vr.equiv_clear ();
	      push_value_range (output, new_vr);
	    }
	}
      else
	set_defs_to_varying (stmt);
    }
  else
    set_defs_to_varying (stmt);

  /* See if we can derive a range for any of STMT's operands.  */
  tree op;
  ssa_op_iter i;
  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      tree value;
      enum tree_code comp_code;

      /* If OP is used in such a way that we can infer a value
	 range for it, and we don't find a previous assertion for
	 it, create a new assertion location node for OP.  */
      if (infer_value_range (stmt, op, &comp_code, &value))
	{
	  /* If we are able to infer a nonzero value range for OP,
	     then walk backwards through the use-def chain to see if OP
	     was set via a typecast.
	     If so, then we can also infer a nonzero value range
	     for the operand of the NOP_EXPR.  */
	  if (comp_code == NE_EXPR && integer_zerop (value))
	    {
	      tree t = op;
	      gimple *def_stmt = SSA_NAME_DEF_STMT (t);
	      /* Each pointer-to-pointer conversion in the chain lets
		 the nonzero property propagate to its source.  */
	      while (is_gimple_assign (def_stmt)
		     && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
		     && TREE_CODE
			  (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
		     && POINTER_TYPE_P
			  (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		{
		  t = gimple_assign_rhs1 (def_stmt);
		  def_stmt = SSA_NAME_DEF_STMT (t);

		  /* Add VR when (T COMP_CODE value) condition is
		     true.  */
		  value_range_equiv *op_range
		    = try_find_new_range (t, t, comp_code, value);
		  if (op_range)
		    push_value_range (t, op_range);
		}
	    }
	  /* Add VR when (OP COMP_CODE value) condition is true.  */
	  value_range_equiv *op_range = try_find_new_range (op, op,
							    comp_code, value);
	  if (op_range)
	    push_value_range (op, op_range);
	}
    }
}
393
394 /* Unwind recorded ranges to their most recent state. */
395
396 void
397 evrp_range_analyzer::pop_to_marker (void)
398 {
399 gcc_checking_assert (!stack.is_empty ());
400 while (stack.last ().first != NULL_TREE)
401 pop_value_range ();
402 stack.pop ();
403 }
404
405 /* Restore/pop VRs valid only for BB when we leave BB. */
406
407 void
408 evrp_range_analyzer::leave (basic_block bb ATTRIBUTE_UNUSED)
409 {
410 if (!optimize)
411 return;
412 pop_to_marker ();
413 }
414
415
416 /* Push the Value Range of VAR to the stack and update it with new VR. */
417
418 void
419 evrp_range_analyzer::push_value_range (tree var, value_range_equiv *vr)
420 {
421 if (dump_file && (dump_flags & TDF_DETAILS))
422 {
423 fprintf (dump_file, "pushing new range for ");
424 print_generic_expr (dump_file, var);
425 fprintf (dump_file, ": ");
426 dump_value_range (dump_file, vr);
427 fprintf (dump_file, "\n");
428 }
429 value_range_equiv *old_vr = swap_vr_value (var, vr);
430 stack.safe_push (std::make_pair (var, old_vr));
431 }
432
433 /* Pop a Value Range from the vrp_stack. */
434
435 void
436 evrp_range_analyzer::pop_value_range ()
437 {
438 std::pair<tree, value_range_equiv *> e = stack.pop ();
439 tree var = e.first;
440 value_range_equiv *vr = e.second;
441 if (dump_file && (dump_flags & TDF_DETAILS))
442 {
443 fprintf (dump_file, "popping range for ");
444 print_generic_expr (dump_file, var);
445 fprintf (dump_file, ", restoring ");
446 dump_value_range (dump_file, vr);
447 fprintf (dump_file, "\n");
448 }
449 /* We saved off a lattice entry, now give it back and release
450 the one we popped. */
451 value_range_equiv *popped_vr = swap_vr_value (var, vr);
452 if (popped_vr)
453 free_value_range (popped_vr);
454 }