re PR tree-optimization/61289 (Bad jump threading generates infinite loop)
[gcc.git] / gcc / tree-ssa-threadedge.c
1 /* SSA Jump Threading
2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3 Contributed by Jeff Law <law@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "cfgloop.h"
30 #include "function.h"
31 #include "timevar.h"
32 #include "dumpfile.h"
33 #include "pointer-set.h"
34 #include "tree-ssa-alias.h"
35 #include "internal-fn.h"
36 #include "gimple-expr.h"
37 #include "is-a.h"
38 #include "gimple.h"
39 #include "gimple-iterator.h"
40 #include "gimple-ssa.h"
41 #include "tree-cfg.h"
42 #include "tree-phinodes.h"
43 #include "ssa-iterators.h"
44 #include "stringpool.h"
45 #include "tree-ssanames.h"
46 #include "tree-ssa-propagate.h"
47 #include "tree-ssa-threadupdate.h"
48 #include "langhooks.h"
49 #include "params.h"
50 #include "tree-ssa-threadedge.h"
51 #include "builtins.h"
52
53 /* To avoid code explosion due to jump threading, we limit the
54 number of statements we are going to copy. This variable
55 holds the number of statements currently seen that we'll have
56 to copy as part of the jump threading process. */
57 static int stmt_count;
58
59 /* Array to record value-handles per SSA_NAME. */
60 vec<tree> ssa_name_values;
61
62 /* Set the value for the SSA name NAME to VALUE. */
63
64 void
65 set_ssa_name_value (tree name, tree value)
66 {
67 if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
68 ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
69 if (value && TREE_OVERFLOW_P (value))
70 value = drop_tree_overflow (value);
71 ssa_name_values[SSA_NAME_VERSION (name)] = value;
72 }
73
74 /* Initialize the per SSA_NAME value-handles array. */
75 void
76 threadedge_initialize_values (void)
77 {
78 gcc_assert (!ssa_name_values.exists ());
79 ssa_name_values.create (num_ssa_names);
80 }
81
82 /* Free the per SSA_NAME value-handle array. */
83 void
84 threadedge_finalize_values (void)
85 {
86 ssa_name_values.release ();
87 }
88
89 /* Return TRUE if we may be able to thread an incoming edge into
90 BB to an outgoing edge from BB. Return FALSE otherwise. */
91
92 bool
93 potentially_threadable_block (basic_block bb)
94 {
95 gimple_stmt_iterator gsi;
96
97 /* If BB has a single successor or a single predecessor, then
98 there is no threading opportunity. */
99 if (single_succ_p (bb) || single_pred_p (bb))
100 return false;
101
102 /* If BB does not end with a conditional, switch or computed goto,
103 then there is no threading opportunity. */
104 gsi = gsi_last_bb (bb);
105 if (gsi_end_p (gsi)
106 || ! gsi_stmt (gsi)
107 || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
108 && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
109 && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
110 return false;
111
112 return true;
113 }
114
115 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
116 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
117 BB. If no such ASSERT_EXPR is found, return OP. */
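/* As a purely illustrative GIMPLE fragment (hypothetical names):

     bb 2:  if (x_1 > 0) goto bb 3; else goto bb 4;
     bb 3:  x_5 = ASSERT_EXPR <x_1, x_1 > 0>;

   For a use of x_1 in a block dominated by bb 3, this helper returns
   x_5, so later simplification can rely on the range information
   attached to x_5 by the ASSERT_EXPR. */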
118
119 static tree
120 lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
121 {
122 imm_use_iterator imm_iter;
123 gimple use_stmt;
124 use_operand_p use_p;
125
126 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
127 {
128 use_stmt = USE_STMT (use_p);
129 if (use_stmt != stmt
130 && gimple_assign_single_p (use_stmt)
131 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
132 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
133 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
134 {
135 return gimple_assign_lhs (use_stmt);
136 }
137 }
138 return op;
139 }
140
141 /* We record temporary equivalences created by PHI nodes or
142 statements within the target block. Doing so allows us to
143 identify more jump threading opportunities, even in blocks
144 with side effects.
145
146 We keep track of those temporary equivalences in a stack
147 structure so that we can unwind them when we're done processing
148 a particular edge. This routine handles unwinding the data
149 structures. */
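/* A sketch of the stack layout (SSA names purely illustrative): after
   recording the equivalences x_1 = 5 and then y_2 = z_3, the stack
   holds, from bottom to top,

     ... older entries or a NULL_TREE marker ...
     previous SSA_NAME_VALUE of x_1
     x_1
     previous SSA_NAME_VALUE of y_2
     y_2

   Unwinding pops and restores pairs from the top and stops either when
   the stack is empty or when it pops a NULL_TREE marker pushed by the
   caller. */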
150
151 static void
152 remove_temporary_equivalences (vec<tree> *stack)
153 {
154 while (stack->length () > 0)
155 {
156 tree prev_value, dest;
157
158 dest = stack->pop ();
159
160 /* A NULL value indicates we should stop unwinding, otherwise
161 pop off the next entry as they're recorded in pairs. */
162 if (dest == NULL)
163 break;
164
165 prev_value = stack->pop ();
166 set_ssa_name_value (dest, prev_value);
167 }
168 }
169
170 /* Record a temporary equivalence, saving enough information so that
171 we can restore the state of recorded equivalences when we're
172 done processing the current edge. */
173
174 static void
175 record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
176 {
177 tree prev_x = SSA_NAME_VALUE (x);
178
179 /* Y may be NULL if we are invalidating entries in the table. */
180 if (y && TREE_CODE (y) == SSA_NAME)
181 {
182 tree tmp = SSA_NAME_VALUE (y);
183 y = tmp ? tmp : y;
184 }
185
186 set_ssa_name_value (x, y);
187 stack->reserve (2);
188 stack->quick_push (prev_x);
189 stack->quick_push (x);
190 }
191
192 /* Record temporary equivalences created by PHIs at the target of the
193 edge E. Record unwind information for the equivalences onto STACK.
194
195 If a PHI which prevents threading is encountered, then return FALSE
196 indicating we should not thread this edge, else return TRUE. */
201
202 static bool
203 record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
204 {
205 gimple_stmt_iterator gsi;
206
207 /* Each PHI creates a temporary equivalence, record them.
208 These are context sensitive equivalences and will be removed
209 later. */
210 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
211 {
212 gimple phi = gsi_stmt (gsi);
213 tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
214 tree dst = gimple_phi_result (phi);
215
216 /* If the desired argument is not the same as this PHI's result
217 and it is set by a PHI in E->dest, then we can not thread
218 through E->dest. */
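/* For instance (a hypothetical loop header reached via backedge E):

     # a_1 = PHI <a_0(2), a_6(E)>
     # b_2 = PHI <b_0(2), a_1(E)>

   The argument of the second PHI on E is a_1, which is itself set by a
   PHI in E->dest, so we give up rather than record a bogus equivalence
   for b_2. */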
219 if (src != dst
220 && TREE_CODE (src) == SSA_NAME
221 && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
222 && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
223 return false;
224
225 /* We consider any non-virtual PHI as a statement since it
226 could result in a constant assignment or copy operation. */
227 if (!virtual_operand_p (dst))
228 stmt_count++;
229
230 record_temporary_equivalence (dst, src, stack);
231 }
232 return true;
233 }
234
235 /* Fold the RHS of an assignment statement and return it as a tree.
236 May return NULL_TREE if no simplification is possible. */
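/* For example (hypothetical statements): a GIMPLE_BINARY_RHS such as
   b_2 = 5 + 3 is handed to fold_binary and yields 8; for a COND_EXPR
   right-hand side the condition operand is folded first so that
   fold_ternary can see a constant selector when one is available. */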
237
238 static tree
239 fold_assignment_stmt (gimple stmt)
240 {
241 enum tree_code subcode = gimple_assign_rhs_code (stmt);
242
243 switch (get_gimple_rhs_class (subcode))
244 {
245 case GIMPLE_SINGLE_RHS:
246 return fold (gimple_assign_rhs1 (stmt));
247
248 case GIMPLE_UNARY_RHS:
249 {
250 tree lhs = gimple_assign_lhs (stmt);
251 tree op0 = gimple_assign_rhs1 (stmt);
252 return fold_unary (subcode, TREE_TYPE (lhs), op0);
253 }
254
255 case GIMPLE_BINARY_RHS:
256 {
257 tree lhs = gimple_assign_lhs (stmt);
258 tree op0 = gimple_assign_rhs1 (stmt);
259 tree op1 = gimple_assign_rhs2 (stmt);
260 return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
261 }
262
263 case GIMPLE_TERNARY_RHS:
264 {
265 tree lhs = gimple_assign_lhs (stmt);
266 tree op0 = gimple_assign_rhs1 (stmt);
267 tree op1 = gimple_assign_rhs2 (stmt);
268 tree op2 = gimple_assign_rhs3 (stmt);
269
270 /* Sadly, we have to handle conditional assignments specially
271 here, because fold expects all the operands of an expression
272 to be folded before the expression itself is folded, but we
273 can't just substitute the folded condition here. */
274 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
275 op0 = fold (op0);
276
277 return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
278 }
279
280 default:
281 gcc_unreachable ();
282 }
283 }
284
285 /* A new value has been assigned to LHS. If necessary, invalidate any
286 equivalences that are no longer valid. */
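/* Illustrative example (hypothetical names): if we had recorded
   SSA_NAME_VALUE (b_3) == a_2 and a_2 now receives a new value, then
   invalidate_equivalences (a_2, stack) records a NULL_TREE value for
   b_3, and for a_2 itself if it had one, so neither stale equivalence
   is consulted later on this path.  The unwind stack still remembers
   the old values so they can be restored afterwards. */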
287 static void
288 invalidate_equivalences (tree lhs, vec<tree> *stack)
289 {
290
291 for (unsigned int i = 1; i < num_ssa_names; i++)
292 if (ssa_name (i) && SSA_NAME_VALUE (ssa_name (i)) == lhs)
293 record_temporary_equivalence (ssa_name (i), NULL_TREE, stack);
294
295 if (SSA_NAME_VALUE (lhs))
296 record_temporary_equivalence (lhs, NULL_TREE, stack);
297 }
298
299 /* Try to simplify each statement in E->dest, ultimately leading to
300 a simplification of the COND_EXPR at the end of E->dest.
301
302 Record unwind information for temporary equivalences onto STACK.
303
304 Use SIMPLIFY (a pointer to a callback function) to further simplify
305 statements using pass specific information.
306
307 We might consider marking just those statements which ultimately
308 feed the COND_EXPR. It's not clear if the overhead of bookkeeping
309 would be recovered by trying to simplify fewer statements.
310
311 If we are able to simplify a statement into the form
312 SSA_NAME = (SSA_NAME | gimple invariant), then we can record
313 a context sensitive equivalence which may help us simplify
314 later statements in E->dest. */
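/* A hypothetical example of the simplification we are after: if
   traversing E established a_1 == 5, then

     b_2 = a_1 + 3;

   is temporarily copy-propagated to 5 + 3, folds to 8, and the context
   sensitive equivalence b_2 == 8 is recorded.  That may in turn let us
   resolve a conditional such as if (b_2 > 10) at the end of E->dest. */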
315
316 static gimple
317 record_temporary_equivalences_from_stmts_at_dest (edge e,
318 vec<tree> *stack,
319 tree (*simplify) (gimple,
320 gimple),
321 bool backedge_seen)
322 {
323 gimple stmt = NULL;
324 gimple_stmt_iterator gsi;
325 int max_stmt_count;
326
327 max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
328
329 /* Walk through each statement in the block recording equivalences
330 we discover. Note any equivalences we discover are context
331 sensitive (ie, are dependent on traversing E) and must be unwound
332 when we're finished processing E. */
333 for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
334 {
335 tree cached_lhs = NULL;
336
337 stmt = gsi_stmt (gsi);
338
339 /* Ignore empty statements and labels. */
340 if (gimple_code (stmt) == GIMPLE_NOP
341 || gimple_code (stmt) == GIMPLE_LABEL
342 || is_gimple_debug (stmt))
343 continue;
344
345 /* If the statement has volatile operands, then we assume we
346 can not thread through this block. This is overly
347 conservative in some ways. */
348 if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
349 return NULL;
350
351 /* If duplicating this block is going to cause too much code
352 expansion, then do not thread through this block. */
353 stmt_count++;
354 if (stmt_count > max_stmt_count)
355 return NULL;
356
357 /* If this is not a statement that sets an SSA_NAME to a new
358 value, then do not try to simplify this statement as it will
359 not simplify in any way that is helpful for jump threading. */
360 if ((gimple_code (stmt) != GIMPLE_ASSIGN
361 || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
362 && (gimple_code (stmt) != GIMPLE_CALL
363 || gimple_call_lhs (stmt) == NULL_TREE
364 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
365 {
366 /* STMT might still have DEFS and we need to invalidate any known
367 equivalences for them.
368
369 Consider if STMT is a GIMPLE_ASM with one or more outputs that
370 feeds a conditional inside a loop. We might derive an equivalence
371 due to the conditional. */
372 tree op;
373 ssa_op_iter iter;
374
375 if (backedge_seen)
376 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
377 invalidate_equivalences (op, stack);
378
379 continue;
380 }
381
382 /* The result of __builtin_object_size depends on all the arguments
383 of a phi node. Temporarily using only one edge produces invalid
384 results. For example
385
386 if (x < 6)
387 goto l;
388 else
389 goto l;
390
391 l:
392 r = PHI <&w[2].a[1](2), &a.a[6](3)>
393 __builtin_object_size (r, 0)
394
395 The result of __builtin_object_size is defined to be the maximum of
396 remaining bytes. If we use only one edge on the phi, the result will
397 change to be the remaining bytes for the corresponding phi argument.
398
399 Similarly for __builtin_constant_p:
400
401 r = PHI <1(2), 2(3)>
402 __builtin_constant_p (r)
403
404 Both PHI arguments are constant, but x ? 1 : 2 is still not
405 constant. */
406
407 if (is_gimple_call (stmt))
408 {
409 tree fndecl = gimple_call_fndecl (stmt);
410 if (fndecl
411 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
412 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
413 {
414 if (backedge_seen)
415 {
416 tree lhs = gimple_get_lhs (stmt);
417 invalidate_equivalences (lhs, stack);
418 }
419 continue;
420 }
421 }
422
423 /* At this point we have a statement which assigns an RHS to an
424 SSA_VAR on the LHS. We want to try and simplify this statement
425 to expose more context sensitive equivalences which in turn may
426 allow us to simplify the condition at the end of the block.
427
428 Handle simple copy operations as well as implied copies from
429 ASSERT_EXPRs. */
430 if (gimple_assign_single_p (stmt)
431 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
432 cached_lhs = gimple_assign_rhs1 (stmt);
433 else if (gimple_assign_single_p (stmt)
434 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
435 cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
436 else
437 {
438 /* A statement that is not a trivial copy or ASSERT_EXPR.
439 We're going to temporarily copy propagate the operands
440 and see if that allows us to simplify this statement. */
441 tree *copy;
442 ssa_op_iter iter;
443 use_operand_p use_p;
444 unsigned int num, i = 0;
445
446 num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
447 copy = XCNEWVEC (tree, num);
448
449 /* Make a copy of the uses & vuses into COPY, then cprop into
450 the operands. */
451 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
452 {
453 tree tmp = NULL;
454 tree use = USE_FROM_PTR (use_p);
455
456 copy[i++] = use;
457 if (TREE_CODE (use) == SSA_NAME)
458 tmp = SSA_NAME_VALUE (use);
459 if (tmp)
460 SET_USE (use_p, tmp);
461 }
462
463 /* Try to fold/lookup the new expression. Inserting the
464 expression into the hash table is unlikely to help. */
465 if (is_gimple_call (stmt))
466 cached_lhs = fold_call_stmt (stmt, false);
467 else
468 cached_lhs = fold_assignment_stmt (stmt);
469
470 if (!cached_lhs
471 || (TREE_CODE (cached_lhs) != SSA_NAME
472 && !is_gimple_min_invariant (cached_lhs)))
473 cached_lhs = (*simplify) (stmt, stmt);
474
475 /* Restore the statement's original uses/defs. */
476 i = 0;
477 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
478 SET_USE (use_p, copy[i++]);
479
480 free (copy);
481 }
482
483 /* Record the context sensitive equivalence if we were able
484 to simplify this statement.
485
486 If we have traversed a backedge at some point during threading,
487 then always enter something here. Either a real equivalence,
488 or a NULL_TREE equivalence which is effectively invalidation of
489 prior equivalences. */
490 if (cached_lhs
491 && (TREE_CODE (cached_lhs) == SSA_NAME
492 || is_gimple_min_invariant (cached_lhs)))
493 record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
494 else if (backedge_seen)
495 invalidate_equivalences (gimple_get_lhs (stmt), stack);
496 }
497 return stmt;
498 }
499
500 /* Once we have passed a backedge in the CFG when threading, we do not want to
501 utilize edge equivalences for simplification purposes. They are no longer
502 necessarily valid. We use this callback rather than the ones provided by
503 DOM/VRP to achieve that effect. */
504 static tree
505 dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
506 {
507 return NULL_TREE;
508 }
509
510 /* Simplify the control statement at the end of the block E->dest.
511
512 To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
513 is available to use/clobber in DUMMY_COND.
514
515 Use SIMPLIFY (a pointer to a callback function) to further simplify
516 a condition using pass specific information.
517
518 Return the simplified condition or NULL if simplification could
519 not be performed. */
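/* For example (hypothetical): if the recorded value of x_1 along E
   is 0 and E->dest ends with

     if (x_1 != 0) goto bb 5; else goto bb 6;

   then substituting the known value lets fold_binary reduce the
   comparison to boolean_false_node, telling the caller the edge to
   bb 6 is the one taken. */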
520
521 static tree
522 simplify_control_stmt_condition (edge e,
523 gimple stmt,
524 gimple dummy_cond,
525 tree (*simplify) (gimple, gimple),
526 bool handle_dominating_asserts)
527 {
528 tree cond, cached_lhs;
529 enum gimple_code code = gimple_code (stmt);
530
531 /* For comparisons, we have to update both operands, then try
532 to simplify the comparison. */
533 if (code == GIMPLE_COND)
534 {
535 tree op0, op1;
536 enum tree_code cond_code;
537
538 op0 = gimple_cond_lhs (stmt);
539 op1 = gimple_cond_rhs (stmt);
540 cond_code = gimple_cond_code (stmt);
541
542 /* Get the current value of both operands. */
543 if (TREE_CODE (op0) == SSA_NAME)
544 {
545 tree tmp = SSA_NAME_VALUE (op0);
546 if (tmp)
547 op0 = tmp;
548 }
549
550 if (TREE_CODE (op1) == SSA_NAME)
551 {
552 tree tmp = SSA_NAME_VALUE (op1);
553 if (tmp)
554 op1 = tmp;
555 }
556
557 if (handle_dominating_asserts)
558 {
559 /* Now see if the operand was consumed by an ASSERT_EXPR
560 which dominates E->src. If so, we want to replace the
561 operand with the LHS of the ASSERT_EXPR. */
562 if (TREE_CODE (op0) == SSA_NAME)
563 op0 = lhs_of_dominating_assert (op0, e->src, stmt);
564
565 if (TREE_CODE (op1) == SSA_NAME)
566 op1 = lhs_of_dominating_assert (op1, e->src, stmt);
567 }
568
569 /* We may need to canonicalize the comparison. For
570 example, op0 might be a constant while op1 is an
571 SSA_NAME. Failure to canonicalize will cause us to
572 miss threading opportunities. */
573 if (tree_swap_operands_p (op0, op1, false))
574 {
575 tree tmp;
576 cond_code = swap_tree_comparison (cond_code);
577 tmp = op0;
578 op0 = op1;
579 op1 = tmp;
580 }
581
582 /* Stuff the operator and operands into our dummy conditional
583 expression. */
584 gimple_cond_set_code (dummy_cond, cond_code);
585 gimple_cond_set_lhs (dummy_cond, op0);
586 gimple_cond_set_rhs (dummy_cond, op1);
587
588 /* We absolutely do not care about any type conversions;
589 we only care about a zero/nonzero value. */
590 fold_defer_overflow_warnings ();
591
592 cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
593 if (cached_lhs)
594 while (CONVERT_EXPR_P (cached_lhs))
595 cached_lhs = TREE_OPERAND (cached_lhs, 0);
596
597 fold_undefer_overflow_warnings ((cached_lhs
598 && is_gimple_min_invariant (cached_lhs)),
599 stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
600
601 /* If we have not simplified the condition down to an invariant,
602 then use the pass specific callback to simplify the condition. */
603 if (!cached_lhs
604 || !is_gimple_min_invariant (cached_lhs))
605 cached_lhs = (*simplify) (dummy_cond, stmt);
606
607 return cached_lhs;
608 }
609
610 if (code == GIMPLE_SWITCH)
611 cond = gimple_switch_index (stmt);
612 else if (code == GIMPLE_GOTO)
613 cond = gimple_goto_dest (stmt);
614 else
615 gcc_unreachable ();
616
617 /* We can have conditionals which just test the state of a variable
618 rather than use a relational operator. These are simpler to handle. */
619 if (TREE_CODE (cond) == SSA_NAME)
620 {
621 cached_lhs = cond;
622
623 /* Get the variable's current value from the equivalence chains.
624
625 It is possible to get loops in the SSA_NAME_VALUE chains
626 (consider threading the backedge of a loop where we have
627 a loop invariant SSA_NAME used in the condition). */
628 if (cached_lhs
629 && TREE_CODE (cached_lhs) == SSA_NAME
630 && SSA_NAME_VALUE (cached_lhs))
631 cached_lhs = SSA_NAME_VALUE (cached_lhs);
632
633 /* If we're dominated by a suitable ASSERT_EXPR, then
634 update CACHED_LHS appropriately. */
635 if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
636 cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);
637
638 /* If we haven't simplified to an invariant yet, then use the
639 pass specific callback to try and simplify it further. */
640 if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
641 cached_lhs = (*simplify) (stmt, stmt);
642 }
643 else
644 cached_lhs = NULL;
645
646 return cached_lhs;
647 }
648
649 /* Copy debug stmts from DEST's chain of single predecessors up to
650 SRC, so that we don't lose the bindings as PHI nodes are introduced
651 when DEST gains new predecessors. */
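/* For instance (hypothetical), if a bypassed predecessor block contains
   # DEBUG i => i_7 and DEST does not begin with its own binding of i,
   a copy of that debug bind is inserted at the head of DEST so the
   binding is not lost once DEST gains additional predecessors. */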
652 void
653 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
654 {
655 if (!MAY_HAVE_DEBUG_STMTS)
656 return;
657
658 if (!single_pred_p (dest))
659 return;
660
661 gcc_checking_assert (dest != src);
662
663 gimple_stmt_iterator gsi = gsi_after_labels (dest);
664 int i = 0;
665 const int alloc_count = 16; // ?? Should this be a PARAM?
666
667 /* Estimate the number of debug vars overridden in the beginning of
668 DEST, to tell how many we're going to need to begin with. */
669 for (gimple_stmt_iterator si = gsi;
670 i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
671 {
672 gimple stmt = gsi_stmt (si);
673 if (!is_gimple_debug (stmt))
674 break;
675 i++;
676 }
677
678 auto_vec<tree, alloc_count> fewvars;
679 pointer_set_t *vars = NULL;
680
681 /* If we're already starting with 3/4 of alloc_count, go for a
682 pointer_set, otherwise start with an unordered stack-allocated
683 VEC. */
684 if (i * 4 > alloc_count * 3)
685 vars = pointer_set_create ();
686
687 /* Now go through the initial debug stmts in DEST again, this time
688 actually inserting in VARS or FEWVARS. Don't bother checking for
689 duplicates in FEWVARS. */
690 for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
691 {
692 gimple stmt = gsi_stmt (si);
693 if (!is_gimple_debug (stmt))
694 break;
695
696 tree var;
697
698 if (gimple_debug_bind_p (stmt))
699 var = gimple_debug_bind_get_var (stmt);
700 else if (gimple_debug_source_bind_p (stmt))
701 var = gimple_debug_source_bind_get_var (stmt);
702 else
703 gcc_unreachable ();
704
705 if (vars)
706 pointer_set_insert (vars, var);
707 else
708 fewvars.quick_push (var);
709 }
710
711 basic_block bb = dest;
712
713 do
714 {
715 bb = single_pred (bb);
716 for (gimple_stmt_iterator si = gsi_last_bb (bb);
717 !gsi_end_p (si); gsi_prev (&si))
718 {
719 gimple stmt = gsi_stmt (si);
720 if (!is_gimple_debug (stmt))
721 continue;
722
723 tree var;
724
725 if (gimple_debug_bind_p (stmt))
726 var = gimple_debug_bind_get_var (stmt);
727 else if (gimple_debug_source_bind_p (stmt))
728 var = gimple_debug_source_bind_get_var (stmt);
729 else
730 gcc_unreachable ();
731
732 /* Discard debug bind overlaps. ??? Unlike stmts from src,
733 copied into a new block that will precede BB, debug bind
734 stmts in bypassed BBs may actually be discarded if
735 they're overwritten by subsequent debug bind stmts, which
736 might be a problem once we introduce stmt frontier notes
737 or somesuch. Adding `&& bb == src' to the condition
738 below will preserve all potentially relevant debug
739 notes. */
740 if (vars && pointer_set_insert (vars, var))
741 continue;
742 else if (!vars)
743 {
744 int i = fewvars.length ();
745 while (i--)
746 if (fewvars[i] == var)
747 break;
748 if (i >= 0)
749 continue;
750
751 if (fewvars.length () < (unsigned) alloc_count)
752 fewvars.quick_push (var);
753 else
754 {
755 vars = pointer_set_create ();
756 for (i = 0; i < alloc_count; i++)
757 pointer_set_insert (vars, fewvars[i]);
758 fewvars.release ();
759 pointer_set_insert (vars, var);
760 }
761 }
762
763 stmt = gimple_copy (stmt);
764 /* ??? Should we drop the location of the copy to denote
765 they're artificial bindings? */
766 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
767 }
768 }
769 while (bb != src && single_pred_p (bb));
770
771 if (vars)
772 pointer_set_destroy (vars);
773 else if (fewvars.exists ())
774 fewvars.release ();
775 }
776
777 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (i.e., it
778 need not be duplicated as part of the CFG/SSA updating process).
779
780 If it is threadable, add it to PATH and VISITED and recurse, ultimately
781 returning TRUE from the toplevel call. Otherwise do nothing and
782 return false.
783
784 DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
785 try and simplify the condition at the end of TAKEN_EDGE->dest. */
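/* A sketch of the situation this handles (hypothetical CFG): the block
   reached by TAKEN_EDGE may be an empty forwarding block, or may contain
   nothing but another conditional we can also resolve:

     bb 7:  [no statements]  ->  bb 8
     bb 8:  if (flag_2) ...      where flag_2 already has a known value

   Such blocks are appended to PATH with EDGE_NO_COPY_SRC_BLOCK, since
   they need not be duplicated, and the walk continues until a condition
   can no longer be determined. */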
786 static bool
787 thread_around_empty_blocks (edge taken_edge,
788 gimple dummy_cond,
789 bool handle_dominating_asserts,
790 tree (*simplify) (gimple, gimple),
791 bitmap visited,
792 vec<jump_thread_edge *> *path,
793 bool *backedge_seen_p)
794 {
795 basic_block bb = taken_edge->dest;
796 gimple_stmt_iterator gsi;
797 gimple stmt;
798 tree cond;
799
800 /* The key property of these blocks is that they need not be duplicated
801 when threading. Thus they can not have visible side effects such
802 as PHI nodes. */
803 if (!gsi_end_p (gsi_start_phis (bb)))
804 return false;
805
806 /* Skip over DEBUG statements at the start of the block. */
807 gsi = gsi_start_nondebug_bb (bb);
808
809 /* If the block has no statements, but does have a single successor, then
810 it's just a forwarding block and we can thread through it trivially.
811
812 However, note that just threading through empty blocks with single
813 successors is not inherently profitable. For the jump thread to
814 be profitable, we must avoid a runtime conditional.
815
816 By taking the return value from the recursive call, we get the
817 desired effect of returning TRUE when we found a profitable jump
818 threading opportunity and FALSE otherwise.
819
820 This is particularly important when this routine is called after
821 processing a joiner block. Returning TRUE too aggressively in
822 that case results in pointless duplication of the joiner block. */
823 if (gsi_end_p (gsi))
824 {
825 if (single_succ_p (bb))
826 {
827 taken_edge = single_succ_edge (bb);
828 if (!bitmap_bit_p (visited, taken_edge->dest->index))
829 {
830 jump_thread_edge *x
831 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
832 path->safe_push (x);
833 bitmap_set_bit (visited, taken_edge->dest->index);
834 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
835 if (*backedge_seen_p)
836 simplify = dummy_simplify;
837 return thread_around_empty_blocks (taken_edge,
838 dummy_cond,
839 handle_dominating_asserts,
840 simplify,
841 visited,
842 path,
843 backedge_seen_p);
844 }
845 }
846
847 /* We have a block with no statements, but multiple successors? */
848 return false;
849 }
850
851 /* The only real statement this block can have is a control
852 flow altering statement. Anything else stops the thread. */
853 stmt = gsi_stmt (gsi);
854 if (gimple_code (stmt) != GIMPLE_COND
855 && gimple_code (stmt) != GIMPLE_GOTO
856 && gimple_code (stmt) != GIMPLE_SWITCH)
857 return false;
858
859 /* If we have traversed a backedge, then we do not want to look
860 at certain expressions in the table that can not be relied upon.
861 Luckily the only code that looked at those expressions is the
862 SIMPLIFY callback, which we replace if we can no longer use it. */
863 if (*backedge_seen_p)
864 simplify = dummy_simplify;
865
866 /* Extract and simplify the condition. */
867 cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
868 simplify, handle_dominating_asserts);
869
870 /* If the condition can be statically computed and we have not already
871 visited the destination edge, then add the taken edge to our thread
872 path. */
873 if (cond && is_gimple_min_invariant (cond))
874 {
875 taken_edge = find_taken_edge (bb, cond);
876
877 if (bitmap_bit_p (visited, taken_edge->dest->index))
878 return false;
879 bitmap_set_bit (visited, taken_edge->dest->index);
880
881 jump_thread_edge *x
882 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
883 path->safe_push (x);
884 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
885 if (*backedge_seen_p)
886 simplify = dummy_simplify;
887
888 thread_around_empty_blocks (taken_edge,
889 dummy_cond,
890 handle_dominating_asserts,
891 simplify,
892 visited,
893 path,
894 backedge_seen_p);
895 return true;
896 }
897
898 return false;
899 }
900
901 /* We are exiting E->src, see if E->dest ends with a conditional
902 jump which has a known value when reached via E.
903
904 E->dest can have arbitrary side effects which, if threading is
905 successful, will be maintained.
906
907 Special care is necessary if E is a back edge in the CFG as we
908 may have already recorded equivalences for E->dest into our
909 various tables, including the result of the conditional at
910 the end of E->dest. Threading opportunities are severely
911 limited in that case to avoid short-circuiting the loop
912 incorrectly.
913
914 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
915 to avoid allocating memory.
916
917 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
918 the simplified condition with left-hand sides of ASSERT_EXPRs they are
919 used in.
920
921 STACK is used to undo temporary equivalences created during the walk of
922 E->dest.
923
924 SIMPLIFY is a pass-specific function used to simplify statements.
925
926 Our caller is responsible for restoring the state of the expression
927 and const_and_copies stacks.
928
929 A positive return value indicates success. A zero return value indicates
930 failure, but the block can still be duplicated as a joiner in a jump
931 thread path. A negative return value indicates the block should not be
932 duplicated and thus is not suitable for a joiner in a jump threading path. */
933
934 static int
935 thread_through_normal_block (edge e,
936 gimple dummy_cond,
937 bool handle_dominating_asserts,
938 vec<tree> *stack,
939 tree (*simplify) (gimple, gimple),
940 vec<jump_thread_edge *> *path,
941 bitmap visited,
942 bool *backedge_seen_p)
943 {
944 /* If we have traversed a backedge, then we do not want to look
945 at certain expressions in the table that can not be relied upon.
946 Luckily the only code that looked at those expressions is the
947 SIMPLIFY callback, which we replace if we can no longer use it. */
948 if (*backedge_seen_p)
949 simplify = dummy_simplify;
950
951 /* PHIs create temporary equivalences. */
952 if (!record_temporary_equivalences_from_phis (e, stack))
953 return 0;
954
955 /* Now walk each statement recording any context sensitive
956 temporary equivalences we can detect. */
957 gimple stmt
958 = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify,
959 *backedge_seen_p);
960
961 /* If we didn't look at all the statements, the most likely reason is
962 there were too many and thus duplicating this block is not profitable.
963
964 Also note if we do not look at all the statements, then we may not
965 have invalidated equivalences that are no longer valid if we threaded
966 around a loop. Thus we must signal to our caller that this block
967 is not suitable for use as a joiner in a threading path. */
968 if (!stmt)
969 return -1;
970
971 /* If we stopped at a COND_EXPR, GOTO_EXPR or SWITCH_EXPR, see if we
972 know which outgoing edge will be taken. */
973 if (gimple_code (stmt) == GIMPLE_COND
974 || gimple_code (stmt) == GIMPLE_GOTO
975 || gimple_code (stmt) == GIMPLE_SWITCH)
976 {
977 tree cond;
978
979 /* Extract and simplify the condition. */
980 cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
981 handle_dominating_asserts);
982
983 if (cond && is_gimple_min_invariant (cond))
984 {
985 edge taken_edge = find_taken_edge (e->dest, cond);
986 basic_block dest = (taken_edge ? taken_edge->dest : NULL);
987
988 /* DEST could be NULL for a computed jump to an absolute
989 address. */
990 if (dest == NULL
991 || dest == e->dest
992 || bitmap_bit_p (visited, dest->index))
993 return 0;
994
995 /* Only push the EDGE_START_JUMP_THREAD marker if this is
996 the first edge on the path. */
997 if (path->length () == 0)
998 {
999 jump_thread_edge *x
1000 = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1001 path->safe_push (x);
1002 *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
1003 }
1004
1005 jump_thread_edge *x
1006 = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
1007 path->safe_push (x);
1008 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
1009 if (*backedge_seen_p)
1010 simplify = dummy_simplify;
1011
1012 /* See if we can thread through DEST as well, this helps capture
1013 secondary effects of threading without having to re-run DOM or
1014 VRP.
1015
1016 We don't want to thread back to a block we have already
1017 visited. This may be overly conservative. */
1018 bitmap_set_bit (visited, dest->index);
1019 bitmap_set_bit (visited, e->dest->index);
1020 thread_around_empty_blocks (taken_edge,
1021 dummy_cond,
1022 handle_dominating_asserts,
1023 simplify,
1024 visited,
1025 path,
1026 backedge_seen_p);
1027 return 1;
1028 }
1029 }
1030 return 0;
1031 }
1032
1033 /* We are exiting E->src, see if E->dest ends with a conditional
1034 jump which has a known value when reached via E.
1035
1036 Special care is necessary if E is a back edge in the CFG as we
1037 may have already recorded equivalences for E->dest into our
1038 various tables, including the result of the conditional at
1039 the end of E->dest. Threading opportunities are severely
1040 limited in that case to avoid short-circuiting the loop
1041 incorrectly.
1042
1043 Note it is quite common for the first block inside a loop to
1044 end with a conditional which is either always true or always
1045 false when reached via the loop backedge. Thus we do not want
1046 to blindly disable threading across a loop backedge.
1047
1048 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
1049 to avoid allocating memory.
1050
1051 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
1052 the simplified condition with left-hand sides of ASSERT_EXPRs they are
1053 used in.
1054
1055 STACK is used to undo temporary equivalences created during the walk of
1056 E->dest.
1057
1058 SIMPLIFY is a pass-specific function used to simplify statements. */
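/* A hypothetical example of the overall transformation:

     bb 2:  if (x_1 > 10) goto bb 3; else goto bb 4;
     bb 4:  ...statements with side effects...
            if (x_1 > 10) goto bb 5; else goto bb 6;

   When bb 4 is entered from bb 2 on the edge where x_1 > 10 is false,
   the second conditional must also be false, so we register a jump
   thread along bb 2 -> bb 4 -> bb 6; the CFG updater then routes that
   traversal through a copy of bb 4 whose conditional is removed. */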
1059
1060 void
1061 thread_across_edge (gimple dummy_cond,
1062 edge e,
1063 bool handle_dominating_asserts,
1064 vec<tree> *stack,
1065 tree (*simplify) (gimple, gimple))
1066 {
1067 bitmap visited = BITMAP_ALLOC (NULL);
1068 bool backedge_seen;
1069
1070 stmt_count = 0;
1071
1072 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1073 bitmap_clear (visited);
1074 bitmap_set_bit (visited, e->src->index);
1075 bitmap_set_bit (visited, e->dest->index);
1076 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1077 if (backedge_seen)
1078 simplify = dummy_simplify;
1079
1080 int threaded = thread_through_normal_block (e, dummy_cond,
1081 handle_dominating_asserts,
1082 stack, simplify, path,
1083 visited, &backedge_seen);
1084 if (threaded > 0)
1085 {
1086 propagate_threaded_block_debug_into (path->last ()->e->dest,
1087 e->dest);
1088 remove_temporary_equivalences (stack);
1089 BITMAP_FREE (visited);
1090 register_jump_thread (path);
1091 return;
1092 }
1093 else
1094 {
1095 /* Negative and zero return values indicate no threading was possible,
1096 thus there should be no edges on the thread path and no need to walk
1097 through the vector entries. */
1098 gcc_assert (path->length () == 0);
1099 path->release ();
1100
1101 /* A negative status indicates the target block was deemed too big to
1102 duplicate. Just quit now rather than trying to use the block as
1103 a joiner in a jump threading path.
1104
1105 This prevents unnecessary code growth, but more importantly if we
1106 do not look at all the statements in the block, then we may have
1107 missed some invalidations if we had traversed a backedge! */
1108 if (threaded < 0)
1109 {
1110 BITMAP_FREE (visited);
1111 remove_temporary_equivalences (stack);
1112 return;
1113 }
1114 }
1115
1116 /* We were unable to determine which outgoing edge from E->dest is taken. However,
1117 we might still be able to thread through successors of E->dest. This
1118 often occurs when E->dest is a joiner block which then fans back out
1119 based on redundant tests.
1120
1121 If so, we'll copy E->dest and redirect the appropriate predecessor to
1122 the copy. Within the copy of E->dest, we'll thread one or more edges
1123 to points deeper in the CFG.
1124
1125 This is a stopgap until we have a more structured approach to path
1126 isolation. */
1127 {
1128 edge taken_edge;
1129 edge_iterator ei;
1130 bool found;
1131
1132 /* If E->dest has abnormal outgoing edges, then there's no guarantee
1133 we can safely redirect any of the edges. Just punt those cases. */
1134 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1135 if (taken_edge->flags & EDGE_ABNORMAL)
1136 {
1137 remove_temporary_equivalences (stack);
1138 BITMAP_FREE (visited);
1139 return;
1140 }
1141
1142 /* Look at each successor of E->dest to see if we can thread through it. */
1143 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1144 {
1145 /* Push a fresh marker so we can unwind the equivalences created
1146 for each of E->dest's successors. */
1147 stack->safe_push (NULL_TREE);
1148
1149 /* Avoid threading to any block we have already visited. */
1150 bitmap_clear (visited);
1151 bitmap_set_bit (visited, e->src->index);
1152 bitmap_set_bit (visited, e->dest->index);
1153 bitmap_set_bit (visited, taken_edge->dest->index);
1154 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1155
1156 /* Record whether or not we were able to thread through a successor
1157 of E->dest. */
1158 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1159 path->safe_push (x);
1160
1161 x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
1162 path->safe_push (x);
1163 found = false;
1164 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1165 backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
1166 if (backedge_seen)
1167 simplify = dummy_simplify;
1168 found = thread_around_empty_blocks (taken_edge,
1169 dummy_cond,
1170 handle_dominating_asserts,
1171 simplify,
1172 visited,
1173 path,
1174 &backedge_seen);
1175
1176 if (backedge_seen)
1177 simplify = dummy_simplify;
1178
1179 if (!found)
1180 found = thread_through_normal_block (path->last ()->e, dummy_cond,
1181 handle_dominating_asserts,
1182 stack, simplify, path, visited,
1183 &backedge_seen) > 0;
1184
1185 /* If we were able to thread through a successor of E->dest, then
1186 record the jump threading opportunity. */
1187 if (found)
1188 {
1189 propagate_threaded_block_debug_into (path->last ()->e->dest,
1190 taken_edge->dest);
1191 register_jump_thread (path);
1192 }
1193 else
1194 {
1195 delete_jump_thread_path (path);
1196 }
1197
1198 /* And unwind the equivalence table. */
1199 remove_temporary_equivalences (stack);
1200 }
1201 BITMAP_FREE (visited);
1202 }
1203
1204 remove_temporary_equivalences (stack);
1205 }