tree-ssa-threadedge.c (thread_around_empty_blocks): New argument backedge_seen_p.
1 /* SSA Jump Threading
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Jeff Law <law@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "cfgloop.h"
30 #include "function.h"
31 #include "timevar.h"
32 #include "dumpfile.h"
33 #include "gimple.h"
34 #include "gimple-ssa.h"
35 #include "tree-cfg.h"
36 #include "tree-phinodes.h"
37 #include "ssa-iterators.h"
38 #include "tree-ssanames.h"
39 #include "tree-ssa-propagate.h"
40 #include "tree-ssa-threadupdate.h"
41 #include "langhooks.h"
42 #include "params.h"
43 #include "tree-ssa-threadedge.h"
44
45 /* To avoid code explosion due to jump threading, we limit the
46 number of statements we are going to copy. This variable
47 holds the number of statements currently seen that we'll have
48 to copy as part of the jump threading process. */
49 static int stmt_count;
50
51 /* Array to record value-handles per SSA_NAME. */
52 vec<tree> ssa_name_values;
53
54 /* Set the value for the SSA name NAME to VALUE. */
55
56 void
57 set_ssa_name_value (tree name, tree value)
58 {
59 if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
60 ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
61 if (value && TREE_OVERFLOW_P (value))
62 value = drop_tree_overflow (value);
63 ssa_name_values[SSA_NAME_VERSION (name)] = value;
64 }
65
66 /* Initialize the per SSA_NAME value-handles array. */
67 void
68 threadedge_initialize_values (void)
69 {
70 gcc_assert (!ssa_name_values.exists ());
71 ssa_name_values.create (num_ssa_names);
72 }
73
74 /* Free the per SSA_NAME value-handle array. */
75 void
76 threadedge_finalize_values (void)
77 {
78 ssa_name_values.release ();
79 }
80
81 /* Return TRUE if we may be able to thread an incoming edge into
82 BB to an outgoing edge from BB. Return FALSE otherwise. */
83
84 bool
85 potentially_threadable_block (basic_block bb)
86 {
87 gimple_stmt_iterator gsi;
88
89 /* If BB has a single successor or a single predecessor, then
90 there is no threading opportunity. */
91 if (single_succ_p (bb) || single_pred_p (bb))
92 return false;
93
94 /* If BB does not end with a conditional, switch or computed goto,
95 then there is no threading opportunity. */
96 gsi = gsi_last_bb (bb);
97 if (gsi_end_p (gsi)
98 || ! gsi_stmt (gsi)
99 || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
100 && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
101 && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
102 return false;
103
104 return true;
105 }
106
107 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
108 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
109 BB. If no such ASSERT_EXPR is found, return OP. */
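
/* For instance, given

     x_5 = ASSERT_EXPR <x_3, x_3 > 0>;

   a use of x_3 in a block dominated by that assignment can be
   replaced with x_5, whose value carries the asserted range
   information.  */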
110
111 static tree
112 lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
113 {
114 imm_use_iterator imm_iter;
115 gimple use_stmt;
116 use_operand_p use_p;
117
118 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
119 {
120 use_stmt = USE_STMT (use_p);
121 if (use_stmt != stmt
122 && gimple_assign_single_p (use_stmt)
123 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
124 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
125 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
126 {
127 return gimple_assign_lhs (use_stmt);
128 }
129 }
130 return op;
131 }
132
133 /* We record temporary equivalences created by PHI nodes or
134 statements within the target block. Doing so allows us to
135 identify more jump threading opportunities, even in blocks
136 with side effects.
137
138 We keep track of those temporary equivalences in a stack
139 structure so that we can unwind them when we're done processing
140 a particular edge. This routine handles unwinding the data
141 structures. */
142
143 static void
144 remove_temporary_equivalences (vec<tree> *stack)
145 {
146 while (stack->length () > 0)
147 {
148 tree prev_value, dest;
149
150 dest = stack->pop ();
151
152 /* A NULL value indicates we should stop unwinding; otherwise
153 pop off the next entry as they're recorded in pairs. */
154 if (dest == NULL)
155 break;
156
157 prev_value = stack->pop ();
158 set_ssa_name_value (dest, prev_value);
159 }
160 }
161
162 /* Record a temporary equivalence, saving enough information so that
163 we can restore the state of recorded equivalences when we're
164 done processing the current edge. */
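
/* For instance, recording a_1 == 5 followed by b_2 == a_1 pushes

     [ old value of a_1, a_1, old value of b_2, b_2 ]

   onto STACK (note that b_2 is actually recorded as equivalent to 5,
   since Y is first looked up through SSA_NAME_VALUE).  The pairs are
   popped and the old values restored by remove_temporary_equivalences
   above, which stops at a NULL marker if one was pushed.  */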
165
166 static void
167 record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
168 {
169 tree prev_x = SSA_NAME_VALUE (x);
170
171 if (TREE_CODE (y) == SSA_NAME)
172 {
173 tree tmp = SSA_NAME_VALUE (y);
174 y = tmp ? tmp : y;
175 }
176
177 set_ssa_name_value (x, y);
178 stack->reserve (2);
179 stack->quick_push (prev_x);
180 stack->quick_push (x);
181 }
182
183 /* Record temporary equivalences created by PHIs at the target of the
184 edge E. Record unwind information for the equivalences onto STACK.
185
186 If a PHI which prevents threading is encountered, then return FALSE
187 indicating we should not thread this edge, else return TRUE. */
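
/* For example, when traversing an edge from basic block 2 into a
   block containing

     x_3 = PHI <5(2), y_7(4)>

   we temporarily record the equivalence x_3 == 5, which may help
   simplify the remaining statements and the final condition in
   the block.  */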
188
189 static bool
190 record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
191 {
192 gimple_stmt_iterator gsi;
193
194 /* Each PHI creates a temporary equivalence; record them.
195 These are context sensitive equivalences and will be removed
196 later. */
197 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
198 {
199 gimple phi = gsi_stmt (gsi);
200 tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
201 tree dst = gimple_phi_result (phi);
202
203 /* If the desired argument is not the same as this PHI's result
204 and it is set by a PHI in E->dest, then we can not thread
205 through E->dest. */
206 if (src != dst
207 && TREE_CODE (src) == SSA_NAME
208 && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
209 && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
210 return false;
211
212 /* We consider any non-virtual PHI as a statement since it
213 could result in a constant assignment or copy operation. */
214 if (!virtual_operand_p (dst))
215 stmt_count++;
216
217 record_temporary_equivalence (dst, src, stack);
218 }
219 return true;
220 }
221
222 /* Fold the RHS of an assignment statement and return it as a tree.
223 May return NULL_TREE if no simplification is possible. */
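
/* For instance, if temporary copy propagation has replaced the
   operands of

     x_6 = a_1 + b_2;

   with the constants 3 and 4, the GIMPLE_BINARY_RHS case below
   folds the right-hand side to 7.  */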
224
225 static tree
226 fold_assignment_stmt (gimple stmt)
227 {
228 enum tree_code subcode = gimple_assign_rhs_code (stmt);
229
230 switch (get_gimple_rhs_class (subcode))
231 {
232 case GIMPLE_SINGLE_RHS:
233 return fold (gimple_assign_rhs1 (stmt));
234
235 case GIMPLE_UNARY_RHS:
236 {
237 tree lhs = gimple_assign_lhs (stmt);
238 tree op0 = gimple_assign_rhs1 (stmt);
239 return fold_unary (subcode, TREE_TYPE (lhs), op0);
240 }
241
242 case GIMPLE_BINARY_RHS:
243 {
244 tree lhs = gimple_assign_lhs (stmt);
245 tree op0 = gimple_assign_rhs1 (stmt);
246 tree op1 = gimple_assign_rhs2 (stmt);
247 return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
248 }
249
250 case GIMPLE_TERNARY_RHS:
251 {
252 tree lhs = gimple_assign_lhs (stmt);
253 tree op0 = gimple_assign_rhs1 (stmt);
254 tree op1 = gimple_assign_rhs2 (stmt);
255 tree op2 = gimple_assign_rhs3 (stmt);
256
257 /* Sadly, we have to handle conditional assignments specially
258 here, because fold expects all the operands of an expression
259 to be folded before the expression itself is folded, but we
260 can't just substitute the folded condition here. */
261 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
262 op0 = fold (op0);
263
264 return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
265 }
266
267 default:
268 gcc_unreachable ();
269 }
270 }
271
272 /* Try to simplify each statement in E->dest, ultimately leading to
273 a simplification of the COND_EXPR at the end of E->dest.
274
275 Record unwind information for temporary equivalences onto STACK.
276
277 Use SIMPLIFY (a pointer to a callback function) to further simplify
278 statements using pass specific information.
279
280 We might consider marking just those statements which ultimately
281 feed the COND_EXPR. It's not clear if the overhead of bookkeeping
282 would be recovered by trying to simplify fewer statements.
283
284 If we are able to simplify a statement into the form
285 SSA_NAME = (SSA_NAME | gimple invariant), then we can record
286 a context sensitive equivalence which may help us simplify
287 later statements in E->dest. */
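
/* For instance, if traversing E already gave us a_1 == 7 and E->dest
   contains

     b_2 = a_1 + 1;
     if (b_2 > 10) ...

   then temporarily propagating a_1 and folding the addition records
   the context sensitive equivalence b_2 == 8, which later allows the
   conditional to be resolved to false.  */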
288
289 static gimple
290 record_temporary_equivalences_from_stmts_at_dest (edge e,
291 vec<tree> *stack,
292 tree (*simplify) (gimple,
293 gimple))
294 {
295 gimple stmt = NULL;
296 gimple_stmt_iterator gsi;
297 int max_stmt_count;
298
299 max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
300
301 /* Walk through each statement in the block recording equivalences
302 we discover. Note any equivalences we discover are context
303 sensitive (ie, are dependent on traversing E) and must be unwound
304 when we're finished processing E. */
305 for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
306 {
307 tree cached_lhs = NULL;
308
309 stmt = gsi_stmt (gsi);
310
311 /* Ignore empty statements and labels. */
312 if (gimple_code (stmt) == GIMPLE_NOP
313 || gimple_code (stmt) == GIMPLE_LABEL
314 || is_gimple_debug (stmt))
315 continue;
316
317 /* If the statement is a volatile asm, then we assume we
318 can not thread through this block. This is overly
319 conservative in some ways. */
320 if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
321 return NULL;
322
323 /* If duplicating this block is going to cause too much code
324 expansion, then do not thread through this block. */
325 stmt_count++;
326 if (stmt_count > max_stmt_count)
327 return NULL;
328
329 /* If this is not a statement that sets an SSA_NAME to a new
330 value, then do not try to simplify this statement as it will
331 not simplify in any way that is helpful for jump threading. */
332 if ((gimple_code (stmt) != GIMPLE_ASSIGN
333 || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
334 && (gimple_code (stmt) != GIMPLE_CALL
335 || gimple_call_lhs (stmt) == NULL_TREE
336 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
337 continue;
338
339 /* The result of __builtin_object_size depends on all the arguments
340 of a phi node. Temporarily using only one edge produces invalid
341 results. For example
342
343 if (x < 6)
344 goto l;
345 else
346 goto l;
347
348 l:
349 r = PHI <&w[2].a[1](2), &a.a[6](3)>
350 __builtin_object_size (r, 0)
351
352 The result of __builtin_object_size is defined to be the maximum of
353 remaining bytes. If we use only one edge on the phi, the result will
354 change to be the remaining bytes for the corresponding phi argument.
355
356 Similarly for __builtin_constant_p:
357
358 r = PHI <1(2), 2(3)>
359 __builtin_constant_p (r)
360
361 Both PHI arguments are constant, but x ? 1 : 2 is still not
362 constant. */
363
364 if (is_gimple_call (stmt))
365 {
366 tree fndecl = gimple_call_fndecl (stmt);
367 if (fndecl
368 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
369 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
370 continue;
371 }
372
373 /* At this point we have a statement which assigns an RHS to an
374 SSA_VAR on the LHS. We want to try and simplify this statement
375 to expose more context sensitive equivalences which in turn may
376 allow us to simplify the condition at the end of the block.
377
378 Handle simple copy operations as well as implied copies from
379 ASSERT_EXPRs. */
380 if (gimple_assign_single_p (stmt)
381 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
382 cached_lhs = gimple_assign_rhs1 (stmt);
383 else if (gimple_assign_single_p (stmt)
384 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
385 cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
386 else
387 {
388 /* A statement that is not a trivial copy or ASSERT_EXPR.
389 We're going to temporarily copy propagate the operands
390 and see if that allows us to simplify this statement. */
391 tree *copy;
392 ssa_op_iter iter;
393 use_operand_p use_p;
394 unsigned int num, i = 0;
395
396 num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
397 copy = XCNEWVEC (tree, num);
398
399 /* Make a copy of the uses & vuses into COPY, then cprop into
400 the operands. */
401 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
402 {
403 tree tmp = NULL;
404 tree use = USE_FROM_PTR (use_p);
405
406 copy[i++] = use;
407 if (TREE_CODE (use) == SSA_NAME)
408 tmp = SSA_NAME_VALUE (use);
409 if (tmp)
410 SET_USE (use_p, tmp);
411 }
412
413 /* Try to fold/lookup the new expression. Inserting the
414 expression into the hash table is unlikely to help. */
415 if (is_gimple_call (stmt))
416 cached_lhs = fold_call_stmt (stmt, false);
417 else
418 cached_lhs = fold_assignment_stmt (stmt);
419
420 if (!cached_lhs
421 || (TREE_CODE (cached_lhs) != SSA_NAME
422 && !is_gimple_min_invariant (cached_lhs)))
423 cached_lhs = (*simplify) (stmt, stmt);
424
425 /* Restore the statement's original uses/defs. */
426 i = 0;
427 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
428 SET_USE (use_p, copy[i++]);
429
430 free (copy);
431 }
432
433 /* Record the context sensitive equivalence if we were able
434 to simplify this statement. */
435 if (cached_lhs
436 && (TREE_CODE (cached_lhs) == SSA_NAME
437 || is_gimple_min_invariant (cached_lhs)))
438 record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
439 }
440 return stmt;
441 }
442
443 /* Simplify the control statement at the end of the block E->dest.
444
445 To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
446 is available to use/clobber in DUMMY_COND.
447
448 Use SIMPLIFY (a pointer to a callback function) to further simplify
449 a condition using pass specific information.
450
451 Return the simplified condition or NULL if simplification could
452 not be performed. */
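
/* For example, with the temporary equivalence x_2 == 7 recorded, the
   condition

     if (x_2 > 5)

   has its operands replaced with 7 and 5 and folds to a gimple
   invariant (true), identifying the outgoing edge taken when E is
   traversed.  */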
453
454 static tree
455 simplify_control_stmt_condition (edge e,
456 gimple stmt,
457 gimple dummy_cond,
458 tree (*simplify) (gimple, gimple),
459 bool handle_dominating_asserts)
460 {
461 tree cond, cached_lhs;
462 enum gimple_code code = gimple_code (stmt);
463
464 /* For comparisons, we have to update both operands, then try
465 to simplify the comparison. */
466 if (code == GIMPLE_COND)
467 {
468 tree op0, op1;
469 enum tree_code cond_code;
470
471 op0 = gimple_cond_lhs (stmt);
472 op1 = gimple_cond_rhs (stmt);
473 cond_code = gimple_cond_code (stmt);
474
475 /* Get the current value of both operands. */
476 if (TREE_CODE (op0) == SSA_NAME)
477 {
478 tree tmp = SSA_NAME_VALUE (op0);
479 if (tmp)
480 op0 = tmp;
481 }
482
483 if (TREE_CODE (op1) == SSA_NAME)
484 {
485 tree tmp = SSA_NAME_VALUE (op1);
486 if (tmp)
487 op1 = tmp;
488 }
489
490 if (handle_dominating_asserts)
491 {
492 /* Now see if the operand was consumed by an ASSERT_EXPR
493 which dominates E->src. If so, we want to replace the
494 operand with the LHS of the ASSERT_EXPR. */
495 if (TREE_CODE (op0) == SSA_NAME)
496 op0 = lhs_of_dominating_assert (op0, e->src, stmt);
497
498 if (TREE_CODE (op1) == SSA_NAME)
499 op1 = lhs_of_dominating_assert (op1, e->src, stmt);
500 }
501
502 /* We may need to canonicalize the comparison. For
503 example, op0 might be a constant while op1 is an
504 SSA_NAME. Failure to canonicalize will cause us to
505 miss threading opportunities. */
506 if (tree_swap_operands_p (op0, op1, false))
507 {
508 tree tmp;
509 cond_code = swap_tree_comparison (cond_code);
510 tmp = op0;
511 op0 = op1;
512 op1 = tmp;
513 }
514
515 /* Stuff the operator and operands into our dummy conditional
516 expression. */
517 gimple_cond_set_code (dummy_cond, cond_code);
518 gimple_cond_set_lhs (dummy_cond, op0);
519 gimple_cond_set_rhs (dummy_cond, op1);
520
521 /* We absolutely do not care about any type conversions; we
522 only care about a zero/nonzero value. */
523 fold_defer_overflow_warnings ();
524
525 cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
526 if (cached_lhs)
527 while (CONVERT_EXPR_P (cached_lhs))
528 cached_lhs = TREE_OPERAND (cached_lhs, 0);
529
530 fold_undefer_overflow_warnings ((cached_lhs
531 && is_gimple_min_invariant (cached_lhs)),
532 stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
533
534 /* If we have not simplified the condition down to an invariant,
535 then use the pass specific callback to simplify the condition. */
536 if (!cached_lhs
537 || !is_gimple_min_invariant (cached_lhs))
538 cached_lhs = (*simplify) (dummy_cond, stmt);
539
540 return cached_lhs;
541 }
542
543 if (code == GIMPLE_SWITCH)
544 cond = gimple_switch_index (stmt);
545 else if (code == GIMPLE_GOTO)
546 cond = gimple_goto_dest (stmt);
547 else
548 gcc_unreachable ();
549
550 /* We can have conditionals which just test the state of a variable
551 rather than use a relational operator. These are simpler to handle. */
552 if (TREE_CODE (cond) == SSA_NAME)
553 {
554 cached_lhs = cond;
555
556 /* Get the variable's current value from the equivalence chains.
557
558 It is possible to get loops in the SSA_NAME_VALUE chains
559 (consider threading the backedge of a loop where we have
560 a loop invariant SSA_NAME used in the condition). */
561 if (cached_lhs
562 && TREE_CODE (cached_lhs) == SSA_NAME
563 && SSA_NAME_VALUE (cached_lhs))
564 cached_lhs = SSA_NAME_VALUE (cached_lhs);
565
566 /* If we're dominated by a suitable ASSERT_EXPR, then
567 update CACHED_LHS appropriately. */
568 if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
569 cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);
570
571 /* If we haven't simplified to an invariant yet, then use the
572 pass specific callback to try and simplify it further. */
573 if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
574 cached_lhs = (*simplify) (stmt, stmt);
575 }
576 else
577 cached_lhs = NULL;
578
579 return cached_lhs;
580 }
581
582 /* Return TRUE if the statement at the end of e->dest depends on
583 the output of any statement in BB. Otherwise return FALSE.
584
585 This is used when we are threading a backedge and need to ensure
586 that temporary equivalences from BB do not affect the condition
587 in e->dest. */
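
/* For example, if BB contains

     x_4 = x_1 + 1;

   and e->dest ends with

     if (x_4 < 10) ...

   then the condition depends on a value computed in BB and we return
   TRUE, since temporary equivalences recorded from BB could make the
   condition appear constant when it is not.  */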
588
589 static bool
590 cond_arg_set_in_bb (edge e, basic_block bb)
591 {
592 ssa_op_iter iter;
593 use_operand_p use_p;
594 gimple last = last_stmt (e->dest);
595
596 /* E->dest does not have to end with a control transferring
597 instruction. This can occur when we try to extend a jump
598 threading opportunity deeper into the CFG. In that case
599 it is safe for this check to return false. */
600 if (!last)
601 return false;
602
603 if (gimple_code (last) != GIMPLE_COND
604 && gimple_code (last) != GIMPLE_GOTO
605 && gimple_code (last) != GIMPLE_SWITCH)
606 return false;
607
608 FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
609 {
610 tree use = USE_FROM_PTR (use_p);
611
612 if (TREE_CODE (use) == SSA_NAME
613 && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
614 && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
615 return true;
616 }
617 return false;
618 }
619
620 /* Copy debug stmts from DEST's chain of single predecessors up to
621 SRC, so that we don't lose the bindings as PHI nodes are introduced
622 when DEST gains new predecessors. */
623 void
624 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
625 {
626 if (!MAY_HAVE_DEBUG_STMTS)
627 return;
628
629 if (!single_pred_p (dest))
630 return;
631
632 gcc_checking_assert (dest != src);
633
634 gimple_stmt_iterator gsi = gsi_after_labels (dest);
635 int i = 0;
636 const int alloc_count = 16; // ?? Should this be a PARAM?
637
638 /* Estimate the number of debug vars overridden in the beginning of
639 DEST, to tell how many we're going to need to begin with. */
640 for (gimple_stmt_iterator si = gsi;
641 i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
642 {
643 gimple stmt = gsi_stmt (si);
644 if (!is_gimple_debug (stmt))
645 break;
646 i++;
647 }
648
649 stack_vec<tree, alloc_count> fewvars;
650 pointer_set_t *vars = NULL;
651
652 /* If we're already starting with 3/4 of alloc_count, go for a
653 pointer_set, otherwise start with an unordered stack-allocated
654 VEC. */
655 if (i * 4 > alloc_count * 3)
656 vars = pointer_set_create ();
657
658 /* Now go through the initial debug stmts in DEST again, this time
659 actually inserting in VARS or FEWVARS. Don't bother checking for
660 duplicates in FEWVARS. */
661 for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
662 {
663 gimple stmt = gsi_stmt (si);
664 if (!is_gimple_debug (stmt))
665 break;
666
667 tree var;
668
669 if (gimple_debug_bind_p (stmt))
670 var = gimple_debug_bind_get_var (stmt);
671 else if (gimple_debug_source_bind_p (stmt))
672 var = gimple_debug_source_bind_get_var (stmt);
673 else
674 gcc_unreachable ();
675
676 if (vars)
677 pointer_set_insert (vars, var);
678 else
679 fewvars.quick_push (var);
680 }
681
682 basic_block bb = dest;
683
684 do
685 {
686 bb = single_pred (bb);
687 for (gimple_stmt_iterator si = gsi_last_bb (bb);
688 !gsi_end_p (si); gsi_prev (&si))
689 {
690 gimple stmt = gsi_stmt (si);
691 if (!is_gimple_debug (stmt))
692 continue;
693
694 tree var;
695
696 if (gimple_debug_bind_p (stmt))
697 var = gimple_debug_bind_get_var (stmt);
698 else if (gimple_debug_source_bind_p (stmt))
699 var = gimple_debug_source_bind_get_var (stmt);
700 else
701 gcc_unreachable ();
702
703 /* Discard debug bind overlaps. ??? Unlike stmts from src,
704 copied into a new block that will precede BB, debug bind
705 stmts in bypassed BBs may actually be discarded if
706 they're overwritten by subsequent debug bind stmts, which
707 might be a problem once we introduce stmt frontier notes
708 or somesuch. Adding `&& bb == src' to the condition
709 below will preserve all potentially relevant debug
710 notes. */
711 if (vars && pointer_set_insert (vars, var))
712 continue;
713 else if (!vars)
714 {
715 int i = fewvars.length ();
716 while (i--)
717 if (fewvars[i] == var)
718 break;
719 if (i >= 0)
720 continue;
721
722 if (fewvars.length () < (unsigned) alloc_count)
723 fewvars.quick_push (var);
724 else
725 {
726 vars = pointer_set_create ();
727 for (i = 0; i < alloc_count; i++)
728 pointer_set_insert (vars, fewvars[i]);
729 fewvars.release ();
730 pointer_set_insert (vars, var);
731 }
732 }
733
734 stmt = gimple_copy (stmt);
735 /* ??? Should we drop the location of the copy to denote
736 they're artificial bindings? */
737 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
738 }
739 }
740 while (bb != src && single_pred_p (bb));
741
742 if (vars)
743 pointer_set_destroy (vars);
744 else if (fewvars.exists ())
745 fewvars.release ();
746 }
747
748 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
749 need not be duplicated as part of the CFG/SSA updating process).
750
751 If it is threadable, add it to PATH and VISITED and recurse, ultimately
752 returning TRUE from the toplevel call. Otherwise do nothing and
753 return false.
754
755 DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
756 try and simplify the condition at the end of TAKEN_EDGE->dest. */
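
/* For example, if TAKEN_EDGE->dest has no PHIs, no real statements
   and a single successor, it is a forwarding block that need not be
   copied; we record it on PATH as EDGE_NO_COPY_SRC_BLOCK and recurse
   into its successor.  Likewise, if its only statement is a
   conditional whose value is known from the recorded equivalences,
   we follow the known outgoing edge.  */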
757 static bool
758 thread_around_empty_blocks (edge taken_edge,
759 gimple dummy_cond,
760 bool handle_dominating_asserts,
761 tree (*simplify) (gimple, gimple),
762 bitmap visited,
763 vec<jump_thread_edge *> *path,
764 bool *backedge_seen_p)
765 {
766 basic_block bb = taken_edge->dest;
767 gimple_stmt_iterator gsi;
768 gimple stmt;
769 tree cond;
770
771 /* The key property of these blocks is that they need not be duplicated
772 when threading. Thus they can not have visible side effects such
773 as PHI nodes. */
774 if (!gsi_end_p (gsi_start_phis (bb)))
775 return false;
776
777 /* Skip over DEBUG statements at the start of the block. */
778 gsi = gsi_start_nondebug_bb (bb);
779
780 /* If the block has no statements, but does have a single successor, then
781 it's just a forwarding block and we can thread through it trivially.
782
783 However, note that just threading through empty blocks with single
784 successors is not inherently profitable. For the jump thread to
785 be profitable, we must avoid a runtime conditional.
786
787 By taking the return value from the recursive call, we get the
788 desired effect of returning TRUE when we found a profitable jump
789 threading opportunity and FALSE otherwise.
790
791 This is particularly important when this routine is called after
792 processing a joiner block. Returning TRUE too aggressively in
793 that case results in pointless duplication of the joiner block. */
794 if (gsi_end_p (gsi))
795 {
796 if (single_succ_p (bb))
797 {
798 taken_edge = single_succ_edge (bb);
799 if (!bitmap_bit_p (visited, taken_edge->dest->index))
800 {
801 jump_thread_edge *x
802 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
803 path->safe_push (x);
804 bitmap_set_bit (visited, taken_edge->dest->index);
805 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
806 return thread_around_empty_blocks (taken_edge,
807 dummy_cond,
808 handle_dominating_asserts,
809 simplify,
810 visited,
811 path,
812 backedge_seen_p);
813 }
814 }
815
816 /* We have a block with no statements, but multiple successors? */
817 return false;
818 }
819
820 /* The only real statement this block can have is a control
821 flow altering statement. Anything else stops the thread. */
822 stmt = gsi_stmt (gsi);
823 if (gimple_code (stmt) != GIMPLE_COND
824 && gimple_code (stmt) != GIMPLE_GOTO
825 && gimple_code (stmt) != GIMPLE_SWITCH)
826 return false;
827
828 /* Extract and simplify the condition. */
829 cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
830 simplify, handle_dominating_asserts);
831
832 /* If the condition can be statically computed and we have not already
833 visited the destination block, then add the taken edge to our thread
834 path. */
835 if (cond && is_gimple_min_invariant (cond))
836 {
837 taken_edge = find_taken_edge (bb, cond);
838
839 if (bitmap_bit_p (visited, taken_edge->dest->index))
840 return false;
841 bitmap_set_bit (visited, taken_edge->dest->index);
842
843 jump_thread_edge *x
844 = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
845 path->safe_push (x);
846 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
847
848 thread_around_empty_blocks (taken_edge,
849 dummy_cond,
850 handle_dominating_asserts,
851 simplify,
852 visited,
853 path,
854 backedge_seen_p);
855 return true;
856 }
857
858 return false;
859 }
860
861 /* We are exiting E->src; see if E->dest ends with a conditional
862 jump which has a known value when reached via E.
863
864 E->dest can have arbitrary side effects which, if threading is
865 successful, will be maintained.
866
867 Special care is necessary if E is a back edge in the CFG as we
868 may have already recorded equivalences for E->dest into our
869 various tables, including the result of the conditional at
870 the end of E->dest. Threading opportunities are severely
871 limited in that case to avoid short-circuiting the loop
872 incorrectly.
873
874 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
875 to avoid allocating memory.
876
877 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
878 the simplified condition with left-hand sides of ASSERT_EXPRs they are
879 used in.
880
881 STACK is used to undo temporary equivalences created during the walk of
882 E->dest.
883
884 SIMPLIFY is a pass-specific function used to simplify statements.
885
886 Our caller is responsible for restoring the state of the expression
887 and const_and_copies stacks. */
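
/* On success, PATH contains

     E            EDGE_START_JUMP_THREAD
     TAKEN_EDGE   EDGE_COPY_SRC_BLOCK

   possibly followed by EDGE_NO_COPY_SRC_BLOCK entries added by
   thread_around_empty_blocks for blocks that can be threaded through
   without being duplicated.  */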
888
889 static bool
890 thread_through_normal_block (edge e,
891 gimple dummy_cond,
892 bool handle_dominating_asserts,
893 vec<tree> *stack,
894 tree (*simplify) (gimple, gimple),
895 vec<jump_thread_edge *> *path,
896 bitmap visited,
897 bool *backedge_seen_p)
898 {
899 /* If we have crossed a backedge, then we want to verify that the COND_EXPR,
900 SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
901 by any statements in e->dest. If it is affected, then it is not
902 safe to thread this edge. */
903 if (*backedge_seen_p
904 && cond_arg_set_in_bb (e, e->dest))
905 return false;
906
907 /* PHIs create temporary equivalences. */
908 if (!record_temporary_equivalences_from_phis (e, stack))
909 return false;
910
911 /* Now walk each statement recording any context sensitive
912 temporary equivalences we can detect. */
913 gimple stmt
914 = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
915 if (!stmt)
916 return false;
917
918 /* If we stopped at a COND_EXPR, SWITCH_EXPR or computed GOTO, see if
919 we know which outgoing edge will be taken. */
920 if (gimple_code (stmt) == GIMPLE_COND
921 || gimple_code (stmt) == GIMPLE_GOTO
922 || gimple_code (stmt) == GIMPLE_SWITCH)
923 {
924 tree cond;
925
926 /* Extract and simplify the condition. */
927 cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
928 handle_dominating_asserts);
929
930 if (cond && is_gimple_min_invariant (cond))
931 {
932 edge taken_edge = find_taken_edge (e->dest, cond);
933 basic_block dest = (taken_edge ? taken_edge->dest : NULL);
934
935 /* DEST could be NULL for a computed jump to an absolute
936 address. */
937 if (dest == NULL
938 || dest == e->dest
939 || bitmap_bit_p (visited, dest->index))
940 return false;
941
942 jump_thread_edge *x
943 = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
944 path->safe_push (x);
945 *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
946
947 x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
948 path->safe_push (x);
949 *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
950
951 /* See if we can thread through DEST as well; this helps capture
952 secondary effects of threading without having to re-run DOM or
953 VRP. */
954 if (!*backedge_seen_p
955 || ! cond_arg_set_in_bb (taken_edge, e->dest))
956 {
957 /* We don't want to thread back to a block we have already
958 visited. This may be overly conservative. */
959 bitmap_set_bit (visited, dest->index);
960 bitmap_set_bit (visited, e->dest->index);
961 thread_around_empty_blocks (taken_edge,
962 dummy_cond,
963 handle_dominating_asserts,
964 simplify,
965 visited,
966 path,
967 backedge_seen_p);
968 }
969 return true;
970 }
971 }
972 return false;
973 }
974
975 /* We are exiting E->src; see if E->dest ends with a conditional
976 jump which has a known value when reached via E.
977
978 Special care is necessary if E is a back edge in the CFG as we
979 may have already recorded equivalences for E->dest into our
980 various tables, including the result of the conditional at
981 the end of E->dest. Threading opportunities are severely
982 limited in that case to avoid short-circuiting the loop
983 incorrectly.
984
985 Note it is quite common for the first block inside a loop to
986 end with a conditional which is either always true or always
987 false when reached via the loop backedge. Thus we do not want
988 to blindly disable threading across a loop backedge.
989
990 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
991 to avoid allocating memory.
992
993 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
994 the simplified condition with left-hand sides of ASSERT_EXPRs they are
995 used in.
996
997 STACK is used to undo temporary equivalences created during the walk of
998 E->dest.
999
1000 SIMPLIFY is a pass-specific function used to simplify statements. */
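
/* If E->dest is a joiner block whose condition cannot be resolved, we
   still try each of its successors; such a path starts with

     E            EDGE_START_JUMP_THREAD
     TAKEN_EDGE   EDGE_COPY_SRC_JOINER_BLOCK

   and is only registered if thread_around_empty_blocks can extend it
   to a point where an outgoing edge becomes known.  */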
1001
1002 void
1003 thread_across_edge (gimple dummy_cond,
1004 edge e,
1005 bool handle_dominating_asserts,
1006 vec<tree> *stack,
1007 tree (*simplify) (gimple, gimple))
1008 {
1009 bitmap visited = BITMAP_ALLOC (NULL);
1010 bool backedge_seen;
1011
1012 stmt_count = 0;
1013
1014 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1015 bitmap_clear (visited);
1016 bitmap_set_bit (visited, e->src->index);
1017 bitmap_set_bit (visited, e->dest->index);
1018 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1019 if (thread_through_normal_block (e, dummy_cond, handle_dominating_asserts,
1020 stack, simplify, path, visited,
1021 &backedge_seen))
1022 {
1023 propagate_threaded_block_debug_into (path->last ()->e->dest,
1024 e->dest);
1025 remove_temporary_equivalences (stack);
1026 BITMAP_FREE (visited);
1027 register_jump_thread (path);
1028 return;
1029 }
1030 else
1031 {
1032 /* There should be no edges on the path, so no need to walk through
1033 the vector entries. */
1034 gcc_assert (path->length () == 0);
1035 path->release ();
1036 }
1037
1038 /* We were unable to determine which edge out of E->dest will be taken. However,
1039 we might still be able to thread through successors of E->dest. This
1040 often occurs when E->dest is a joiner block which then fans back out
1041 based on redundant tests.
1042
1043 If so, we'll copy E->dest and redirect the appropriate predecessor to
1044 the copy. Within the copy of E->dest, we'll thread one or more edges
1045 to points deeper in the CFG.
1046
1047 This is a stopgap until we have a more structured approach to path
1048 isolation. */
1049 {
1050 edge taken_edge;
1051 edge_iterator ei;
1052 bool found;
1053
1054 /* If E->dest has abnormal outgoing edges, then there's no guarantee
1055 we can safely redirect any of the edges. Just punt those cases. */
1056 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1057 if (taken_edge->flags & EDGE_ABNORMAL)
1058 {
1059 remove_temporary_equivalences (stack);
1060 BITMAP_FREE (visited);
1061 return;
1062 }
1063
1064 /* Look at each successor of E->dest to see if we can thread through it. */
1065 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1066 {
1067 /* Avoid threading to any block we have already visited. */
1068 bitmap_clear (visited);
1069 bitmap_set_bit (visited, taken_edge->dest->index);
1070 bitmap_set_bit (visited, e->dest->index);
1071 vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
1072
1073 /* Record whether or not we were able to thread through a successor
1074 of E->dest. */
1075 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1076 path->safe_push (x);
1077
1078 x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
1079 path->safe_push (x);
1080 found = false;
1081 backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
1082 backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
1083 if (!backedge_seen
1084 || ! cond_arg_set_in_bb (path->last ()->e, e->dest))
1085 found = thread_around_empty_blocks (taken_edge,
1086 dummy_cond,
1087 handle_dominating_asserts,
1088 simplify,
1089 visited,
1090 path,
1091 &backedge_seen);
1092
1093 /* If we were able to thread through a successor of E->dest, then
1094 record the jump threading opportunity. */
1095 if (found)
1096 {
1097 propagate_threaded_block_debug_into (path->last ()->e->dest,
1098 taken_edge->dest);
1099 register_jump_thread (path);
1100 }
1101 else
1102 {
1103 delete_jump_thread_path (path);
1104 }
1105 }
1106 BITMAP_FREE (visited);
1107 }
1108
1109 remove_temporary_equivalences (stack);
1110 }