/* SSA Jump Threading
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Jeff Law <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"

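/* Jump threading looks for blocks ending in a conditional whose
   outcome is known when the block is entered via a particular edge;
   such an incoming edge can be redirected past the conditional.  A
   purely illustrative source-level example (not from any testcase):

     if (a > 10)
       x = 1;
     else
       x = 2;
     if (a > 10)   <- true exactly when the first test was true
       foo ();

   When we traverse the edge out of the first conditional's THEN arm,
   the second test is known to be true, so that path can be threaded
   directly to the call of foo (), avoiding a runtime conditional.  */
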
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}
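
/* A sketch of the situation handled above, with hypothetical SSA
   names.  VRP inserts assertions such as:

     x_4 = ASSERT_EXPR <x_2, x_2 > 10>;

   If that assignment dominates BB, then x_4 is available in BB and
   carries the range information implied by the predicate, so we
   prefer to rewrite conditions in BB in terms of x_4.  */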
/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
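
/* A sketch of the unwind stack layout, assuming the caller pushed a
   NULL_TREE marker before walking the edge (as the unwinder in
   remove_temporary_equivalences expects).  After recording x_1 = 0
   and then y_2 = z_3, the stack holds:

     top -> y_2        name whose value was changed
            prev_y     its previous SSA_NAME_VALUE (may be NULL)
            x_1
            prev_x
            NULL_TREE  stop marker

   Unwinding pops <name, previous value> pairs and restores each
   previous value until the marker is reached.  */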
/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
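
/* For instance (an illustrative fragment, not from a real testcase),
   given a PHI in E->dest:

     x_3 = PHI <0(2), 5(3)>

   traversing the edge from block 2 records the temporary equivalence
   x_3 = 0, which may allow the conditional at the end of E->dest to
   fold to a constant.  */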
/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  vec<tree> *stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}
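
/* An illustrative walk, with hypothetical SSA names.  Suppose the
   traversal of E recorded a_1 = 5 from a PHI and E->dest contains:

     b_2 = a_1 + 1;

   Temporarily substituting a_1's recorded value yields 5 + 1, which
   folds to 6, so b_2 = 6 is recorded as another context sensitive
   equivalence for use by later statements and the final condition.  */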
/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
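
/* Continuing the illustrative example, with b_2 = 6 recorded, a block
   ending in:

     if (b_2 > 4) goto <bb 4>; else goto <bb 5>;

   has 6 > 4 stuffed into DUMMY_COND, which fold_binary reduces to an
   invariant true, so the edge to bb 4 is known to be taken.  */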
/* Return TRUE if the statement at the end of e->dest depends on
   the output of any statement in BB.   Otherwise return FALSE.

   This is used when we are threading a backedge and need to ensure
   that temporary equivalences from BB do not affect the condition
   in e->dest.  */

static bool
cond_arg_set_in_bb (edge e, basic_block bb)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  gimple last = last_stmt (e->dest);

  /* E->dest does not have to end with a control transferring
     instruction.  This can occur when we try to extend a jump
     threading opportunity deeper into the CFG.  In that case
     it is safe for this check to return false.  */
  if (!last)
    return false;

  if (gimple_code (last) != GIMPLE_COND
      && gimple_code (last) != GIMPLE_GOTO
      && gimple_code (last) != GIMPLE_SWITCH)
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (TREE_CODE (use) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
        return true;
    }
  return false;
}
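
/* A sketch of the hazard this guards against, with hypothetical
   names.  If E is a loop backedge and E->dest ends in:

     if (i_2 < n_4) goto <bb 3>; else goto <bb 5>;

   where i_2 is set by a statement in BB, then equivalences recorded
   while walking BB describe only the current iteration and must not
   be used to decide the condition for the next one.  */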
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  stack_vec<tree, alloc_count> fewvars;
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        pointer_set_insert (vars, var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && pointer_set_insert (vars, var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = pointer_set_create ();
                  for (i = 0; i < alloc_count; i++)
                    pointer_set_insert (vars, fewvars[i]);
                  fewvars.release ();
                  pointer_set_insert (vars, var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
                            gimple dummy_cond,
                            bool handle_dominating_asserts,
                            tree (*simplify) (gimple, gimple),
                            bitmap visited,
                            vec<jump_thread_edge *> *path,
                            bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path,
                                  backedge_seen_p);
      return true;
    }

  return false;
}
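
/* For example (an illustrative CFG only), when a thread reaches a
   chain of empty forwarding blocks:

     B1 -> B2 -> B3 -> B4

   and B2 and B3 contain no PHIs and no real statements, the path is
   extended through them with EDGE_NO_COPY_SRC_BLOCK entries; such
   blocks are bypassed rather than duplicated when the thread is
   registered.  */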
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.  */

static bool
thread_through_normal_block (edge e,
                             gimple dummy_cond,
                             bool handle_dominating_asserts,
                             vec<tree> *stack,
                             tree (*simplify) (gimple, gimple),
                             vec<jump_thread_edge *> *path,
                             bitmap visited,
                             bool *backedge_seen_p)
{
  /* If we have crossed a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (*backedge_seen_p
      && cond_arg_set_in_bb (e, e->dest))
    return false;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return false;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    return false;

  /* If we stopped at a COND_EXPR, SWITCH_EXPR or computed GOTO, see
     if we know which arm will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return false;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is the
             first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);

          /* See if we can thread through DEST as well, this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.  */
          if (!*backedge_seen_p
              || ! cond_arg_set_in_bb (taken_edge, e->dest))
            {
              /* We don't want to thread back to a block we have already
                 visited.  This may be overly conservative.  */
              bitmap_set_bit (visited, dest->index);
              bitmap_set_bit (visited, e->dest->index);
              thread_around_empty_blocks (taken_edge,
                                          dummy_cond,
                                          handle_dominating_asserts,
                                          simplify,
                                          visited,
                                          path,
                                          backedge_seen_p);
            }
          return true;
        }
    }
  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    vec<tree> *stack,
                    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (thread_through_normal_block (e, dummy_cond, handle_dominating_asserts,
                                   stack, simplify, path, visited,
                                   &backedge_seen))
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* There should be no edges on the path, so no need to walk through
         the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          remove_temporary_equivalences (stack);
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, taken_edge->dest->index);
        bitmap_set_bit (visited, e->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        found = false;
        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (!backedge_seen
            || ! cond_arg_set_in_bb (path->last ()->e, e->dest))
          found = thread_around_empty_blocks (taken_edge,
                                              dummy_cond,
                                              handle_dominating_asserts,
                                              simplify,
                                              visited,
                                              path,
                                              &backedge_seen);

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          {
            delete_jump_thread_path (path);
          }
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}