tree-ssa-threadedge.c (thread_across_edge): Make path a pointer to a vec.
/* SSA Jump Threading
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "tree-ssa.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}
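
/* Illustrative sketch of how a client pass (DOM and VRP are the
   current users of this interface) is expected to drive the functions
   above; this is not a verbatim copy of any caller:

     threadedge_initialize_values ();
     ... for each candidate edge E:
	   thread_across_edge (dummy_cond, E, ..., &stack, simplify);
     threadedge_finalize_values ();

   set_ssa_name_value / SSA_NAME_VALUE provide the per-name scratch
   values consulted while walking E->dest.  */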

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
	  && gimple_assign_single_p (use_stmt)
	  && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	  && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	  && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	{
	  return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}
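
/* For instance, given the (illustrative) GIMPLE

     x_2 = ASSERT_EXPR <x_1, x_1 != 0>;

   in a block dominating BB, lhs_of_dominating_assert (x_1, bb, stmt)
   returns x_2, letting the caller pick up the predicate information
   attached to x_2 rather than the weaker x_1.  */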

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
	 pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
	break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
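
/* The unwind stack therefore holds entries pushed in pairs, in the
   order <previous value of X, X>.  Sketch of the layout after two
   equivalences have been recorded (most recent entry on the right):

     ... | prev(a_1) | a_1 | prev(b_2) | b_2

   remove_temporary_equivalences pops entries two at a time, restoring
   each SSA name to its previous value; a NULL_TREE entry, if the
   caller pushed one as a scope marker, stops the unwinding.  */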

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we can not thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
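
/* As a small illustrative example, assume E->dest starts with

     x_3 = PHI <5(2), z_7(3)>;

   When E is the edge from block 2, the PHI temporarily records the
   equivalence x_3 == 5, which may later allow the conditional at the
   end of E->dest to be resolved.  If instead z_7 were itself defined
   by another PHI in E->dest, we would give up and return false.  */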

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	tree op1 = gimple_assign_rhs2 (stmt);
	return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	tree op1 = gimple_assign_rhs2 (stmt);
	tree op2 = gimple_assign_rhs3 (stmt);

	/* Sadly, we have to handle conditional assignments specially
	   here, because fold expects all the operands of an expression
	   to be folded before the expression itself is folded, but we
	   can't just substitute the folded condition here.  */
	if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	  op0 = fold (op0);

	return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
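
/* For example, for the (illustrative) assignment x_2 = 3 + 4 this
   returns the folded constant 7.  For x_2 = y_1 + 4 with y_1 unknown,
   folding may fail and return NULL_TREE or a non-invariant tree, in
   which case the caller falls back to its pass-specific simplifier.  */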

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
						   vec<tree> *stack,
						   tree (*simplify) (gimple,
								     gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 can not thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the loop.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
	       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     We're going to temporarily copy propagate the operands
	     and see if that allows us to simplify this statement.  */
	  tree *copy;
	  ssa_op_iter iter;
	  use_operand_p use_p;
	  unsigned int num, i = 0;

	  num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
	  copy = XCNEWVEC (tree, num);

	  /* Make a copy of the uses & vuses into USES_COPY, then cprop into
	     the operands.  */
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    {
	      tree tmp = NULL;
	      tree use = USE_FROM_PTR (use_p);

	      copy[i++] = use;
	      if (TREE_CODE (use) == SSA_NAME)
		tmp = SSA_NAME_VALUE (use);
	      if (tmp)
		SET_USE (use_p, tmp);
	    }

	  /* Try to fold/lookup the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  if (is_gimple_call (stmt))
	    cached_lhs = fold_call_stmt (stmt, false);
	  else
	    cached_lhs = fold_assignment_stmt (stmt);

	  if (!cached_lhs
	      || (TREE_CODE (cached_lhs) != SSA_NAME
		  && !is_gimple_min_invariant (cached_lhs)))
	    cached_lhs = (*simplify) (stmt, stmt);

	  /* Restore the statement's original uses/defs.  */
	  i = 0;
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    SET_USE (use_p, copy[i++]);

	  free (copy);
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}
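
/* To make the copy-propagate-then-fold step above concrete, assume
   (purely for illustration) that traversing E already recorded
   a_1 == 7 and that E->dest contains

     b_2 = a_1 > 5;

   The use of a_1 is temporarily replaced by 7, fold_assignment_stmt
   reduces the RHS to 1, the original operands are then restored, and
   the context sensitive equivalence b_2 == 1 is recorded for use by
   simplify_control_stmt_condition below.  */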

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple stmt,
				 gimple dummy_cond,
				 tree (*simplify) (gimple, gimple),
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  tree tmp = SSA_NAME_VALUE (op0);
	  if (tmp)
	    op0 = tmp;
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  tree tmp = SSA_NAME_VALUE (op1);
	  if (tmp)
	    op1 = tmp;
	}

      if (handle_dominating_asserts)
	{
	  /* Now see if the operand was consumed by an ASSERT_EXPR
	     which dominates E->src.  If so, we want to replace the
	     operand with the LHS of the ASSERT_EXPR.  */
	  if (TREE_CODE (op0) == SSA_NAME)
	    op0 = lhs_of_dominating_assert (op0, e->src, stmt);

	  if (TREE_CODE (op1) == SSA_NAME)
	    op1 = lhs_of_dominating_assert (op1, e->src, stmt);
	}

      /* We may need to canonicalize the comparison.  For
	 example, op0 might be a constant while op1 is an
	 SSA_NAME.  Failure to canonicalize will cause us to
	 miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
	{
	  tree tmp;
	  cond_code = swap_tree_comparison (cond_code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      /* Stuff the operator and operands into our dummy conditional
	 expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions; we only
	 care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
	while (CONVERT_EXPR_P (cached_lhs))
	  cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
				       && is_gimple_min_invariant (cached_lhs)),
				      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
	 then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
	  || !is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
	  && TREE_CODE (cached_lhs) == SSA_NAME
	  && SSA_NAME_VALUE (cached_lhs))
	cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
	 update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
	cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
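
/* Continuing the illustration above: if b_2 == 1 was recorded while
   scanning E->dest and the block ends with

     if (b_2 != 0)

   then the operand lookup replaces b_2 with 1, fold_binary reduces the
   dummy condition to a constant, and the caller learns statically which
   outgoing edge is taken whenever E->dest is entered via E.  */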

/* Return TRUE if the statement at the end of e->dest depends on
   the output of any statement in BB.   Otherwise return FALSE.

   This is used when we are threading a backedge and need to ensure
   that temporary equivalences from BB do not affect the condition
   in e->dest.  */

static bool
cond_arg_set_in_bb (edge e, basic_block bb)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  gimple last = last_stmt (e->dest);

  /* E->dest does not have to end with a control transferring
     instruction.  This can occur when we try to extend a jump
     threading opportunity deeper into the CFG.  In that case
     it is safe for this check to return false.  */
  if (!last)
    return false;

  if (gimple_code (last) != GIMPLE_COND
      && gimple_code (last) != GIMPLE_GOTO
      && gimple_code (last) != GIMPLE_SWITCH)
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (TREE_CODE (use) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
	return true;
    }
  return false;
}

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      i++;
    }

  vec<tree, va_stack> fewvars = vNULL;
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();
  else if (alloc_count)
    vec_stack_alloc (tree, fewvars, alloc_count);

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else
	gcc_unreachable ();

      if (vars)
	pointer_set_insert (vars, var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  ??? Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts, which
	     might be a problem once we introduce stmt frontier notes
	     or somesuch.  Adding `&& bb == src' to the condition
	     below will preserve all potentially relevant debug
	     notes.  */
	  if (vars && pointer_set_insert (vars, var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;

	      if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = pointer_set_create ();
		  for (i = 0; i < alloc_count; i++)
		    pointer_set_insert (vars, fewvars[i]);
		  fewvars.release ();
		  pointer_set_insert (vars, var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
			    gimple dummy_cond,
			    bool handle_dominating_asserts,
			    tree (*simplify) (gimple, gimple),
			    bitmap visited,
			    vec<jump_thread_edge *> *path)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);
	  if ((taken_edge->flags & EDGE_DFS_BACK) == 0
	      && !bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 handle_dominating_asserts,
						 simplify,
						 visited,
						 path);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
					  simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  handle_dominating_asserts,
				  simplify,
				  visited,
				  path);
      return true;
    }

  return false;
}
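
/* Illustrative example: if the edge selected above leads to a block
   <bb 7> that contains no PHIs and no statements and whose single
   successor is <bb 9>, then bb 7 need not be copied; the path simply
   grows by an EDGE_NO_COPY_SRC_BLOCK entry and the walk continues into
   bb 9, where another statically known conditional may extend the
   thread further.  */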

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as
   scratch, to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    vec<tree> *stack,
		    tree (*simplify) (gimple, gimple))
{
  gimple stmt;

  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      if (cond_arg_set_in_bb (e, e->dest))
	goto fail;
    }

  stmt_count = 0;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    goto fail;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    goto fail;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
					      handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
	{
	  edge taken_edge = find_taken_edge (e->dest, cond);
	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);
	  bitmap visited;

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL || dest == e->dest)
	    goto fail;

	  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
	  jump_thread_edge *x
	    = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	  path->safe_push (x);

	  x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.  */
	  if ((e->flags & EDGE_DFS_BACK) == 0
	      || ! cond_arg_set_in_bb (taken_edge, e->dest))
	    {
	      /* We don't want to thread back to a block we have already
		 visited.  This may be overly conservative.  */
	      visited = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (visited, dest->index);
	      bitmap_set_bit (visited, e->dest->index);
	      thread_around_empty_blocks (taken_edge,
					  dummy_cond,
					  handle_dominating_asserts,
					  simplify,
					  visited,
					  path);
	      BITMAP_FREE (visited);
	    }

	  remove_temporary_equivalences (stack);
	  propagate_threaded_block_debug_into (path->last ()->e->dest,
					       e->dest);
	  register_jump_thread (path);
	  return;
	}
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;
    bitmap visited = BITMAP_ALLOC (NULL);

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, taken_edge->dest->index);
	bitmap_set_bit (visited, e->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
	found = false;
	if ((e->flags & EDGE_DFS_BACK) == 0
	    || ! cond_arg_set_in_bb (path->last ()->e, e->dest))
	  found = thread_around_empty_blocks (taken_edge,
					      dummy_cond,
					      handle_dominating_asserts,
					      simplify,
					      visited,
					      path);

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found)
	  {
	    propagate_threaded_block_debug_into (path->last ()->e->dest,
						 taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  {
	    for (unsigned int i = 0; i < path->length (); i++)
	      delete (*path)[i];
	    path->release ();
	  }
      }
    BITMAP_FREE (visited);
  }

 fail:
  remove_temporary_equivalences (stack);
}