gcc/tree-ssa-threadedge.c
1 /* SSA Jump Threading
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Jeff Law <law@redhat.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "cfgloop.h"
30 #include "function.h"
31 #include "timevar.h"
32 #include "dumpfile.h"
33 #include "tree-flow.h"
34 #include "tree-ssa-propagate.h"
35 #include "langhooks.h"
36 #include "params.h"
37
38 /* To avoid code explosion due to jump threading, we limit the
39 number of statements we are going to copy. This variable
40 holds the number of statements currently seen that we'll have
41 to copy as part of the jump threading process. */
42 static int stmt_count;
43
44 /* Array to record value-handles per SSA_NAME. */
45 vec<tree> ssa_name_values;
46
47 /* Set the value for the SSA name NAME to VALUE. */
48
49 void
50 set_ssa_name_value (tree name, tree value)
51 {
52 if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
53 ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
54 ssa_name_values[SSA_NAME_VERSION (name)] = value;
55 }
56
57 /* Initialize the per SSA_NAME value-handles array. */
58 void
59 threadedge_initialize_values (void)
60 {
61 gcc_assert (!ssa_name_values.exists ());
62 ssa_name_values.create (num_ssa_names);
63 }
64
65 /* Free the per SSA_NAME value-handle array. */
66 void
67 threadedge_finalize_values (void)
68 {
69 ssa_name_values.release ();
70 }
71
72 /* Return TRUE if we may be able to thread an incoming edge into
73 BB to an outgoing edge from BB. Return FALSE otherwise. */
74
75 bool
76 potentially_threadable_block (basic_block bb)
77 {
78 gimple_stmt_iterator gsi;
79
80 /* If BB has a single successor or a single predecessor, then
81 there is no threading opportunity. */
82 if (single_succ_p (bb) || single_pred_p (bb))
83 return false;
84
85 /* If BB does not end with a conditional, switch or computed goto,
86 then there is no threading opportunity. */
87 gsi = gsi_last_bb (bb);
88 if (gsi_end_p (gsi)
89 || ! gsi_stmt (gsi)
90 || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
91 && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
92 && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
93 return false;
94
95 return true;
96 }
97
98 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
99 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
100 BB. If no such ASSERT_EXPR is found, return OP. */
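
/* An illustrative, hypothetical GIMPLE fragment (not taken from any
   particular testcase):

     bb 2:
       x_2 = ASSERT_EXPR <x_1, x_1 > 42>;
       ...
     bb 4:
       if (x_1 < 100)   <-- OP is x_1, BB is bb 4

   If bb 2 dominates bb 4, the call returns x_2, exposing to the caller
   whatever information has been attached to x_2.  */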
101
102 static tree
103 lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
104 {
105 imm_use_iterator imm_iter;
106 gimple use_stmt;
107 use_operand_p use_p;
108
109 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
110 {
111 use_stmt = USE_STMT (use_p);
112 if (use_stmt != stmt
113 && gimple_assign_single_p (use_stmt)
114 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
115 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
116 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
117 {
118 return gimple_assign_lhs (use_stmt);
119 }
120 }
121 return op;
122 }
123
124 /* We record temporary equivalences created by PHI nodes or
125 statements within the target block. Doing so allows us to
126 identify more jump threading opportunities, even in blocks
127 with side effects.
128
129 We keep track of those temporary equivalences in a stack
130 structure so that we can unwind them when we're done processing
131 a particular edge. This routine handles unwinding the data
132 structures. */
133
134 static void
135 remove_temporary_equivalences (vec<tree> *stack)
136 {
137 while (stack->length () > 0)
138 {
139 tree prev_value, dest;
140
141 dest = stack->pop ();
142
143 /* A NULL value indicates we should stop unwinding; otherwise
144 pop off the next entry, as entries are recorded in pairs. */
145 if (dest == NULL)
146 break;
147
148 prev_value = stack->pop ();
149 set_ssa_name_value (dest, prev_value);
150 }
151 }
152
153 /* Record a temporary equivalence, saving enough information so that
154 we can restore the state of recorded equivalences when we're
155 done processing the current edge. */
156
157 static void
158 record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
159 {
160 tree prev_x = SSA_NAME_VALUE (x);
161
162 if (TREE_CODE (y) == SSA_NAME)
163 {
164 tree tmp = SSA_NAME_VALUE (y);
165 y = tmp ? tmp : y;
166 }
167
168 set_ssa_name_value (x, y);
169 stack->reserve (2);
170 stack->quick_push (prev_x);
171 stack->quick_push (x);
172 }
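
/* A sketch of the stack discipline assumed by the two routines above.
   Entries are pushed in pairs (previous value, then name); an optional
   NULL marker pushed by the caller bounds how far unwinding proceeds:

     bottom  ...  NULL  prev(a_1)  a_1  prev(b_2)  b_2  top

   remove_temporary_equivalences pops pairs, restoring each SSA name's
   previous value, and stops early if it pops a NULL marker.  */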
173
174 /* Record temporary equivalences created by PHIs at the target of the
175 edge E. Record unwind information for the equivalences onto STACK.
176
177 If a PHI which prevents threading is encountered, then return FALSE
178 indicating we should not thread this edge, else return TRUE. */
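
/* For example, with a hypothetical PHI in E->dest and E coming from bb 2:

     x_3 = PHI <0(2), x_7(4)>

   we temporarily record the equivalence x_3 = 0, which may later allow
   the conditional at the end of E->dest to be resolved.  */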
179
180 static bool
181 record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
182 {
183 gimple_stmt_iterator gsi;
184
185 /* Each PHI creates a temporary equivalence; record them.
186 These are context sensitive equivalences and will be removed
187 later. */
188 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
189 {
190 gimple phi = gsi_stmt (gsi);
191 tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
192 tree dst = gimple_phi_result (phi);
193
194 /* If the desired argument is not the same as this PHI's result
195 and it is set by a PHI in E->dest, then we can not thread
196 through E->dest. */
197 if (src != dst
198 && TREE_CODE (src) == SSA_NAME
199 && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
200 && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
201 return false;
202
203 /* We consider any non-virtual PHI as a statement since it
204 could result in a constant assignment or copy operation. */
205 if (!virtual_operand_p (dst))
206 stmt_count++;
207
208 record_temporary_equivalence (dst, src, stack);
209 }
210 return true;
211 }
212
213 /* Fold the RHS of an assignment statement and return it as a tree.
214 May return NULL_TREE if no simplification is possible. */
215
216 static tree
217 fold_assignment_stmt (gimple stmt)
218 {
219 enum tree_code subcode = gimple_assign_rhs_code (stmt);
220
221 switch (get_gimple_rhs_class (subcode))
222 {
223 case GIMPLE_SINGLE_RHS:
224 return fold (gimple_assign_rhs1 (stmt));
225
226 case GIMPLE_UNARY_RHS:
227 {
228 tree lhs = gimple_assign_lhs (stmt);
229 tree op0 = gimple_assign_rhs1 (stmt);
230 return fold_unary (subcode, TREE_TYPE (lhs), op0);
231 }
232
233 case GIMPLE_BINARY_RHS:
234 {
235 tree lhs = gimple_assign_lhs (stmt);
236 tree op0 = gimple_assign_rhs1 (stmt);
237 tree op1 = gimple_assign_rhs2 (stmt);
238 return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
239 }
240
241 case GIMPLE_TERNARY_RHS:
242 {
243 tree lhs = gimple_assign_lhs (stmt);
244 tree op0 = gimple_assign_rhs1 (stmt);
245 tree op1 = gimple_assign_rhs2 (stmt);
246 tree op2 = gimple_assign_rhs3 (stmt);
247
248 /* Sadly, we have to handle conditional assignments specially
249 here, because fold expects all the operands of an expression
250 to be folded before the expression itself is folded, but we
251 can't just substitute the folded condition here. */
252 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
253 op0 = fold (op0);
254
255 return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
256 }
257
258 default:
259 gcc_unreachable ();
260 }
261 }
262
263 /* Try to simplify each statement in E->dest, ultimately leading to
264 a simplification of the COND_EXPR at the end of E->dest.
265
266 Record unwind information for temporary equivalences onto STACK.
267
268 Use SIMPLIFY (a pointer to a callback function) to further simplify
269 statements using pass specific information.
270
271 We might consider marking just those statements which ultimately
272 feed the COND_EXPR. It's not clear if the overhead of bookkeeping
273 would be recovered by trying to simplify fewer statements.
274
275 If we are able to simplify a statement into the form
276 SSA_NAME = (SSA_NAME | gimple invariant), then we can record
277 a context sensitive equivalence which may help us simplify
278 later statements in E->dest. */
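
/* A concrete, hypothetical example: if a PHI at the start of E->dest gave
   us the temporary equivalence a_1 = 5, then for the statement

     b_2 = a_1 + 1;

   we substitute 5 for a_1, fold the RHS to 6, and record the context
   sensitive equivalence b_2 = 6 for use by later statements and by the
   control statement at the end of the block.  */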
279
280 static gimple
281 record_temporary_equivalences_from_stmts_at_dest (edge e,
282 vec<tree> *stack,
283 tree (*simplify) (gimple,
284 gimple))
285 {
286 gimple stmt = NULL;
287 gimple_stmt_iterator gsi;
288 int max_stmt_count;
289
290 max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);
291
292 /* Walk through each statement in the block recording equivalences
293 we discover. Note any equivalences we discover are context
294 sensitive (ie, are dependent on traversing E) and must be unwound
295 when we're finished processing E. */
296 for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
297 {
298 tree cached_lhs = NULL;
299
300 stmt = gsi_stmt (gsi);
301
302 /* Ignore empty statements and labels. */
303 if (gimple_code (stmt) == GIMPLE_NOP
304 || gimple_code (stmt) == GIMPLE_LABEL
305 || is_gimple_debug (stmt))
306 continue;
307
308 /* If the statement has volatile operands, then we assume we
309 can not thread through this block. This is overly
310 conservative in some ways. */
311 if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
312 return NULL;
313
314 /* If duplicating this block is going to cause too much code
315 expansion, then do not thread through this block. */
316 stmt_count++;
317 if (stmt_count > max_stmt_count)
318 return NULL;
319
320 /* If this is not a statement that sets an SSA_NAME to a new
321 value, then do not try to simplify this statement as it will
322 not simplify in any way that is helpful for jump threading. */
323 if ((gimple_code (stmt) != GIMPLE_ASSIGN
324 || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
325 && (gimple_code (stmt) != GIMPLE_CALL
326 || gimple_call_lhs (stmt) == NULL_TREE
327 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
328 continue;
329
330 /* The result of __builtin_object_size depends on all the arguments
331 of a phi node. Temporarily using only one edge produces invalid
332 results. For example
333
334 if (x < 6)
335 goto l;
336 else
337 goto l;
338
339 l:
340 r = PHI <&w[2].a[1](2), &a.a[6](3)>
341 __builtin_object_size (r, 0)
342
343 The result of __builtin_object_size is defined to be the maximum of
344 remaining bytes. If we use only one edge on the phi, the result will
345 change to be the remaining bytes for the corresponding phi argument.
346
347 Similarly for __builtin_constant_p:
348
349 r = PHI <1(2), 2(3)>
350 __builtin_constant_p (r)
351
352 Both PHI arguments are constant, but x ? 1 : 2 is still not
353 constant. */
354
355 if (is_gimple_call (stmt))
356 {
357 tree fndecl = gimple_call_fndecl (stmt);
358 if (fndecl
359 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
360 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
361 continue;
362 }
363
364 /* At this point we have a statement which assigns an RHS to an
365 SSA_VAR on the LHS. We want to try and simplify this statement
366 to expose more context sensitive equivalences which in turn may
367 allow us to simplify the condition at the end of the block.
368
369 Handle simple copy operations as well as implied copies from
370 ASSERT_EXPRs. */
371 if (gimple_assign_single_p (stmt)
372 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
373 cached_lhs = gimple_assign_rhs1 (stmt);
374 else if (gimple_assign_single_p (stmt)
375 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
376 cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
377 else
378 {
379 /* A statement that is not a trivial copy or ASSERT_EXPR.
380 We're going to temporarily copy propagate the operands
381 and see if that allows us to simplify this statement. */
382 tree *copy;
383 ssa_op_iter iter;
384 use_operand_p use_p;
385 unsigned int num, i = 0;
386
387 num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
388 copy = XCNEWVEC (tree, num);
389
390 /* Make a copy of the uses & vuses into COPY, then cprop into
391 the operands. */
392 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
393 {
394 tree tmp = NULL;
395 tree use = USE_FROM_PTR (use_p);
396
397 copy[i++] = use;
398 if (TREE_CODE (use) == SSA_NAME)
399 tmp = SSA_NAME_VALUE (use);
400 if (tmp)
401 SET_USE (use_p, tmp);
402 }
403
404 /* Try to fold/lookup the new expression. Inserting the
405 expression into the hash table is unlikely to help. */
406 if (is_gimple_call (stmt))
407 cached_lhs = fold_call_stmt (stmt, false);
408 else
409 cached_lhs = fold_assignment_stmt (stmt);
410
411 if (!cached_lhs
412 || (TREE_CODE (cached_lhs) != SSA_NAME
413 && !is_gimple_min_invariant (cached_lhs)))
414 cached_lhs = (*simplify) (stmt, stmt);
415
416 /* Restore the statement's original uses/defs. */
417 i = 0;
418 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
419 SET_USE (use_p, copy[i++]);
420
421 free (copy);
422 }
423
424 /* Record the context sensitive equivalence if we were able
425 to simplify this statement. */
426 if (cached_lhs
427 && (TREE_CODE (cached_lhs) == SSA_NAME
428 || is_gimple_min_invariant (cached_lhs)))
429 record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
430 }
431 return stmt;
432 }
433
434 /* Simplify the control statement at the end of the block E->dest.
435
436 To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
437 is available to use/clobber in DUMMY_COND.
438
439 Use SIMPLIFY (a pointer to a callback function) to further simplify
440 a condition using pass specific information.
441
442 Return the simplified condition or NULL if simplification could
443 not be performed. */
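
/* For instance (hypothetical values): if the control statement is
   "if (x_1 != 0)" and the equivalence chains currently give x_1 the
   value 0, DUMMY_COND is filled in as "0 != 0", which folds to
   boolean_false_node and is returned as the simplified condition.  */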
444
445 static tree
446 simplify_control_stmt_condition (edge e,
447 gimple stmt,
448 gimple dummy_cond,
449 tree (*simplify) (gimple, gimple),
450 bool handle_dominating_asserts)
451 {
452 tree cond, cached_lhs;
453 enum gimple_code code = gimple_code (stmt);
454
455 /* For comparisons, we have to update both operands, then try
456 to simplify the comparison. */
457 if (code == GIMPLE_COND)
458 {
459 tree op0, op1;
460 enum tree_code cond_code;
461
462 op0 = gimple_cond_lhs (stmt);
463 op1 = gimple_cond_rhs (stmt);
464 cond_code = gimple_cond_code (stmt);
465
466 /* Get the current value of both operands. */
467 if (TREE_CODE (op0) == SSA_NAME)
468 {
469 tree tmp = SSA_NAME_VALUE (op0);
470 if (tmp)
471 op0 = tmp;
472 }
473
474 if (TREE_CODE (op1) == SSA_NAME)
475 {
476 tree tmp = SSA_NAME_VALUE (op1);
477 if (tmp)
478 op1 = tmp;
479 }
480
481 if (handle_dominating_asserts)
482 {
483 /* Now see if the operand was consumed by an ASSERT_EXPR
484 which dominates E->src. If so, we want to replace the
485 operand with the LHS of the ASSERT_EXPR. */
486 if (TREE_CODE (op0) == SSA_NAME)
487 op0 = lhs_of_dominating_assert (op0, e->src, stmt);
488
489 if (TREE_CODE (op1) == SSA_NAME)
490 op1 = lhs_of_dominating_assert (op1, e->src, stmt);
491 }
492
493 /* We may need to canonicalize the comparison. For
494 example, op0 might be a constant while op1 is an
495 SSA_NAME. Failure to canonicalize will cause us to
496 miss threading opportunities. */
497 if (tree_swap_operands_p (op0, op1, false))
498 {
499 tree tmp;
500 cond_code = swap_tree_comparison (cond_code);
501 tmp = op0;
502 op0 = op1;
503 op1 = tmp;
504 }
505
506 /* Stuff the operator and operands into our dummy conditional
507 expression. */
508 gimple_cond_set_code (dummy_cond, cond_code);
509 gimple_cond_set_lhs (dummy_cond, op0);
510 gimple_cond_set_rhs (dummy_cond, op1);
511
512 /* We absolutely do not care about any type conversions;
513 we only care about a zero/nonzero value. */
514 fold_defer_overflow_warnings ();
515
516 cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
517 if (cached_lhs)
518 while (CONVERT_EXPR_P (cached_lhs))
519 cached_lhs = TREE_OPERAND (cached_lhs, 0);
520
521 fold_undefer_overflow_warnings ((cached_lhs
522 && is_gimple_min_invariant (cached_lhs)),
523 stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
524
525 /* If we have not simplified the condition down to an invariant,
526 then use the pass specific callback to simplify the condition. */
527 if (!cached_lhs
528 || !is_gimple_min_invariant (cached_lhs))
529 cached_lhs = (*simplify) (dummy_cond, stmt);
530
531 return cached_lhs;
532 }
533
534 if (code == GIMPLE_SWITCH)
535 cond = gimple_switch_index (stmt);
536 else if (code == GIMPLE_GOTO)
537 cond = gimple_goto_dest (stmt);
538 else
539 gcc_unreachable ();
540
541 /* We can have conditionals which just test the state of a variable
542 rather than use a relational operator. These are simpler to handle. */
543 if (TREE_CODE (cond) == SSA_NAME)
544 {
545 cached_lhs = cond;
546
547 /* Get the variable's current value from the equivalence chains.
548
549 It is possible to get loops in the SSA_NAME_VALUE chains
550 (consider threading the backedge of a loop where we have
551 a loop invariant SSA_NAME used in the condition). */
552 if (cached_lhs
553 && TREE_CODE (cached_lhs) == SSA_NAME
554 && SSA_NAME_VALUE (cached_lhs))
555 cached_lhs = SSA_NAME_VALUE (cached_lhs);
556
557 /* If we're dominated by a suitable ASSERT_EXPR, then
558 update CACHED_LHS appropriately. */
559 if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
560 cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);
561
562 /* If we haven't simplified to an invariant yet, then use the
563 pass specific callback to try and simplify it further. */
564 if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
565 cached_lhs = (*simplify) (stmt, stmt);
566 }
567 else
568 cached_lhs = NULL;
569
570 return cached_lhs;
571 }
572
573 /* Return TRUE if the statement at the end of e->dest depends on
574 the output of any statement in BB. Otherwise return FALSE.
575
576 This is used when we are threading a backedge and need to ensure
577 that temporary equivalences from BB do not affect the condition
578 in e->dest. */
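
/* Illustrative, hypothetical case: if BB contains "i_5 = i_2 + 1" and
   e->dest ends with "if (i_5 < n_3)", the condition uses a value defined
   in BB, so this returns TRUE and the backedge is not threaded using
   BB's temporary equivalences.  */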
579
580 static bool
581 cond_arg_set_in_bb (edge e, basic_block bb)
582 {
583 ssa_op_iter iter;
584 use_operand_p use_p;
585 gimple last = last_stmt (e->dest);
586
587 /* E->dest does not have to end with a control transferring
588 instruction. This can occur when we try to extend a jump
589 threading opportunity deeper into the CFG. In that case
590 it is safe for this check to return false. */
591 if (!last)
592 return false;
593
594 if (gimple_code (last) != GIMPLE_COND
595 && gimple_code (last) != GIMPLE_GOTO
596 && gimple_code (last) != GIMPLE_SWITCH)
597 return false;
598
599 FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
600 {
601 tree use = USE_FROM_PTR (use_p);
602
603 if (TREE_CODE (use) == SSA_NAME
604 && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
605 && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
606 return true;
607 }
608 return false;
609 }
610
611 /* Copy debug stmts from DEST's chain of single predecessors up to
612 SRC, so that we don't lose the bindings as PHI nodes are introduced
613 when DEST gains new predecessors. */
614 void
615 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
616 {
617 if (!MAY_HAVE_DEBUG_STMTS)
618 return;
619
620 if (!single_pred_p (dest))
621 return;
622
623 gcc_checking_assert (dest != src);
624
625 gimple_stmt_iterator gsi = gsi_after_labels (dest);
626 int i = 0;
627 const int alloc_count = 16; // ?? Should this be a PARAM?
628
629 /* Estimate the number of debug vars overridden in the beginning of
630 DEST, to tell how many we're going to need to begin with. */
631 for (gimple_stmt_iterator si = gsi;
632 i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
633 {
634 gimple stmt = gsi_stmt (si);
635 if (!is_gimple_debug (stmt))
636 break;
637 i++;
638 }
639
640 vec<tree, va_stack> fewvars = vNULL;
641 pointer_set_t *vars = NULL;
642
643 /* If we're already starting with 3/4 of alloc_count, go for a
644 pointer_set, otherwise start with an unordered stack-allocated
645 VEC. */
646 if (i * 4 > alloc_count * 3)
647 vars = pointer_set_create ();
648 else if (alloc_count)
649 vec_stack_alloc (tree, fewvars, alloc_count);
650
651 /* Now go through the initial debug stmts in DEST again, this time
652 actually inserting in VARS or FEWVARS. Don't bother checking for
653 duplicates in FEWVARS. */
654 for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
655 {
656 gimple stmt = gsi_stmt (si);
657 if (!is_gimple_debug (stmt))
658 break;
659
660 tree var;
661
662 if (gimple_debug_bind_p (stmt))
663 var = gimple_debug_bind_get_var (stmt);
664 else if (gimple_debug_source_bind_p (stmt))
665 var = gimple_debug_source_bind_get_var (stmt);
666 else
667 gcc_unreachable ();
668
669 if (vars)
670 pointer_set_insert (vars, var);
671 else
672 fewvars.quick_push (var);
673 }
674
675 basic_block bb = dest;
676
677 do
678 {
679 bb = single_pred (bb);
680 for (gimple_stmt_iterator si = gsi_last_bb (bb);
681 !gsi_end_p (si); gsi_prev (&si))
682 {
683 gimple stmt = gsi_stmt (si);
684 if (!is_gimple_debug (stmt))
685 continue;
686
687 tree var;
688
689 if (gimple_debug_bind_p (stmt))
690 var = gimple_debug_bind_get_var (stmt);
691 else if (gimple_debug_source_bind_p (stmt))
692 var = gimple_debug_source_bind_get_var (stmt);
693 else
694 gcc_unreachable ();
695
696 /* Discard debug bind overlaps. ??? Unlike stmts from src,
697 copied into a new block that will precede BB, debug bind
698 stmts in bypassed BBs may actually be discarded if
699 they're overwritten by subsequent debug bind stmts, which
700 might be a problem once we introduce stmt frontier notes
701 or somesuch. Adding `&& bb == src' to the condition
702 below will preserve all potentially relevant debug
703 notes. */
704 if (vars && pointer_set_insert (vars, var))
705 continue;
706 else if (!vars)
707 {
708 int i = fewvars.length ();
709 while (i--)
710 if (fewvars[i] == var)
711 break;
712 if (i >= 0)
713 continue;
714
715 if (fewvars.length () < (unsigned) alloc_count)
716 fewvars.quick_push (var);
717 else
718 {
719 vars = pointer_set_create ();
720 for (i = 0; i < alloc_count; i++)
721 pointer_set_insert (vars, fewvars[i]);
722 fewvars.release ();
723 pointer_set_insert (vars, var);
724 }
725 }
726
727 stmt = gimple_copy (stmt);
728 /* ??? Should we drop the location of the copy to denote
729 they're artificial bindings? */
730 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
731 }
732 }
733 while (bb != src && single_pred_p (bb));
734
735 if (vars)
736 pointer_set_destroy (vars);
737 else if (fewvars.exists ())
738 fewvars.release ();
739 }
740
741 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (i.e., it
742 need not be duplicated as part of the CFG/SSA updating process).
743
744 If it is threadable, add it to PATH and VISITED and recurse, ultimately
745 returning TRUE from the toplevel call. Otherwise do nothing and
746 return FALSE.
747
748 DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
749 try and simplify the condition at the end of TAKEN_EDGE->dest. */
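
/* A sketch of the intended situation (hypothetical CFG):

     bb 4:  <empty, single successor>  -> bb 5
     bb 5:  if (x_1 > 10) ...          <-- resolvable from recorded values

   Neither block needs to be duplicated; we simply extend PATH through
   them for as long as each block is empty or its condition can be
   statically resolved.  */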
750 static bool
751 thread_around_empty_blocks (edge taken_edge,
752 gimple dummy_cond,
753 bool handle_dominating_asserts,
754 tree (*simplify) (gimple, gimple),
755 bitmap visited,
756 vec<edge> *path)
757 {
758 basic_block bb = taken_edge->dest;
759 gimple_stmt_iterator gsi;
760 gimple stmt;
761 tree cond;
762
763 /* The key property of these blocks is that they need not be duplicated
764 when threading. Thus they can not have visible side effects such
765 as PHI nodes. */
766 if (!gsi_end_p (gsi_start_phis (bb)))
767 return false;
768
769 /* Skip over DEBUG statements at the start of the block. */
770 gsi = gsi_start_nondebug_bb (bb);
771
772 /* If the block has no statements, but does have a single successor, then
773 it's just a forwarding block and we can thread through it trivially. */
774 if (gsi_end_p (gsi))
775 {
776 if (single_succ_p (bb))
777 {
778 taken_edge = single_succ_edge (bb);
779 if ((taken_edge->flags & EDGE_DFS_BACK) == 0
780 && !bitmap_bit_p (visited, taken_edge->dest->index))
781 {
782 bitmap_set_bit (visited, taken_edge->dest->index);
783 path->safe_push (taken_edge);
784 thread_around_empty_blocks (taken_edge,
785 dummy_cond,
786 handle_dominating_asserts,
787 simplify,
788 visited,
789 path);
790 return true;
791 }
792 }
793 return false;
794 }
795
796 /* The only real statement this block can have is a control
797 flow altering statement. Anything else stops the thread. */
798 stmt = gsi_stmt (gsi);
799 if (gimple_code (stmt) != GIMPLE_COND
800 && gimple_code (stmt) != GIMPLE_GOTO
801 && gimple_code (stmt) != GIMPLE_SWITCH)
802 return false;
803
804 /* Extract and simplify the condition. */
805 cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
806 simplify, handle_dominating_asserts);
807
808 /* If the condition can be statically computed and we have not already
809 visited the destination block, then add the taken edge to our thread
810 path. */
811 if (cond && is_gimple_min_invariant (cond))
812 {
813 taken_edge = find_taken_edge (bb, cond);
814
815 if (bitmap_bit_p (visited, taken_edge->dest->index))
816 return false;
817 bitmap_set_bit (visited, taken_edge->dest->index);
818 path->safe_push (taken_edge);
819 thread_around_empty_blocks (taken_edge,
820 dummy_cond,
821 handle_dominating_asserts,
822 simplify,
823 visited,
824 path);
825 return true;
826 }
827
828 return false;
829 }
830
831 /* E1 and E2 are edges into the same basic block. Return TRUE if the
832 PHI arguments associated with those edges are equal or there are no
833 PHI arguments; otherwise return FALSE. */
834
835 static bool
836 phi_args_equal_on_edges (edge e1, edge e2)
837 {
838 gimple_stmt_iterator gsi;
839 int indx1 = e1->dest_idx;
840 int indx2 = e2->dest_idx;
841
842 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
843 {
844 gimple phi = gsi_stmt (gsi);
845
846 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
847 gimple_phi_arg_def (phi, indx2), 0))
848 return false;
849 }
850 return true;
851 }
852
853 /* We are exiting E->src, see if E->dest ends with a conditional
854 jump which has a known value when reached via E.
855
856 Special care is necessary if E is a back edge in the CFG as we
857 may have already recorded equivalences for E->dest into our
858 various tables, including the result of the conditional at
859 the end of E->dest. Threading opportunities are severely
860 limited in that case to avoid short-circuiting the loop
861 incorrectly.
862
863 Note it is quite common for the first block inside a loop to
864 end with a conditional which is either always true or always
865 false when reached via the loop backedge. Thus we do not want
866 to blindly disable threading across a loop backedge.
867
868 DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
869 to avoid allocating memory.
870
871 HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
872 the simplified condition with left-hand sides of ASSERT_EXPRs they are
873 used in.
874
875 STACK is used to undo temporary equivalences created during the walk of
876 E->dest.
877
878 SIMPLIFY is a pass-specific function used to simplify statements. */
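
/* A typical opportunity (hypothetical GIMPLE):

     bb 2:  if (a_1 > 10) goto bb 4; else goto bb 3;
     ...
     bb 4:  x_5 = y_2 + 1;
            if (a_1 > 10) goto bb 5; else goto bb 6;

   When E is the true edge 2 -> 4, the condition at the end of bb 4 is
   known to be true, so bb 4 can be duplicated for that incoming edge
   and the traversal threaded directly to bb 5.  */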
879
880 void
881 thread_across_edge (gimple dummy_cond,
882 edge e,
883 bool handle_dominating_asserts,
884 vec<tree> *stack,
885 tree (*simplify) (gimple, gimple))
886 {
887 gimple stmt;
888
889 /* If E is a backedge, then we want to verify that the COND_EXPR,
890 SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
891 by any statements in e->dest. If it is affected, then it is not
892 safe to thread this edge. */
893 if (e->flags & EDGE_DFS_BACK)
894 {
895 if (cond_arg_set_in_bb (e, e->dest))
896 goto fail;
897 }
898
899 stmt_count = 0;
900
901 /* PHIs create temporary equivalences. */
902 if (!record_temporary_equivalences_from_phis (e, stack))
903 goto fail;
904
905 /* Now walk each statement recording any context sensitive
906 temporary equivalences we can detect. */
907 stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
908 if (!stmt)
909 goto fail;
910
911 /* If we stopped at a COND_EXPR, GOTO_EXPR or SWITCH_EXPR, see if we know
912 which arm will be taken. */
913 if (gimple_code (stmt) == GIMPLE_COND
914 || gimple_code (stmt) == GIMPLE_GOTO
915 || gimple_code (stmt) == GIMPLE_SWITCH)
916 {
917 tree cond;
918
919 /* Extract and simplify the condition. */
920 cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
921 handle_dominating_asserts);
922
923 if (cond && is_gimple_min_invariant (cond))
924 {
925 edge taken_edge = find_taken_edge (e->dest, cond);
926 basic_block dest = (taken_edge ? taken_edge->dest : NULL);
927 bitmap visited;
928
929 /* DEST could be NULL for a computed jump to an absolute
930 address. */
931 if (dest == NULL || dest == e->dest)
932 goto fail;
933
934 vec<edge> path = vNULL;
935 path.safe_push (e);
936 path.safe_push (taken_edge);
937
938 /* See if we can thread through DEST as well; this helps capture
939 secondary effects of threading without having to re-run DOM or
940 VRP. */
941 if ((e->flags & EDGE_DFS_BACK) == 0
942 || ! cond_arg_set_in_bb (taken_edge, e->dest))
943 {
944 /* We don't want to thread back to a block we have already
945 visited. This may be overly conservative. */
946 visited = BITMAP_ALLOC (NULL);
947 bitmap_set_bit (visited, dest->index);
948 bitmap_set_bit (visited, e->dest->index);
949 thread_around_empty_blocks (taken_edge,
950 dummy_cond,
951 handle_dominating_asserts,
952 simplify,
953 visited,
954 &path);
955 BITMAP_FREE (visited);
956 }
957
958 remove_temporary_equivalences (stack);
959 propagate_threaded_block_debug_into (path[path.length () - 1]->dest,
960 e->dest);
961 register_jump_thread (path, false);
962 path.release ();
963 return;
964 }
965 }
966
967 /* We were unable to determine which outgoing edge from E->dest is taken. However,
968 we might still be able to thread through successors of E->dest. This
969 often occurs when E->dest is a joiner block which then fans back out
970 based on redundant tests.
971
972 If so, we'll copy E->dest and redirect the appropriate predecessor to
973 the copy. Within the copy of E->dest, we'll thread one or more edges
974 to points deeper in the CFG.
975
976 This is a stopgap until we have a more structured approach to path
977 isolation. */
978 {
979 edge taken_edge;
980 edge_iterator ei;
981 bool found = false;
982 bitmap visited = BITMAP_ALLOC (NULL);
983
984 /* Look at each successor of E->dest to see if we can thread through it. */
985 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
986 {
987 /* Avoid threading to any block we have already visited. */
988 bitmap_clear (visited);
989 bitmap_set_bit (visited, taken_edge->dest->index);
990 bitmap_set_bit (visited, e->dest->index);
991 vec<edge> path = vNULL;
992
993 /* Record whether or not we were able to thread through a successor
994 of E->dest. */
995 path.safe_push (e);
996 path.safe_push (taken_edge);
997 if ((e->flags & EDGE_DFS_BACK) == 0
998 || ! cond_arg_set_in_bb (path[path.length () - 1], e->dest))
999 found = thread_around_empty_blocks (taken_edge,
1000 dummy_cond,
1001 handle_dominating_asserts,
1002 simplify,
1003 visited,
1004 &path);
1005
1006 /* If we were able to thread through a successor of E->dest, then
1007 record the jump threading opportunity. */
1008 if (found)
1009 {
1010 edge tmp;
1011 /* If there is already an edge from the block to be duplicated
1012 (E2->src) to the final target (E3->dest), then make sure that
1013 the PHI args associated with the edges E2 and E3 are the
1014 same. */
1015 tmp = find_edge (taken_edge->src, path[path.length () - 1]->dest);
1016 if (!tmp || phi_args_equal_on_edges (tmp, path[path.length () - 1]))
1017 {
1018 propagate_threaded_block_debug_into (path[path.length () - 1]->dest,
1019 taken_edge->dest);
1020 register_jump_thread (path, true);
1021 }
1022 }
1023
1024 path.release ();
1025 }
1026 BITMAP_FREE (visited);
1027 }
1028
1029 fail:
1030 remove_temporary_equivalences (stack);
1031 }