/* SSA Jump Threading
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Jeff Law <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
VEC(tree,heap) *ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= VEC_length (tree, ssa_name_values))
    VEC_safe_grow_cleared (tree, heap, ssa_name_values,
                           SSA_NAME_VERSION (name) + 1);
  VEC_replace (tree, ssa_name_values, SSA_NAME_VERSION (name), value);
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (ssa_name_values == NULL);
  ssa_name_values = VEC_alloc(tree, heap, num_ssa_names);
}

/* Free the per SSA_NAME value-handles array.  */
void
threadedge_finalize_values (void)
{
  VEC_free(tree, heap, ssa_name_values);
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
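
/* For illustration (a hypothetical CFG, not taken from any testcase):
   a block shaped like

       <bb 4>:                          ;; preds: bb 2, bb 3
         x_1 = PHI <0 (2), 1 (3)>
         if (x_1 != 0)
           goto <bb 5>;
         else
           goto <bb 6>;

   is potentially threadable: it has multiple predecessors, multiple
   successors, and ends in a GIMPLE_COND whose outcome may be known
   when bb 4 is entered along a particular incoming edge (here,
   2->4 can be threaded to bb 6 and 3->4 to bb 5).  The block and
   SSA version numbers above are made up for the example.  */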

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (VEC(tree, heap) **stack)
{
  while (VEC_length (tree, *stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, *stack);

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = VEC_pop (tree, *stack);
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, VEC(tree, heap) **stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  VEC_reserve (tree, heap, *stack, 2);
  VEC_quick_push (tree, *stack, prev_x);
  VEC_quick_push (tree, *stack, x);
}
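
/* For illustration (a hypothetical sequence): recording x_5 == 0 while
   x_5 previously mapped to VALUE pushes the pair [VALUE, x_5] onto
   STACK.  remove_temporary_equivalences later pops x_5, restores its
   mapping to VALUE, and keeps unwinding until the stack is empty or a
   NULL sentinel, if the caller pushed one, is reached.  */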

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, VEC(tree, heap) **stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence; record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (is_gimple_reg (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
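
/* For illustration (hypothetical names): when traversing an edge from
   bb 2 into a block containing

     r_4 = PHI <5 (2), x_3 (3)>

   the routine above records the temporary equivalence r_4 == 5, which
   later statements and the block's final condition can be simplified
   against while this particular edge is being processed.  */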

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  VEC(tree, heap) **stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

/* TAKEN_EDGE represents an edge taken as a result of jump threading.
   See if we can thread around TAKEN_EDGE->dest as well.  If so, return
   the edge out of TAKEN_EDGE->dest that we can statically compute will be
   traversed.

   We are much more restrictive as to the contents of TAKEN_EDGE->dest
   as the path isolation code in tree-ssa-threadupdate.c isn't prepared
   to handle copying intermediate blocks on a threaded path.

   Long term a more consistent and structured approach to path isolation
   would be a huge help.  */
static edge
thread_around_empty_block (edge taken_edge,
                           gimple dummy_cond,
                           bool handle_dominating_asserts,
                           tree (*simplify) (gimple, gimple),
                           bitmap visited)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* This block must have a single predecessor (E->dest).  */
  if (!single_pred_p (bb))
    return NULL;

  /* This block must have more than one successor.  */
  if (single_succ_p (bb))
    return NULL;

  /* This block can have no PHI nodes.  This is overly conservative.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return NULL;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  /* This block can have no statements other than its control altering
     statement.  This is overly conservative.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return NULL;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination block, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      edge taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return NULL;
      bitmap_set_bit (visited, taken_edge->dest->index);
      return taken_edge;
    }

  return NULL;
}
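
/* For illustration (hypothetical CFG): if the primary thread takes us
   to a block containing nothing but

     if (flag_2 != 0) goto <bb 7>; else goto <bb 8>;

   and flag_2 is already known to be zero along the path being
   threaded, the routine above extends the thread through that block
   to the edge leading to bb 8, capturing the secondary opportunity
   without having to re-run DOM or VRP.  */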

/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gimple_stmt_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
                            gimple_phi_arg_def (phi, indx2), 0))
        return false;
    }
  return true;
}

/* We are exiting E->src; see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as
   scratch, to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    VEC(tree, heap) **stack,
                    tree (*simplify) (gimple, gimple))
{
  gimple stmt;

  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      gimple last = gsi_stmt (gsi_last_bb (e->dest));

      FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME
              && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
              && gimple_bb (SSA_NAME_DEF_STMT (use)) == e->dest)
            goto fail;
        }
    }

  stmt_count = 0;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    goto fail;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    goto fail;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);
          bitmap visited;
          edge e2;

          if (dest == e->dest)
            goto fail;

          /* DEST could be null for a computed jump to an absolute
             address.  If DEST is not null, then see if we can thread
             through it as well; this helps capture secondary effects
             of threading without having to re-run DOM or VRP.  */
          if (dest)
            {
              /* We don't want to thread back to a block we have already
                 visited.  This may be overly conservative.  */
              visited = BITMAP_ALLOC (NULL);
              bitmap_set_bit (visited, dest->index);
              bitmap_set_bit (visited, e->dest->index);
              do
                {
                  e2 = thread_around_empty_block (taken_edge,
                                                  dummy_cond,
                                                  handle_dominating_asserts,
                                                  simplify,
                                                  visited);
                  if (e2)
                    taken_edge = e2;
                }
              while (e2);
              BITMAP_FREE (visited);
            }

          remove_temporary_equivalences (stack);
          register_jump_thread (e, taken_edge, NULL);
          return;
        }
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge e2, e3, taken_edge;
    edge_iterator ei;
    bool found = false;
    bitmap visited = BITMAP_ALLOC (NULL);

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, taken_edge->dest->index);
        bitmap_set_bit (visited, e->dest->index);

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        found = false;
        e3 = taken_edge;
        do
          {
            e2 = thread_around_empty_block (e3,
                                            dummy_cond,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited);
            if (e2)
              {
                e3 = e2;
                found = true;
              }
          }
        while (e2);

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            edge tmp;
            /* If there is already an edge from the block to be duplicated
               (E2->src) to the final target (E3->dest), then make sure that
               the PHI args associated with the edges E2 and E3 are the
               same.  */
            tmp = find_edge (taken_edge->src, e3->dest);
            if (!tmp || phi_args_equal_on_edges (tmp, e3))
              register_jump_thread (e, taken_edge, e3);
          }
      }
    BITMAP_FREE (visited);
  }

 fail:
  remove_temporary_equivalences (stack);
}