gcc/tree-ssa-threadedge.c
/* SSA Jump Threading
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "hash-set.h"
26 #include "machmode.h"
27 #include "vec.h"
28 #include "double-int.h"
29 #include "input.h"
30 #include "alias.h"
31 #include "symtab.h"
32 #include "wide-int.h"
33 #include "inchash.h"
34 #include "tree.h"
35 #include "fold-const.h"
36 #include "flags.h"
37 #include "tm_p.h"
38 #include "predict.h"
39 #include "hard-reg-set.h"
40 #include "input.h"
41 #include "function.h"
42 #include "dominance.h"
43 #include "basic-block.h"
44 #include "cfgloop.h"
45 #include "timevar.h"
46 #include "dumpfile.h"
47 #include "tree-ssa-alias.h"
48 #include "internal-fn.h"
49 #include "gimple-expr.h"
50 #include "is-a.h"
51 #include "gimple.h"
52 #include "gimple-iterator.h"
53 #include "gimple-ssa.h"
54 #include "tree-cfg.h"
55 #include "tree-phinodes.h"
56 #include "ssa-iterators.h"
57 #include "stringpool.h"
58 #include "tree-ssanames.h"
59 #include "tree-ssa-propagate.h"
60 #include "tree-ssa-threadupdate.h"
61 #include "langhooks.h"
62 #include "params.h"
63 #include "tree-ssa-scopedtables.h"
64 #include "tree-ssa-threadedge.h"
65 #include "tree-ssa-loop.h"
66 #include "builtins.h"
67 #include "cfg.h"
68 #include "cfganal.h"
69
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
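
/* For example, a block of the following shape (an illustrative GIMPLE
   sketch, not taken from any particular dump) is potentially threadable:
   it has multiple predecessors, multiple successors, and ends in a
   conditional whose operand may be known when arriving along a specific
   incoming edge:

     <bb 4>:                          ;; preds: bb 2, bb 3
       a_5 = PHI <0(2), 1(3)>
       if (a_5 != 0)
         goto <bb 6>;
       else
         goto <bb 7>;

   Arriving from bb 2 we know a_5 is 0, so the edge 2->4 can be threaded
   directly to bb 7, bypassing the runtime test.  */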

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}
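
/* As an illustrative sketch (hypothetical SSA names, not from a real
   dump), VRP may have inserted:

     <bb 3>:
       x_7 = ASSERT_EXPR <x_2, x_2 > 0>;

   If bb 3 dominates BB, a query for X_2 at BB returns X_7 instead, so
   the range information attached to X_7 by the dominating assertion can
   be used when simplifying conditions at BB.  */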

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences into
   CONST_AND_COPIES.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_copies)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);
    }
  return true;
}
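
/* As an illustrative example of the equivalences recorded above
   (hypothetical GIMPLE): for

     <bb 5>:                          ;; E is the edge 2->5
       x_4 = PHI <7(2), y_3(4)>

   traversing edge 2->5 records the temporary equivalence x_4 = 7, which
   may later allow a conditional on x_4 at the end of bb 5 to fold to a
   constant for this path.  */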

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
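
/* A sketch of how the RHS classes map to fold entry points, using one
   hypothetical statement per class (illustrative only):

     a_1 = b_2;            GIMPLE_SINGLE_RHS    fold (b_2)
     a_1 = -b_2;           GIMPLE_UNARY_RHS     fold_unary (NEGATE_EXPR, ...)
     a_1 = b_2 + 3;        GIMPLE_BINARY_RHS    fold_binary (PLUS_EXPR, ...)
     a_1 = c_3 ? b_2 : 0;  GIMPLE_TERNARY_RHS   fold_ternary (COND_EXPR, ...)  */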

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences into
   CONST_AND_COPIES.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  const_and_copies *const_and_copies,
                                                  tree (*simplify) (gimple,
                                                                    gimple),
                                                  bool backedge_seen)
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        {
          /* STMT might still have DEFS and we need to invalidate any known
             equivalences for them.

             Consider if STMT is a GIMPLE_ASM with one or more outputs that
             feeds a conditional inside a loop.  We might derive an
             equivalence due to the conditional.  */
          tree op;
          ssa_op_iter iter;

          if (backedge_seen)
            FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
              const_and_copies->invalidate (op);

          continue;
        }

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result
         will change to be the remaining bytes for the corresponding phi
         argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            {
              if (backedge_seen)
                {
                  tree lhs = gimple_get_lhs (stmt);
                  const_and_copies->invalidate (lhs);
                }
              continue;
            }
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (as_a <gcall *> (stmt), false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.

         If we have traversed a backedge at some point during threading,
         then always enter something here.  Either a real equivalence,
         or a NULL_TREE equivalence which is effectively invalidation of
         prior equivalences.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        const_and_copies->record_const_or_copy (gimple_get_lhs (stmt),
                                                cached_lhs);
      else if (backedge_seen)
        const_and_copies->invalidate (gimple_get_lhs (stmt));
    }
  return stmt;
}
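
/* To make the temporary copy propagation above concrete, consider this
   illustrative sketch (hypothetical SSA names): suppose E recorded the
   equivalence x_1 = 4 and E->dest contains

     y_2 = x_1 + 1;
     if (y_2 > 3)

   Substituting 4 for x_1 lets fold_assignment_stmt reduce the RHS to 5,
   so the context sensitive equivalence y_2 = 5 is recorded and the
   conditional can later be resolved for this path.  */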

/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gcond *dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op0) == SSA_NAME
                  && SSA_NAME_VALUE (op0))
                op0 = SSA_NAME_VALUE (op0);
              else
                break;
            }
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op1) == SSA_NAME
                  && SSA_NAME_VALUE (op1))
                op1 = SSA_NAME_VALUE (op1);
              else
                break;
            }
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          cond_code = swap_tree_comparison (cond_code);
          std::swap (op0, op1);
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions; we only
         care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (cached_lhs) == SSA_NAME
                  && SSA_NAME_VALUE (cached_lhs))
                cached_lhs = SSA_NAME_VALUE (cached_lhs);
              else
                break;
            }
        }

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);

      /* We couldn't find an invariant.  But, callers of this
         function may be able to do something useful with the
         unmodified destination.  */
      if (!cached_lhs)
        cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
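
/* For instance (an illustrative sketch with hypothetical names): when
   traversing E records the equivalences a_2 = 0 and b_3 = a_2, a
   condition

     if (b_3 == 0)

   is rewritten by the SSA_NAME_VALUE lookups above (following the chain
   b_3 -> a_2 -> 0) into 0 == 0, which fold_binary reduces to
   boolean_true_node, a gimple min invariant the callers can use to pick
   the taken edge.  */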

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        vars->add (var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && vars->add (var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = new hash_set<tree>;
                  for (i = 0; i < alloc_count; i++)
                    vars->add (fewvars[i]);
                  fewvars.release ();
                  vars->add (var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings?  */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return FALSE.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
                            gcond *dummy_cond,
                            bool handle_dominating_asserts,
                            tree (*simplify) (gimple, gimple),
                            bitmap visited,
                            vec<jump_thread_edge *> *path,
                            bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              if (*backedge_seen_p)
                simplify = dummy_simplify;
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
                                          simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
        simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
                                  dummy_cond,
                                  handle_dominating_asserts,
                                  simplify,
                                  visited,
                                  path,
                                  backedge_seen_p);
      return true;
    }

  return false;
}
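
/* As a sketch of the recursion above (illustrative CFG, hypothetical
   block numbers): threading may arrive at a chain of forwarders

     <bb 4>:  goto <bb 5>;
     <bb 5>:  goto <bb 6>;
     <bb 6>:  if (x_1 != 0) ...

   Blocks 4 and 5 are pushed with EDGE_NO_COPY_SRC_BLOCK since they need
   no duplication, and the recursion terminates at bb 6 where the
   condition is either resolved from X_1's known value or the thread
   stops.  */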

/* Return true if the CFG contains at least one path from START_BB to END_BB.
   When a path is found, record in PATH the blocks from END_BB to START_BB.
   VISITED_BBS is used to make sure we don't fall into an infinite loop.  Bound
   the recursion to basic blocks belonging to LOOP.  */

static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
                      vec<basic_block, va_gc> *&path,
                      hash_set<basic_block> *visited_bbs, loop_p loop)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
        if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
          {
            vec_safe_push (path, start_bb);
            return true;
          }
    }

  return false;
}
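
/* Note the path is recorded in reverse: for an illustrative CFG with
   edges 2->3 and 3->5, a call fsm_find_thread_path (bb2, bb5, ...)
   pushes bb5 first (at the recursion's base case) and bb2 last, so PATH
   reads END_BB ... START_BB as documented above.  */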

static int max_threaded_paths;

/* We trace the value of the variable EXPR back through any phi nodes looking
   for places where it gets a constant value and save the path.  Stop after
   having recorded MAX_THREADED_PATHS jump threading paths.  */

static void
fsm_find_control_statement_thread_paths (tree expr,
                                         hash_set<basic_block> *visited_bbs,
                                         vec<basic_block, va_gc> *&path,
                                         bool seen_loop_phi)
{
  tree var = SSA_NAME_VAR (expr);
  gimple def_stmt = SSA_NAME_DEF_STMT (expr);
  basic_block var_bb = gimple_bb (def_stmt);

  if (var == NULL || var_bb == NULL)
    return;

  /* For the moment we assume that an SSA chain only contains phi nodes, and
     eventually one of the phi arguments will be an integer constant.  In the
     future, this could be extended to also handle simple assignments of
     arithmetic operations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI)
    return;

  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;

  gphi *phi = as_a <gphi *> (def_stmt);
  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();

  if (loop_containing_stmt (phi)->header == gimple_bb (phi))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
        return;
      seen_loop_phi = true;
    }

  /* Following the chain of SSA_NAME definitions, we jumped from a definition
     in LAST_BB_IN_PATH to a definition in VAR_BB.  When these basic blocks
     are different, append to PATH the blocks from LAST_BB_IN_PATH to
     VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      edge e;
      int e_count = 0;
      edge_iterator ei;
      vec<basic_block, va_gc> *next_path;
      vec_alloc (next_path, n_basic_blocks_for_fn (cfun));

      FOR_EACH_EDGE (e, ei, last_bb_in_path->preds)
        {
          hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

          if (fsm_find_thread_path (var_bb, e->src, next_path, visited_bbs,
                                    e->src->loop_father))
            ++e_count;

          delete visited_bbs;

          /* If there is more than one path, stop.  */
          if (e_count > 1)
            {
              vec_free (next_path);
              return;
            }
        }

      /* Stop if we have not found a path: this could occur when the recursion
         is stopped by one of the bounds.  */
      if (e_count == 0)
        {
          vec_free (next_path);
          return;
        }

      /* Append all the nodes from NEXT_PATH to PATH.  */
      vec_safe_splice (path, next_path);
      next_path_length = next_path->length ();
      vec_free (next_path);
    }

  gcc_assert (path->last () == var_bb);

  /* Iterate over the arguments of PHI.  */
  unsigned int i;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);
      basic_block bbi = gimple_phi_arg_edge (phi, i)->src;

      /* Skip edges pointing outside the current loop.  */
      if (!arg || var_bb->loop_father != bbi->loop_father)
        continue;

      if (TREE_CODE (arg) == SSA_NAME)
        {
          vec_safe_push (path, bbi);
          /* Recursively follow SSA_NAMEs looking for a constant
             definition.  */
          fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
                                                   seen_loop_phi);

          path->pop ();
          continue;
        }

      if (TREE_CODE (arg) != INTEGER_CST)
        continue;

      int path_length = path->length ();
      /* A path with fewer than two basic blocks should not be
         jump-threaded.  */
      if (path_length < 2)
        continue;

      if (path_length > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of basic blocks on the path "
                     "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
          continue;
        }

      if (max_threaded_paths <= 0)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of previously recorded FSM paths to thread "
                     "exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
          continue;
        }

      /* Add BBI to the path.  */
      vec_safe_push (path, bbi);
      ++path_length;

      int n_insns = 0;
      gimple_stmt_iterator gsi;
      int j;
      loop_p loop = (*path)[0]->loop_father;
      bool path_crosses_loops = false;

      /* Count the number of instructions on the path: as these instructions
         will have to be duplicated, we will not record the path if there
         are too many instructions on the path.  Also check that all the
         blocks in the path belong to a single loop.  */
      for (j = 1; j < path_length - 1; j++)
        {
          basic_block bb = (*path)[j];

          if (bb->loop_father != loop)
            {
              path_crosses_loops = true;
              break;
            }

          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple stmt = gsi_stmt (gsi);
              /* Do not count empty statements and labels.  */
              if (gimple_code (stmt) != GIMPLE_NOP
                  && gimple_code (stmt) != GIMPLE_LABEL
                  && !is_gimple_debug (stmt))
                ++n_insns;
            }
        }

      if (path_crosses_loops)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the path crosses loops.\n");
          path->pop ();
          continue;
        }

      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of instructions on the path "
                     "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
          path->pop ();
          continue;
        }

      vec<jump_thread_edge *> *jump_thread_path
        = new vec<jump_thread_edge *> ();

      /* Record the edges between the blocks in PATH.  */
      for (j = 0; j < path_length - 1; j++)
        {
          edge e = find_edge ((*path)[path_length - j - 1],
                              (*path)[path_length - j - 2]);
          gcc_assert (e);
          jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
          jump_thread_path->safe_push (x);
        }

      /* Add the edge taken when the control variable has value ARG.  */
      edge taken_edge = find_taken_edge ((*path)[0], arg);
      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      jump_thread_path->safe_push (x);

      register_jump_thread (jump_thread_path);
      --max_threaded_paths;

      /* Remove BBI from the path.  */
      path->pop ();
    }

  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}
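
/* The canonical shape this catches is a state machine loop, sketched
   here with hypothetical names (illustrative only):

     <bb 3>:                          ;; loop header
       state_1 = PHI <0(2), 2(7), state_9(8)>
       switch (state_1) ...

   Tracing STATE_1 back through the PHI finds the constant argument 2
   arriving from bb 7, so the path from bb 7 through the header can be
   threaded directly to the switch case for state 2, eliminating the
   runtime dispatch for that path.  */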

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as
   scratch, to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during
   the walk of E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   A positive return value is success.  Zero indicates failure, but
   the block can still be duplicated as a joiner in a jump thread path;
   a negative value indicates the block should not be duplicated and
   thus is not suitable for a joiner in a jump threading path.  */

static int
thread_through_normal_block (edge e,
                             gcond *dummy_cond,
                             bool handle_dominating_asserts,
                             const_and_copies *const_and_copies,
                             tree (*simplify) (gimple, gimple),
                             vec<jump_thread_edge *> *path,
                             bitmap visited,
                             bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies,
                                                        simplify,
                                                        *backedge_seen_p);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any statements, but
         does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
          && !gsi_end_p (gsi_start_phis (e->dest)))
        return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (!cond)
        return 0;

      if (is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return 0;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is
             the first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
          if (*backedge_seen_p)
            simplify = dummy_simplify;

          /* See if we can thread through DEST as well, this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.

             We don't want to thread back to a block we have already
             visited.  This may be overly conservative.  */
          bitmap_set_bit (visited, dest->index);
          bitmap_set_bit (visited, e->dest->index);
          thread_around_empty_blocks (taken_edge,
                                      dummy_cond,
                                      handle_dominating_asserts,
                                      simplify,
                                      visited,
                                      path,
                                      backedge_seen_p);
          return 1;
        }

      if (!flag_expensive_optimizations
          || optimize_function_for_size_p (cfun)
          || TREE_CODE (cond) != SSA_NAME
          || e->dest->loop_father != e->src->loop_father
          || loop_depth (e->dest->loop_father) == 0)
        return 0;

      /* When COND cannot be simplified, try to find paths from a control
         statement back through the PHI nodes which would affect that control
         statement.  */
      vec<basic_block, va_gc> *bb_path;
      vec_alloc (bb_path, n_basic_blocks_for_fn (cfun));
      vec_safe_push (bb_path, e->dest);
      hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

      max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
      fsm_find_control_statement_thread_paths (cond, visited_bbs, bb_path,
                                               false);

      delete visited_bbs;
      vec_free (bb_path);
    }
  return 0;
}
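
/* Tying the pieces together with an illustrative sketch (hypothetical
   names): arriving at E->dest via an edge where x_1 = 0 was recorded,

     <bb 6>:
       y_2 = x_1 + 1;
       if (y_2 == 1) goto <bb 8>; else goto <bb 9>;

   the statement walk records y_2 = 1, the condition folds to true, and
   the path [E, 6->8] is pushed with E->dest marked EDGE_COPY_SRC_BLOCK
   since its side effects must be preserved in the duplicate.  */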

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as
   scratch, to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during
   the walk of E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gcond *dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    const_and_copies *const_and_copies,
                    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
                                              handle_dominating_asserts,
                                              const_and_copies, simplify, path,
                                              visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
                                           e->dest);
      const_and_copies->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
         thus there should be no edges on the thread path and no need to walk
         through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
         duplicate.  Just quit now rather than trying to use the block as
         a joiner in a jump threading path.

         This prevents unnecessary code growth, but more importantly if we
         do not look at all the statements in the block, then we may have
         missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
        {
          BITMAP_FREE (visited);
          const_and_copies->pop_to_marker ();
          return;
        }
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back
     out based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          const_and_copies->pop_to_marker ();
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through
       it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Push a fresh marker so we can unwind the equivalences created
           for each of E->dest's successors.  */
        const_and_copies->push_marker ();

        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, e->src->index);
        bitmap_set_bit (visited, e->dest->index);
        bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        found = false;
        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (backedge_seen)
          simplify = dummy_simplify;
        found = thread_around_empty_blocks (taken_edge,
                                            dummy_cond,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited,
                                            path,
                                            &backedge_seen);

        if (backedge_seen)
          simplify = dummy_simplify;

        if (!found)
          found = thread_through_normal_block (path->last ()->e, dummy_cond,
                                               handle_dominating_asserts,
                                               const_and_copies, simplify,
                                               path, visited,
                                               &backedge_seen) > 0;

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          {
            delete_jump_thread_path (path);
          }

        /* And unwind the equivalence table.  */
        const_and_copies->pop_to_marker ();
      }
    BITMAP_FREE (visited);
  }

  const_and_copies->pop_to_marker ();
}
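
/* A final illustrative sketch of the joiner case handled above
   (hypothetical CFG): if E->dest merges two paths and then re-tests a
   condition already tested before the merge,

     <bb 5>:                          ;; joiner, preds bb 3 and bb 4
       z_1 = PHI <...>
       if (flag_2) ...                ;; flag_2 known on the edge 3->5

   the out edge cannot be determined for bb 5 in isolation, but for the
   specific predecessor edge 3->5 it can.  Bb 5 is then recorded as
   EDGE_COPY_SRC_JOINER_BLOCK: it is duplicated, the duplicate keyed to
   that predecessor, and the branch in the copy threaded onward.  */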