/* SSA Jump Threading
   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "vec.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "timevar.h"
#include "dumpfile.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-loop.h"
#include "builtins.h"
#include "cfg.h"
#include "cfganal.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  if (value && TREE_OVERFLOW_P (value))
    value = drop_tree_overflow (value);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}
/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Special case.  We can get blocks that are forwarders, but are
     not optimized away because they forward from outside a loop
     to the loop header.  We want to thread through them as we can
     sometimes thread to the loop exit, which is obviously profitable.
     The interesting case here is when the block has PHIs.  */
  if (gsi_end_p (gsi_start_nondebug_bb (bb))
      && !gsi_end_p (gsi_start_phis (bb)))
    return true;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
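
/* As an illustrative sketch (not taken from any particular testcase), a
   block such as the following is potentially threadable: it has multiple
   predecessors, multiple successors and ends in a GIMPLE_COND:

       <bb 4>:                          ;; preds: bb 2, bb 3
         a_5 = PHI <0(2), 1(3)>
         if (a_5 != 0)
           goto <bb 6>;
         else
           goto <bb 7>;

   When bb 4 is entered from bb 2 we know a_5 is 0, the conditional is
   statically false, and the edge 2->4 can be threaded directly to bb 7.  */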

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}
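
/* For instance (a hypothetical GIMPLE fragment), given

       x_2 = ASSERT_EXPR <x_1, x_1 > 5>;

   in a block that dominates BB, a use of x_1 in the condition at the end
   of BB can be replaced by x_2, which carries the tighter range
   information attached by VRP.  */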

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences into
   CONST_AND_COPIES.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, const_and_copies *const_and_copies)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;

      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
        stmt_count++;

      const_and_copies->record_const_or_copy (dst, src);
    }
  return true;
}
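
/* For example (hypothetical block and version numbers), when traversing
   the edge 2->4 into

       <bb 4>:
         x_3 = PHI <7(2), x_1(3)>

   we temporarily record the equivalence x_3 == 7, which may later allow
   the conditional at the end of bb 4 to be simplified.  The equivalence
   is only valid while reasoning about the edge 2->4 and is unwound once
   we are done with it.  */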

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        tree op2 = gimple_assign_rhs3 (stmt);

        /* Sadly, we have to handle conditional assignments specially
           here, because fold expects all the operands of an expression
           to be folded before the expression itself is folded, but we
           can't just substitute the folded condition here.  */
        if (gimple_assign_rhs_code (stmt) == COND_EXPR)
          op0 = fold (op0);

        return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}
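
/* A hypothetical illustration: after the caller temporarily substitutes
   known constants into the operands, a statement such as

       x_1 = a_2 + 5;        ;; with a_2 known to be 4 on this path

   becomes x_1 = 4 + 5 (a GIMPLE_BINARY_RHS), which the routine above
   folds to the constant 9.  If nothing can be folded it returns
   NULL_TREE.  */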

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences into
   CONST_AND_COPIES.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
						   const_and_copies *const_and_copies,
						   tree (*simplify) (gimple,
								     gimple),
						   bool backedge_seen)
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
          || gimple_code (stmt) == GIMPLE_LABEL
          || is_gimple_debug (stmt))
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        {
          /* STMT might still have DEFS and we need to invalidate any known
             equivalences for them.

             Consider if STMT is a GIMPLE_ASM with one or more outputs that
             feeds a conditional inside a loop.  We might derive an equivalence
             due to the conditional.  */
          tree op;
          ssa_op_iter iter;

          if (backedge_seen)
            FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_DEF)
              const_and_copies->invalidate (op);

          continue;
        }

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            {
              if (backedge_seen)
                {
                  tree lhs = gimple_get_lhs (stmt);
                  const_and_copies->invalidate (lhs);
                }
              continue;
            }
        }

      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the loop.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (as_a <gcall *> (stmt), false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.

         If we have traversed a backedge at some point during threading,
         then always enter something here.  Either a real equivalence,
         or a NULL_TREE equivalence which is effectively invalidation of
         prior equivalences.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        const_and_copies->record_const_or_copy (gimple_get_lhs (stmt), cached_lhs);
      else if (backedge_seen)
        const_and_copies->invalidate (gimple_get_lhs (stmt));
    }
  return stmt;
}
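
/* A hypothetical example of the simplification above: if traversing E
   recorded a_1 == 4 from a PHI, then for the statement

       b_2 = a_1 + 5;

   we temporarily replace a_1 with 4, fold the RHS to 9 and record the
   context sensitive equivalence b_2 == 9.  That in turn may let
   simplify_control_stmt_condition decide a later test such as
   if (b_2 > 7) statically.  */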

/* Once we have passed a backedge in the CFG when threading, we do not want to
   utilize edge equivalences for simplification purposes.  They are no longer
   necessarily valid.  We use this callback rather than the ones provided by
   DOM/VRP to achieve that effect.  */
static tree
dummy_simplify (gimple stmt1 ATTRIBUTE_UNUSED, gimple stmt2 ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple stmt,
				 gcond *dummy_cond,
				 tree (*simplify) (gimple, gimple),
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op0) == SSA_NAME
                  && SSA_NAME_VALUE (op0))
                op0 = SSA_NAME_VALUE (op0);
              else
                break;
            }
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (op1) == SSA_NAME
                  && SSA_NAME_VALUE (op1))
                op1 = SSA_NAME_VALUE (op1);
              else
                break;
            }
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          cond_code = swap_tree_comparison (cond_code);
          std::swap (op0, op1);
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions;
         we only care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs)
        {
          for (int i = 0; i < 2; i++)
            {
              if (TREE_CODE (cached_lhs) == SSA_NAME
                  && SSA_NAME_VALUE (cached_lhs))
                cached_lhs = SSA_NAME_VALUE (cached_lhs);
              else
                break;
            }
        }

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);

      /* We couldn't find an invariant.  But, callers of this
         function may be able to do something useful with the
         unmodified destination.  */
      if (!cached_lhs)
        cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
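
/* For instance (an illustrative sketch): if the traversal of E recorded
   x_1 == 0 and E->dest ends with

       if (x_1 != 0) goto <bb 5>; else goto <bb 6>;

   then the dummy conditional becomes 0 != 0, fold_binary reduces it to
   boolean_false_node and the caller knows the edge to bb 6 is taken.  */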

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
        break;

      tree var;

      if (gimple_debug_bind_p (stmt))
        var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
        var = gimple_debug_source_bind_get_var (stmt);
      else
        gcc_unreachable ();

      if (vars)
        vars->add (var);
      else
        fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
           !gsi_end_p (si); gsi_prev (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!is_gimple_debug (stmt))
            continue;

          tree var;

          if (gimple_debug_bind_p (stmt))
            var = gimple_debug_bind_get_var (stmt);
          else if (gimple_debug_source_bind_p (stmt))
            var = gimple_debug_source_bind_get_var (stmt);
          else
            gcc_unreachable ();

          /* Discard debug bind overlaps.  ??? Unlike stmts from src,
             copied into a new block that will precede BB, debug bind
             stmts in bypassed BBs may actually be discarded if
             they're overwritten by subsequent debug bind stmts, which
             might be a problem once we introduce stmt frontier notes
             or somesuch.  Adding `&& bb == src' to the condition
             below will preserve all potentially relevant debug
             notes.  */
          if (vars && vars->add (var))
            continue;
          else if (!vars)
            {
              int i = fewvars.length ();
              while (i--)
                if (fewvars[i] == var)
                  break;
              if (i >= 0)
                continue;

              if (fewvars.length () < (unsigned) alloc_count)
                fewvars.quick_push (var);
              else
                {
                  vars = new hash_set<tree>;
                  for (i = 0; i < alloc_count; i++)
                    vars->add (fewvars[i]);
                  fewvars.release ();
                  vars->add (var);
                }
            }

          stmt = gimple_copy (stmt);
          /* ??? Should we drop the location of the copy to denote
             they're artificial bindings? */
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
        }
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return FALSE.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
			    gcond *dummy_cond,
			    bool handle_dominating_asserts,
			    tree (*simplify) (gimple, gimple),
			    bitmap visited,
			    vec<jump_thread_edge *> *path,
			    bool *backedge_seen_p)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
        {
          taken_edge = single_succ_edge (bb);
          if (!bitmap_bit_p (visited, taken_edge->dest->index))
            {
              jump_thread_edge *x
                = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
              path->safe_push (x);
              bitmap_set_bit (visited, taken_edge->dest->index);
              *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
              if (*backedge_seen_p)
                simplify = dummy_simplify;
              return thread_around_empty_blocks (taken_edge,
                                                 dummy_cond,
                                                 handle_dominating_asserts,
                                                 simplify,
                                                 visited,
                                                 path,
                                                 backedge_seen_p);
            }
        }

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
					  simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
        return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);
      *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
      if (*backedge_seen_p)
        simplify = dummy_simplify;

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  handle_dominating_asserts,
				  simplify,
				  visited,
				  path,
				  backedge_seen_p);
      return true;
    }

  return false;
}
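
/* Sketch of the case handled above: once a thread reaches a forwarder
   block such as

       <bb 5>:                          ;; no PHIs, no statements
         goto <bb 8>;

   the block need not be copied; we simply append the edge 5->8 to the
   path with EDGE_NO_COPY_SRC_BLOCK and continue looking at bb 8.  */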

/* Return true if the CFG contains at least one path from START_BB to END_BB.
   When a path is found, record in PATH the blocks from END_BB to START_BB.
   VISITED_BBS is used to make sure we don't fall into an infinite loop.  Bound
   the recursion to basic blocks belonging to LOOP.  */

static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
		      vec<basic_block, va_gc> *&path,
		      hash_set<basic_block> *visited_bbs, loop_p loop)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
	if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
	  {
	    vec_safe_push (path, start_bb);
	    return true;
	  }
    }

  return false;
}
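
/* Note the order in which blocks are recorded: because blocks are pushed
   as the recursion unwinds, a successful search from, say, bb 7 to bb 9
   leaves PATH containing { bb 9, ..., bb 7 }, i.e. END_BB first and
   START_BB last, which is what the caller expects.  */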

static int max_threaded_paths;

/* We trace the value of the variable EXPR back through any phi nodes looking
   for places where it gets a constant value and save the path.  Stop after
   having recorded PARAM_MAX_FSM_THREAD_PATHS jump threading paths.  */

static void
fsm_find_control_statement_thread_paths (tree expr,
					 hash_set<basic_block> *visited_bbs,
					 vec<basic_block, va_gc> *&path,
					 bool seen_loop_phi)
{
  tree var = SSA_NAME_VAR (expr);
  gimple def_stmt = SSA_NAME_DEF_STMT (expr);
  basic_block var_bb = gimple_bb (def_stmt);

  if (var == NULL || var_bb == NULL)
    return;

  /* For the moment we assume that an SSA chain only contains phi nodes, and
     eventually one of the phi arguments will be an integer constant.  In the
     future, this could be extended to also handle simple assignments of
     arithmetic operations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI)
    return;

  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;

  gphi *phi = as_a <gphi *> (def_stmt);
  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();

  if (loop_containing_stmt (phi)->header == gimple_bb (phi))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
        return;
      seen_loop_phi = true;
    }

  /* Following the chain of SSA_NAME definitions, we jumped from a definition in
     LAST_BB_IN_PATH to a definition in VAR_BB.  When these basic blocks are
     different, append to PATH the blocks from LAST_BB_IN_PATH to VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      edge e;
      int e_count = 0;
      edge_iterator ei;
      vec<basic_block, va_gc> *next_path;
      vec_alloc (next_path, n_basic_blocks_for_fn (cfun));

      FOR_EACH_EDGE (e, ei, last_bb_in_path->preds)
        {
          hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

          if (fsm_find_thread_path (var_bb, e->src, next_path, visited_bbs,
                                    e->src->loop_father))
            ++e_count;

          delete visited_bbs;

          /* If there is more than one path, stop.  */
          if (e_count > 1)
            {
              vec_free (next_path);
              return;
            }
        }

      /* Stop if we have not found a path: this could occur when the recursion
         is stopped by one of the bounds.  */
      if (e_count == 0)
        {
          vec_free (next_path);
          return;
        }

      /* Append all the nodes from NEXT_PATH to PATH.  */
      vec_safe_splice (path, next_path);
      next_path_length = next_path->length ();
      vec_free (next_path);
    }

  gcc_assert (path->last () == var_bb);

  /* Iterate over the arguments of PHI.  */
  unsigned int i;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);
      basic_block bbi = gimple_phi_arg_edge (phi, i)->src;

      /* Skip edges pointing outside the current loop.  */
      if (!arg || var_bb->loop_father != bbi->loop_father)
        continue;

      if (TREE_CODE (arg) == SSA_NAME)
        {
          vec_safe_push (path, bbi);
          /* Recursively follow SSA_NAMEs looking for a constant definition.  */
          fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
                                                   seen_loop_phi);

          path->pop ();
          continue;
        }

      if (TREE_CODE (arg) != INTEGER_CST)
        continue;

      int path_length = path->length ();
      /* A path with fewer than two basic blocks should not be jump-threaded.  */
      if (path_length < 2)
        continue;

      if (path_length > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of basic blocks on the path "
                     "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
          continue;
        }

      if (max_threaded_paths <= 0)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of previously recorded FSM paths to thread "
                     "exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
          continue;
        }

      /* Add BBI to the path.  */
      vec_safe_push (path, bbi);
      ++path_length;

      int n_insns = 0;
      gimple_stmt_iterator gsi;
      int j;
      loop_p loop = (*path)[0]->loop_father;
      bool path_crosses_loops = false;

      /* Count the number of instructions on the path: as these instructions
         will have to be duplicated, we will not record the path if there are
         too many instructions on the path.  Also check that all the blocks in
         the path belong to a single loop.  */
      for (j = 1; j < path_length - 1; j++)
        {
          basic_block bb = (*path)[j];

          if (bb->loop_father != loop)
            {
              path_crosses_loops = true;
              break;
            }

          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple stmt = gsi_stmt (gsi);
              /* Do not count empty statements and labels.  */
              if (gimple_code (stmt) != GIMPLE_NOP
                  && gimple_code (stmt) != GIMPLE_LABEL
                  && !is_gimple_debug (stmt))
                ++n_insns;
            }
        }

      if (path_crosses_loops)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the path crosses loops.\n");
          path->pop ();
          continue;
        }

      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of instructions on the path "
                     "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
          path->pop ();
          continue;
        }

      vec<jump_thread_edge *> *jump_thread_path
        = new vec<jump_thread_edge *> ();

      /* Record the edges between the blocks in PATH.  */
      for (j = 0; j < path_length - 1; j++)
        {
          edge e = find_edge ((*path)[path_length - j - 1],
                              (*path)[path_length - j - 2]);
          gcc_assert (e);
          jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
          jump_thread_path->safe_push (x);
        }

      /* Add the edge taken when the control variable has value ARG.  */
      edge taken_edge = find_taken_edge ((*path)[0], arg);
      jump_thread_edge *x
        = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      jump_thread_path->safe_push (x);

      register_jump_thread (jump_thread_path);
      --max_threaded_paths;

      /* Remove BBI from the path.  */
      path->pop ();
    }

  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}
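
/* The kind of code the FSM path discovery above is aimed at (a generic
   sketch, not a specific testcase) is a state machine of the form:

       int state = 0;
       while (1)
         {
           switch (state)
             {
             case 0: ... state = 2; break;
             case 1: ... state = 0; break;
             case 2: ... break;
             }
         }

   Each assignment to STATE reaches the switch through a loop PHI; by
   walking from the switch index back to those constant assignments we can
   thread the known state transitions and bypass the switch dispatch on
   such iterations.  */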

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path,
   negative indicates the block should not be duplicated and thus is not
   suitable for a joiner in a jump threading path.  */

static int
thread_through_normal_block (edge e,
			     gcond *dummy_cond,
			     bool handle_dominating_asserts,
			     const_and_copies *const_and_copies,
			     tree (*simplify) (gimple, gimple),
			     vec<jump_thread_edge *> *path,
			     bitmap visited,
			     bool *backedge_seen_p)
{
  /* If we have traversed a backedge, then we do not want to look
     at certain expressions in the table that can not be relied upon.
     Luckily the only code that looked at those expressions is the
     SIMPLIFY callback, which we replace if we can no longer use it.  */
  if (*backedge_seen_p)
    simplify = dummy_simplify;

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e, const_and_copies))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, const_and_copies, simplify,
							 *backedge_seen_p);

  /* There are two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable).  If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The block simply doesn't have any statements, but
         does have PHIs.  */
      if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
          && !gsi_end_p (gsi_start_phis (e->dest)))
        return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
					      handle_dominating_asserts);

      if (!cond)
        return 0;

      if (is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          /* DEST could be NULL for a computed jump to an absolute
             address.  */
          if (dest == NULL
              || dest == e->dest
              || bitmap_bit_p (visited, dest->index))
            return 0;

          /* Only push the EDGE_START_JUMP_THREAD marker if this is
             the first edge on the path.  */
          if (path->length () == 0)
            {
              jump_thread_edge *x
                = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
              path->safe_push (x);
              *backedge_seen_p |= ((e->flags & EDGE_DFS_BACK) != 0);
            }

          jump_thread_edge *x
            = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
          path->safe_push (x);
          *backedge_seen_p |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
          if (*backedge_seen_p)
            simplify = dummy_simplify;

          /* See if we can thread through DEST as well, this helps capture
             secondary effects of threading without having to re-run DOM or
             VRP.

             We don't want to thread back to a block we have already
             visited.  This may be overly conservative.  */
          bitmap_set_bit (visited, dest->index);
          bitmap_set_bit (visited, e->dest->index);
          thread_around_empty_blocks (taken_edge,
                                      dummy_cond,
                                      handle_dominating_asserts,
                                      simplify,
                                      visited,
                                      path,
                                      backedge_seen_p);
          return 1;
        }

      if (!flag_expensive_optimizations
          || optimize_function_for_size_p (cfun)
          || TREE_CODE (cond) != SSA_NAME
          || e->dest->loop_father != e->src->loop_father
          || loop_depth (e->dest->loop_father) == 0)
        return 0;

      /* When COND cannot be simplified, try to find paths from a control
         statement back through the PHI nodes which would affect that control
         statement.  */
      vec<basic_block, va_gc> *bb_path;
      vec_alloc (bb_path, n_basic_blocks_for_fn (cfun));
      vec_safe_push (bb_path, e->dest);
      hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

      max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
      fsm_find_control_statement_thread_paths (cond, visited_bbs, bb_path,
					       false);

      delete visited_bbs;
      vec_free (bb_path);
    }
  return 0;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   CONST_AND_COPIES is used to undo temporary equivalences created during the
   walk of E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gcond *dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    const_and_copies *const_and_copies,
		    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);
  bool backedge_seen;

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
  if (backedge_seen)
    simplify = dummy_simplify;

  int threaded = thread_through_normal_block (e, dummy_cond,
					      handle_dominating_asserts,
					      const_and_copies, simplify, path,
					      visited, &backedge_seen);
  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      const_and_copies->pop_to_marker ();
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* Negative and zero return values indicate no threading was possible,
         thus there should be no edges on the thread path and no need to walk
         through the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
      delete path;

      /* A negative status indicates the target block was deemed too big to
         duplicate.  Just quit now rather than trying to use the block as
         a joiner in a jump threading path.

         This prevents unnecessary code growth, but more importantly if we
         do not look at all the statements in the block, then we may have
         missed some invalidations if we had traversed a backedge!  */
      if (threaded < 0)
        {
          BITMAP_FREE (visited);
          const_and_copies->pop_to_marker ();
          return;
        }
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
        {
          const_and_copies->pop_to_marker ();
          BITMAP_FREE (visited);
          return;
        }

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
        /* Push a fresh marker so we can unwind the equivalences created
           for each of E->dest's successors.  */
        const_and_copies->push_marker ();

        /* Avoid threading to any block we have already visited.  */
        bitmap_clear (visited);
        bitmap_set_bit (visited, e->src->index);
        bitmap_set_bit (visited, e->dest->index);
        bitmap_set_bit (visited, taken_edge->dest->index);
        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

        /* Record whether or not we were able to thread through a successor
           of E->dest.  */
        jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
        path->safe_push (x);

        x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
        path->safe_push (x);
        found = false;
        backedge_seen = ((e->flags & EDGE_DFS_BACK) != 0);
        backedge_seen |= ((taken_edge->flags & EDGE_DFS_BACK) != 0);
        if (backedge_seen)
          simplify = dummy_simplify;
        found = thread_around_empty_blocks (taken_edge,
                                            dummy_cond,
                                            handle_dominating_asserts,
                                            simplify,
                                            visited,
                                            path,
                                            &backedge_seen);

        if (backedge_seen)
          simplify = dummy_simplify;

        if (!found)
          found = thread_through_normal_block (path->last ()->e, dummy_cond,
                                               handle_dominating_asserts,
                                               const_and_copies, simplify, path, visited,
                                               &backedge_seen) > 0;

        /* If we were able to thread through a successor of E->dest, then
           record the jump threading opportunity.  */
        if (found)
          {
            propagate_threaded_block_debug_into (path->last ()->e->dest,
                                                 taken_edge->dest);
            register_jump_thread (path);
          }
        else
          {
            delete_jump_thread_path (path);
          }

        /* And unwind the equivalence table.  */
        const_and_copies->pop_to_marker ();
      }
    BITMAP_FREE (visited);
  }

  const_and_copies->pop_to_marker ();
}