/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-ssa-threadupdate.h"
/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
	the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
	except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
	argument associated with edge B->C.  Associate the new PHI arguments
	with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
	PHI_RESULT.  Add an argument to the PHI in B' which has the same
	value as the PHI in B associated with the edge A->B.  Associate
	the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

	5a. This automatically deletes any PHI arguments associated with the
	    edge A->B in B.

	5b. This automatically associates each new argument added in step 4
	    with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   We reduce the number of edges and statements we create by not copying all
   the outgoing edges and the control statement in step #1.  We instead create
   a template block without the outgoing edges and duplicate the template.

   Another case this code handles is threading through a "joiner" block.  In
   this case, we do not know the destination of the joiner block, but one
   of the outgoing edges from the joiner block leads to a threadable path.  This
   case largely works as outlined above, except the duplicate of the joiner
   block still contains a full set of outgoing edges and its control statement.
   We just redirect one of its outgoing edges to our jump threading path.  */
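/* An illustrative instance of the above (assumed GIMPLE, not from the
   original sources): if B contains a side effect and a condition that
   the edge A->B makes constant,

       B:  x_1 = foo ();
	   if (flag_2) goto C; else goto D;

   then the duplicate B' keeps the call but loses the control statement,
   falling through to C:

       B': x_1' = foo ();

   and A is redirected to B', with the PHIs in C updated as described in
   steps #3-#5.  */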
/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */

struct el
{
  edge e;
  struct el *next;
};
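/* Illustrative usage sketch (not from the original sources): given a
   hash table entry RD, steps #5 and #6 reduce to one walk of its
   incoming edge list:

       for (struct el *el = rd->incoming_edges; el; el = el->next)
	 redirect_edge_and_branch (el->e, rd->dup_blocks[0]);

   which is essentially what ssa_redirect_edges below does.  */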
/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : free_ptr_hash<redirection_data>
{
  /* We support wiring up two block duplicates in a jump threading path.

     One is a normal block copy where we remove the control statement
     and wire up its single remaining outgoing edge to the thread path.

     The other is a joiner block where we leave the control statement
     in place, but wire one of the outgoing edges to a thread path.

     In theory we could have multiple block duplicates in a jump
     threading path, but I haven't tried that.

     The duplicate blocks appear in this array in the same order in
     which they appear in the jump thread path.  */
  basic_block dup_blocks[2];

  /* The jump threading path.  */
  vec<jump_thread_edge *> *path;

  /* A list of incoming edges which we want to thread to the
     same path.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  static inline hashval_t hash (const redirection_data *);
  static inline int equal (const redirection_data *, const redirection_data *);
};
/* Dump a jump threading path, including annotations about each
   edge in the path.  */

static void
dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
		       bool registering)
{
  fprintf (dump_file,
	   "  %s%s jump thread: (%d, %d) incoming edge; ",
	   (registering ? "Registering" : "Cancelling"),
	   (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
	   path[0]->e->src->index, path[0]->e->dest->index);

  for (unsigned int i = 1; i < path.length (); i++)
    {
      /* We can get paths with a NULL edge when the final destination
	 of a jump thread turns out to be a constant address.  We dump
	 those paths when debugging, so we have to be prepared for that
	 possibility here.  */
      if (path[i]->e == NULL)
	continue;

      if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	fprintf (dump_file, " (%d, %d) joiner; ",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) normal;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) nocopy;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[0]->type == EDGE_FSM_THREAD)
	fprintf (dump_file, " (%d, %d) ",
		 path[i]->e->src->index, path[i]->e->dest->index);
    }
  fputc ('\n', dump_file);
}
/* Simple hashing function.  For any given incoming edge E, we're going
   to be most concerned with the final destination of its jump thread
   path.  So hash on the block index of the final edge in the path.  */

inline hashval_t
redirection_data::hash (const redirection_data *p)
{
  vec<jump_thread_edge *> *path = p->path;
  return path->last ()->e->dest->index;
}
/* Given two hash table entries, return true if they have the same
   jump threading path.  */
inline int
redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
{
  vec<jump_thread_edge *> *path1 = p1->path;
  vec<jump_thread_edge *> *path2 = p2->path;

  if (path1->length () != path2->length ())
    return false;

  for (unsigned int i = 1; i < path1->length (); i++)
    {
      if ((*path1)[i]->type != (*path2)[i]->type
	  || (*path1)[i]->e != (*path2)[i]->e)
	return false;
    }

  return true;
}
/* Rather than search all the edges in jump thread paths each time
   DOM is able to simplify a control statement, we build a hash table
   with the deleted edges.  We only care about the address of the edge,
   not its contents.  */
struct removed_edges : nofree_ptr_hash<edge_def>
{
  static hashval_t hash (edge e) { return htab_hash_pointer (e); }
  static bool equal (edge e1, edge e2) { return e1 == e2; }
};

static hash_table<removed_edges> *removed_edges;
/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* We only create a template block for the first duplicated block in a
     jump threading path as we may need many duplicates of that block.

     The second duplicate block in a path is specific to that path.  Creating
     and sharing a template for that block is considerably more difficult.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;

  /* Blocks duplicated for the thread.  */
  bitmap duplicate_blocks;
};
/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
static vec<vec<jump_thread_edge *> *> paths;

/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)

/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;
/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
	remove_edge (e);
      else
	{
	  /* If the remaining edge is a loop exit, there must have
	     been a removed edge that was not a loop exit.

	     In that case BB and possibly other blocks were previously
	     in the loop, but are now outside the loop.  Thus, we need
	     to update the loop structures.  */
	  if (single_succ_p (bb)
	      && loop_outer (bb->loop_father)
	      && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
	    loops_state_set (LOOPS_NEED_FIXUP);
	  ei_next (&ei);
	}
    }
}
/* Create a duplicate of BB.  Record the duplicate block in an array
   indexed by COUNT stored in RD.  */

static void
create_block_for_threading (basic_block bb,
			    struct redirection_data *rd,
			    unsigned int count,
			    bitmap *duplicate_blocks)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
    e->aux = NULL;

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_blocks[count]->frequency = 0;
  rd->dup_blocks[count]->count = 0;
  if (duplicate_blocks)
    bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
}
/* Main data structure to hold information for duplicates of BB.  */

static hash_table<redirection_data> *redirection_data;

/* Given an outgoing edge E lookup and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  INCOMING_EDGE is added to the list of incoming
   edges associated with E in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->path = path;
  elt->dup_blocks[0] = NULL;
  elt->dup_blocks[1] = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data->find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
	 relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
	 to the list of incoming edges associated with E.  */
      if (insert)
	{
	  struct el *el = XNEW (struct el);
	  el->next = elt->incoming_edges;
	  el->e = e;
	  elt->incoming_edges = el;
	}

      return elt;
    }
}
/* Similar to copy_phi_args, except that the PHI arg exists, it just
   does not have a value associated with it.  */

static void
copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
{
  int src_idx = src_e->dest_idx;
  int tgt_idx = tgt_e->dest_idx;

  /* Iterate over each PHI in e->dest.  */
  for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
	     gsi2 = gsi_start_phis (tgt_e->dest);
       !gsi_end_p (gsi);
       gsi_next (&gsi), gsi_next (&gsi2))
    {
      gphi *src_phi = gsi.phi ();
      gphi *dest_phi = gsi2.phi ();
      tree val = gimple_phi_arg_def (src_phi, src_idx);
      source_location locus = gimple_phi_arg_location (src_phi, src_idx);

      SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
      gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
    }
}
/* Given ssa_name DEF, backtrack jump threading PATH from node IDX
   to see if it has constant value in a flow sensitive manner.  Set
   LOCUS to location of the constant phi arg and return the value.
   Return DEF directly if either PATH or idx is ZERO.  */

static tree
get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
			 basic_block bb, int idx, source_location *locus)
{
  tree arg;
  gphi *def_phi;
  basic_block def_bb;

  if (path == NULL || idx == 0)
    return def;

  def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
  if (!def_phi)
    return def;

  def_bb = gimple_bb (def_phi);
  /* Don't propagate loop invariants into deeper loops.  */
  if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
    return def;

  /* Backtrack jump threading path from IDX to see if def has constant
     value.  */
  for (int j = idx - 1; j >= 0; j--)
    {
      edge e = (*path)[j]->e;
      if (e->dest == def_bb)
	{
	  arg = gimple_phi_arg_def (def_phi, e->dest_idx);
	  if (is_gimple_min_invariant (arg))
	    {
	      *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
	      return arg;
	    }
	  break;
	}
    }

  return def;
}
/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
   Try to backtrack jump threading PATH from node IDX to see if the arg
   has constant value, copy constant value instead of argument itself
   if yes.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
	       vec<jump_thread_edge *> *path, int idx)
{
  gphi_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree def = gimple_phi_arg_def (phi, src_indx);
      source_location locus = gimple_phi_arg_location (phi, src_indx);

      if (TREE_CODE (def) == SSA_NAME
	  && !virtual_operand_p (gimple_phi_result (phi)))
	def = get_value_locus_in_path (def, path, bb, idx, &locus);

      add_phi_arg (phi, def, tgt_e, locus);
    }
}
/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.
   PATH and IDX are used to check if the new PHI argument has constant
   value in a flow sensitive manner.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb,
			 vec<jump_thread_edge *> *path, int idx)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2, path, idx);
    }
}
/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  IDX is the start node in jump threading path
   we start to check to see if the new PHI argument has constant
   value along the jump threading path.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
					 basic_block bb, int idx)
{
  edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = bb->count;

  /* We used to copy the thread path here.  That was added in 2007
     and dutifully updated through the representation changes in 2013.

     In 2013 we added code to thread from an interior node through
     the backedge to another interior node.  That runs after the code
     to thread through loop headers from outside the loop.

     The latter may delete edges in the CFG, including those
     which appeared in the jump threading path we copied here.  Thus
     we'd end up using a dangling pointer.

     After reviewing the 2007/2011 code, I can't see how anything
     depended on copying the AUX field and clearly copying the jump
     threading path is problematical due to embedded edge pointers.
     It has been removed.  */

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
}
/* Look through PATH beginning at START and return TRUE if there are
   any additional blocks that need to be duplicated.  Otherwise,
   return FALSE.  */
static bool
any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
				 unsigned int start)
{
  for (unsigned int i = start + 1; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	return true;
    }
  return false;
}
/* Compute the amount of profile count/frequency coming into the jump threading
   path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
   PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
   duplicated path, returned in PATH_OUT_COUNT_PTR.  LOCAL_INFO is used to
   identify blocks duplicated for jump threading, which have duplicated
   edges that need to be ignored in the analysis.  Return true if path contains
   a joiner, false otherwise.

   In the non-joiner case, this is straightforward - all the counts/frequency
   flowing into the jump threading path should flow through the duplicated
   block and out of the duplicated path.

   In the joiner case, it is very tricky.  Some of the counts flowing into
   the original path go offpath at the joiner.  The problem is that while
   we know how much total count goes off-path in the original control flow,
   we don't know how many of the counts corresponding to just the jump
   threading path go offpath at the joiner.

   For example, assume we have the following control flow and identified
   jump threading paths:

		A	   B	       C
		 \	   |	      /
	       Ea \	   |Eb	     / Ec
		   \	   |	    /
		    v	   v	   v
			   J		<-- Joiner
			  / \
		     Eoff/   \Eon
			/     \
		       v       v
		     Soff     Son	<-- Normal
			      / \
			   Ed/	 \Ee
			    /	  \
			   v	   v
			   D	   E

	    Jump threading paths: A -> J -> Son -> D (path 1)
				  C -> J -> Son -> E (path 2)

   Note that the control flow could be more complicated:
   - Each jump threading path may have more than one incoming edge.  I.e. A and
     Ea could represent multiple incoming blocks/edges that are included in
     path 1.

   - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
     before or after the "normal" copy block).  These are not duplicated onto
     the jump threading path, as they are single-successor.

   - Any of the blocks along the path may have other incoming edges that
     are not part of any jump threading path, but add profile counts along
     the path.

   In the above example, after all jump threading is complete, we will
   end up with the following control flow:

		A	   B	       C
		|	   |	       |
	      Ea|	   |Eb	       |Ec
		|	   |	       |
		v	   v	       v
	       Ja	   J	      Jc
	       / \	  / \Eon'     / \
	  Eona/	  \   ---/---\-------    \Eonc
	     /	   \ /	      \		  \
	    v	    v	       v	   v
	  Sona	   Soff	      Son	  Sonc
	    \		      / \	   /
	     \___________  Ed/	 \Ee _____/
			 \  /	  \ /
			  v v	   v
			   D	   E

   The main issue to notice here is that when we are processing path 1
   (A->J->Son->D) we need to figure out the outgoing edge weights to
   the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
   sum of the incoming weights to D remain Ed.  The problem with simply
   assuming that Ja (and Jc when processing path 2) has the same outgoing
   probabilities to its successors as the original block J, is that after
   all paths are processed and other edges/counts removed (e.g. none
   of Ec will reach D after processing path 2), we may end up with not
   enough count flowing along duplicated edge Sona->D.

   Therefore, in the case of a joiner, we keep track of all counts
   coming in along the current path, as well as from predecessors not
   on any jump threading path (Eb in the above example).  While we
   first assume that the duplicated Eona for Ja->Sona has the same
   probability as the original, we later compensate for other jump
   threading paths that may eliminate edges.  We do that by keeping track
   of all counts coming into the original path that are not in a jump
   thread (Eb in the above example, but as noted earlier, there could
   be other predecessors incoming to the path at various points, such
   as at Son).  Call this cumulative non-path count coming into the path
   before D as Enonpath.  We then ensure that the count from Sona->D is at
   least as big as (Ed - Enonpath), but no bigger than the minimum
   weight along the jump threading path.  The probabilities of both the
   original and duplicated joiner block J and Ja will be adjusted
   accordingly after the updates.  */
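/* A small worked example of the above (assumed numbers, not from the
   original sources): suppose J has count 100, with the current path
   contributing path_in_count = 80 and the non-path edge Eb contributing
   nonpath_count = 20, and suppose Ed has count 90.  The initial estimate
   scales the on-path edge by onpath_scale = 80/100.  Because every count
   reaching D other than Enonpath must come from this path, the code below
   raises path_out_count to at least Ed - Enonpath = 90 - 20 = 70, while
   never exceeding the minimum count seen along the path.  */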
static bool
compute_path_counts (struct redirection_data *rd,
		     ssa_local_info_t *local_info,
		     gcov_type *path_in_count_ptr,
		     gcov_type *path_out_count_ptr,
		     int *path_in_freq_ptr)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  gcov_type nonpath_count = 0;
  bool has_joiner = false;
  gcov_type path_in_count = 0;
  int path_in_freq = 0;

  /* Start by accumulating incoming edge counts to the path's first bb
     into a couple buckets:
	path_in_count: total count of incoming edges that flow into the
		       current path.
	nonpath_count: total count of incoming edges that are not
		       flowing along *any* path.  These are the counts
		       that will still flow along the original path after
		       all path duplication is done by potentially multiple
		       calls to this routine.
     (any other incoming edge counts are for a different jump threading
     path that will be handled by a later call to this routine.)
     To make this easier, start by recording all incoming edges that flow into
     the current path in a bitmap.  We could add up the path's incoming edge
     counts here, but we still need to walk all the first bb's incoming edges
     below to add up the counts of the other edges not included in this jump
     threading path.  */
  struct el *next, *el;
  bitmap in_edge_srcs = BITMAP_ALLOC (NULL);
  for (el = rd->incoming_edges; el; el = next)
    {
      next = el->next;
      bitmap_set_bit (in_edge_srcs, el->e->src->index);
    }

  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
      /* Simply check the incoming edge src against the set captured above.  */
      if (ein_path
	  && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
	{
	  /* It is necessary but not sufficient that the last path edges
	     are identical.  There may be different paths that share the
	     same last path edge in the case where the last edge has a nocopy
	     source block.  */
	  gcc_assert (ein_path->last ()->e == elast);
	  path_in_count += ein->count;
	  path_in_freq += EDGE_FREQUENCY (ein);
	}
      else if (!ein_path)
	{
	  /* Keep track of the incoming edges that are not on any jump-threading
	     path.  These counts will still flow out of original path after all
	     jump threading is complete.  */
	  nonpath_count += ein->count;
	}
    }

  /* This is needed due to insane incoming frequencies.  */
  if (path_in_freq > BB_FREQ_MAX)
    path_in_freq = BB_FREQ_MAX;

  BITMAP_FREE (in_edge_srcs);

  /* Now compute the fraction of the total count coming into the first
     path bb that is from the current threading path.  */
  gcov_type total_count = e->dest->count;
  /* Handle incoming profile insanities.  */
  if (total_count < path_in_count)
    path_in_count = total_count;
  int onpath_scale = GCOV_COMPUTE_SCALE (path_in_count, total_count);

  /* Walk the entire path to do some more computation in order to estimate
     how much of the path_in_count will flow out of the duplicated threading
     path.  In the non-joiner case this is straightforward (it should be
     the same as path_in_count, although we will handle incoming profile
     insanities by setting it equal to the minimum count along the path).

     In the joiner case, we need to estimate how much of the path_in_count
     will stay on the threading path after the joiner's conditional branch.
     We don't really know for sure how much of the counts
     associated with this path go to each successor of the joiner, but we'll
     estimate based on the fraction of the total count coming into the path
     bb was from the threading paths (computed above in onpath_scale).
     Afterwards, we will need to do some fixup to account for other threading
     paths and possible profile insanities.

     In order to estimate the joiner case's counts we also need to update
     nonpath_count with any additional counts coming into the path.  Other
     blocks along the path may have additional predecessors from outside
     the path.  */
  gcov_type path_out_count = path_in_count;
  gcov_type min_path_count = path_in_count;
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      gcov_type cur_count = epath->count;
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  has_joiner = true;
	  cur_count = apply_probability (cur_count, onpath_scale);
	}
      /* In the joiner case we need to update nonpath_count for any edges
	 coming into the path that will contribute to the count flowing
	 into the path successor.  */
      if (has_joiner && epath != elast)
	{
	  /* Look for other incoming edges after joiner.  */
	  FOR_EACH_EDGE (ein, ei, epath->dest->preds)
	    {
	      if (ein != epath
		  /* Ignore in edges from blocks we have duplicated for a
		     threading path, which have duplicated edge counts until
		     they are redirected by an invocation of this routine.  */
		  && !bitmap_bit_p (local_info->duplicate_blocks,
				    ein->src->index))
		nonpath_count += ein->count;
	    }
	}
      if (cur_count < path_out_count)
	path_out_count = cur_count;
      if (epath->count < min_path_count)
	min_path_count = epath->count;
    }

  /* We computed path_out_count above assuming that this path targeted
     the joiner's on-path successor with the same likelihood as it
     reached the joiner.  However, other thread paths through the joiner
     may take a different path through the normal copy source block
     (i.e. they have a different elast), meaning that they do not
     contribute any counts to this path's elast.  As a result, it may
     turn out that this path must have more count flowing to the on-path
     successor of the joiner.  Essentially, all of this path's elast
     count must be contributed by this path and any nonpath counts
     (since any path through the joiner with a different elast will not
     include a copy of this elast in its duplicated path).
     So ensure that this path's path_out_count is at least the
     difference between elast->count and nonpath_count.  Otherwise the edge
     counts after threading will not be sane.  */
  if (has_joiner && path_out_count < elast->count - nonpath_count)
    path_out_count = elast->count - nonpath_count;
  /* But neither can we go above the minimum count along the path
     we are duplicating.  This can be an issue due to profile
     insanities coming in to this pass.  */
  if (path_out_count > min_path_count)
    path_out_count = min_path_count;

  *path_in_count_ptr = path_in_count;
  *path_out_count_ptr = path_out_count;
  *path_in_freq_ptr = path_in_freq;

  return has_joiner;
}
/* Update the counts and frequencies for both an original path
   edge EPATH and its duplicate EDUP.  The duplicate source block
   will get a count/frequency of PATH_IN_COUNT and PATH_IN_FREQ,
   and the duplicate edge EDUP will have a count of PATH_OUT_COUNT.  */
static void
update_profile (edge epath, edge edup, gcov_type path_in_count,
		gcov_type path_out_count, int path_in_freq)
{
  /* First update the duplicated block's count / frequency.  */
  if (edup)
    {
      basic_block dup_block = edup->src;
      gcc_assert (dup_block->count == 0);
      gcc_assert (dup_block->frequency == 0);
      dup_block->count = path_in_count;
      dup_block->frequency = path_in_freq;
    }

  /* Now update the original block's count and frequency in the
     opposite manner - remove the counts/freq that will flow
     into the duplicated block.  Handle underflow due to precision/
     rounding issues.  */
  epath->src->count -= path_in_count;
  if (epath->src->count < 0)
    epath->src->count = 0;
  epath->src->frequency -= path_in_freq;
  if (epath->src->frequency < 0)
    epath->src->frequency = 0;

  /* Next update this path edge's original and duplicated counts.  We know
     that the duplicated path will have path_out_count flowing
     out of it (in the joiner case this is the count along the duplicated path
     out of the duplicated joiner).  This count can then be removed from the
     original path edge.  */
  if (edup)
    edup->count = path_out_count;
  epath->count -= path_out_count;
  gcc_assert (epath->count >= 0);
}
/* The duplicate and original joiner blocks may end up with different
   probabilities (different from both the original and from each other).
   Recompute the probabilities here once we have updated the edge
   counts and frequencies.  */

static void
recompute_probabilities (basic_block bb)
{
  edge esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (esucc, ei, bb->succs)
    {
      if (!bb->count)
	continue;

      /* Prevent overflow computation due to insane profiles.  */
      if (esucc->count < bb->count)
	esucc->probability = GCOV_COMPUTE_SCALE (esucc->count,
						 bb->count);
      else
	/* Can happen with missing/guessed probabilities, since we
	   may determine that more is flowing along duplicated
	   path than joiner succ probabilities allowed.
	   Counts and freqs will be insane after jump threading,
	   at least make sure probability is sane or we will
	   get a flow verification error.
	   Not much we can do to make counts/freqs sane without
	   redoing the profile estimation.  */
	esucc->probability = REG_BR_PROB_BASE;
    }
}
/* Update the counts of the original and duplicated edges from a joiner
   that go off path, given that we have already determined that the
   duplicate joiner DUP_BB has incoming count PATH_IN_COUNT and
   outgoing count along the path PATH_OUT_COUNT.  The original (on-)path
   edge from joiner is EPATH.  */

static void
update_joiner_offpath_counts (edge epath, basic_block dup_bb,
			      gcov_type path_in_count,
			      gcov_type path_out_count)
{
  /* Compute the count that currently flows off path from the joiner.
     In other words, the total count of joiner's out edges other than
     epath.  Compute this by walking the successors instead of
     subtracting epath's count from the joiner bb count, since there
     are sometimes slight insanities where the total out edge count is
     larger than the bb count (possibly due to rounding/truncation
     errors).  */
  gcov_type total_orig_off_path_count = 0;
  edge enonpath;
  edge_iterator ei;
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      if (enonpath == epath)
	continue;
      total_orig_off_path_count += enonpath->count;
    }

  /* For the path that we are duplicating, the amount that will flow
     off path from the duplicated joiner is the delta between the
     path's cumulative in count and the portion of that count we
     estimated above as flowing from the joiner along the duplicated
     path.  */
  gcov_type total_dup_off_path_count = path_in_count - path_out_count;

  /* Now do the actual updates of the off-path edges.  */
  FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
    {
      /* Look for edges going off of the threading path.  */
      if (enonpath == epath)
	continue;

      /* Find the corresponding edge out of the duplicated joiner.  */
      edge enonpathdup = find_edge (dup_bb, enonpath->dest);
      gcc_assert (enonpathdup);

      /* We can't use the original probability of the joiner's out
	 edges, since the probabilities of the original branch
	 and the duplicated branches may vary after all threading is
	 complete.  But apportion the duplicated joiner's off-path
	 total edge count computed earlier (total_dup_off_path_count)
	 among the duplicated off-path edges based on their original
	 ratio to the full off-path count (total_orig_off_path_count).
	 */
      int scale = GCOV_COMPUTE_SCALE (enonpath->count,
				      total_orig_off_path_count);
      /* Give the duplicated offpath edge a portion of the duplicated
	 total.  */
      enonpathdup->count = apply_scale (scale,
					total_dup_off_path_count);
      /* Now update the original offpath edge count, handling underflow
	 due to rounding errors.  */
      enonpath->count -= enonpathdup->count;
      if (enonpath->count < 0)
	enonpath->count = 0;
    }
}
/* Check if the paths through RD all have estimated frequencies but zero
   profile counts.  This is more accurate than checking the entry block
   for a zero profile count, since profile insanities sometimes creep in.  */

static bool
estimated_freqs_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  bool non_zero_freq = false;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      if (ein->count)
	return false;
      non_zero_freq |= ein->src->frequency != 0;
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      if (epath->src->count)
	return false;
      non_zero_freq |= epath->src->frequency != 0;
      edge esucc;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	{
	  if (esucc->count)
	    return false;
	  non_zero_freq |= esucc->src->frequency != 0;
	}
    }
  return non_zero_freq;
}
/* Invoked for routines that have guessed frequencies and no profile
   counts to record the block and edge frequencies for paths through RD
   in the profile count fields of those blocks and edges.  This is because
   ssa_fix_duplicate_block_edges incrementally updates the block and
   edge counts as edges are redirected, and it is difficult to do that
   for edge frequencies which are computed on the fly from the source
   block frequency and probability.  When a block frequency is updated
   its outgoing edge frequencies are affected and become difficult to
   adjust.  */

static void
freqs_to_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the probability when the frequencies are very
	 small.  */
      ein->count = apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
				      ein->probability);
    }

  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      edge esucc;
      /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
	 errors applying the edge probability when the frequencies are very
	 small.  */
      epath->src->count = epath->src->frequency * REG_BR_PROB_BASE;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = apply_probability (esucc->src->count,
					  esucc->probability);
    }
}
/* For routines that have guessed frequencies and no profile counts, where we
   used freqs_to_counts_path to record block and edge frequencies for paths
   through RD, we clear the counts after completing all updates for RD.
   The updates in ssa_fix_duplicate_block_edges are based off the count fields,
   but the block frequencies and edge probabilities were updated as well,
   so we can simply clear the count fields.  */

static void
clear_counts_path (struct redirection_data *rd)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge ein, esucc;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    ein->count = 0;

  /* First clear counts along original path.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	esucc->count = 0;
      epath->src->count = 0;
    }
  /* Also need to clear the counts along duplicated path.  */
  for (unsigned int i = 0; i < 2; i++)
    {
      basic_block dup = rd->dup_blocks[i];
      if (!dup)
	continue;
      FOR_EACH_EDGE (esucc, ei, dup->succs)
	esucc->count = 0;
      dup->count = 0;
    }
}
/* Wire up the outgoing edges from the duplicate blocks and
   update any PHIs as needed.  Also update the profile counts
   on the original and duplicate blocks and edges.  */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
			       ssa_local_info_t *local_info)
{
  bool multi_incomings = (rd->incoming_edges->next != NULL);
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  gcov_type path_in_count = 0;
  gcov_type path_out_count = 0;
  int path_in_freq = 0;

  /* This routine updates profile counts, frequencies, and probabilities
     incrementally.  Since it is difficult to do the incremental updates
     using frequencies/probabilities alone, for routines without profile
     data we first take a snapshot of the existing block and edge frequencies
     by copying them into the empty profile count fields.  These counts are
     then used to do the incremental updates, and cleared at the end of this
     routine.  If the function is marked as having a profile, we still check
     to see if the paths through RD are using estimated frequencies because
     the routine had zero profile counts.  */
  bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
			     || estimated_freqs_path (rd));
  if (do_freqs_to_counts)
    freqs_to_counts_path (rd);

  /* First determine how much profile count to move from original
     path to the duplicate path.  This is tricky in the presence of
     a joiner (see comments for compute_path_counts), where some portion
     of the path's counts will flow off-path from the joiner.  In the
     non-joiner case the path_in_count and path_out_count should be the
     same.  */
  bool has_joiner = compute_path_counts (rd, local_info,
					 &path_in_count, &path_out_count,
					 &path_in_freq);

  int cur_path_freq = path_in_freq;
  for (unsigned int count = 0, i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;

      /* If we were threading through a joiner block, then we want
	 to keep its control statement and redirect an outgoing edge.
	 Else we want to remove the control statement & edges, then create
	 a new outgoing edge.  In both cases we may need to update PHIs.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  edge victim;
	  edge e2;

	  gcc_assert (has_joiner);

	  /* This updates the PHIs at the destination of the duplicate
	     block.  Pass 0 instead of i if we are threading a path which
	     has multiple incoming edges.  */
	  update_destination_phis (local_info->bb, rd->dup_blocks[count],
				   path, multi_incomings ? 0 : i);

	  /* Find the edge from the duplicate block to the block we're
	     threading through.  That's the edge we want to redirect.  */
	  victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);

	  /* If there are no remaining blocks on the path to duplicate,
	     then redirect VICTIM to the final destination of the jump
	     threading path.  */
	  if (!any_remaining_duplicated_blocks (path, i))
	    {
	      e2 = redirect_edge_and_branch (victim, elast->dest);
	      /* If we redirected the edge, then we need to copy PHI arguments
		 at the target.  If the edge already existed (e2 != victim
		 case), then the PHIs in the target already have the correct
		 arguments.  */
	      if (e2 == victim)
		copy_phi_args (e2->dest, elast, e2,
			       path, multi_incomings ? 0 : i);
	    }
	  else
	    {
	      /* Redirect VICTIM to the next duplicated block in the path.  */
	      e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);

	      /* We need to update the PHIs in the next duplicated block.  We
		 want the new PHI args to have the same value as they had
		 in the source of the next duplicate block.

		 Thus, we need to know which edge we traversed into the
		 source of the duplicate.  Furthermore, we may have
		 traversed many edges to reach the source of the duplicate.

		 Walk through the path starting at element I until we
		 hit an edge marked with EDGE_COPY_SRC_BLOCK.  We want
		 the edge from the prior element.  */
	      for (unsigned int j = i + 1; j < path->length (); j++)
		{
		  if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
		    {
		      copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
		      break;
		    }
		}
	    }

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  The path duplicate's
	     incoming count and frequency are the totals for all edges
	     incoming to this jump threading path computed earlier.
	     And we know that the duplicated path will have path_out_count
	     flowing out of it (i.e. along the duplicated path out of the
	     duplicated joiner).  */
	  update_profile (epath, e2, path_in_count, path_out_count,
			  path_in_freq);

	  /* Next we need to update the counts of the original and duplicated
	     edges from the joiner that go off path.  */
	  update_joiner_offpath_counts (epath, e2->src, path_in_count,
					path_out_count);

	  /* Finally, we need to set the probabilities on the duplicated
	     edges out of the duplicated joiner (e2->src).  The probabilities
	     along the original path will all be updated below after we finish
	     processing the whole path.  */
	  recompute_probabilities (e2->src);

	  /* Record the frequency flowing to the downstream duplicated
	     path blocks.  */
	  cur_path_freq = EDGE_FREQUENCY (e2);
	}
      else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
	  create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
						   multi_incomings ? 0 : i);
	  if (count == 1)
	    single_succ_edge (rd->dup_blocks[1])->aux = NULL;

	  /* Update the counts and frequency of both the original block
	     and path edge, and the duplicates.  Since we are now after
	     any joiner that may have existed on the path, the count
	     flowing along the duplicated threaded path is path_out_count.
	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
			  path_out_count, path_out_count,
			  cur_path_freq);
	}
      else
	{
	  /* No copy case.  In this case we don't have an equivalent block
	     on the duplicated thread path to update, but we do need
	     to remove the portion of the counts/freqs that were moved
	     to the duplicated path from the counts/freqs flowing through
	     this block on the original path.  Since all the no-copy edges
	     are after any joiner, the removed count is the same as
	     path_out_count.

	     If we didn't have a joiner, then cur_path_freq was the sum
	     of the total frequencies along all incoming edges to the
	     thread path (path_in_freq).  If we had a joiner, it would have
	     been updated at the end of that handling to the edge frequency
	     along the duplicated joiner path edge.  */
	  update_profile (epath, NULL, path_out_count, path_out_count,
			  cur_path_freq);
	}

      /* Increment the index into the duplicated path when we processed
	 a duplicated block.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	count++;
    }

  /* Now walk orig blocks and update their probabilities, since the
     counts and freqs should be updated properly by above loop.  */
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      recompute_probabilities (epath->src);
    }

  /* Done with all profile and frequency updates, clear counts if they
     were copied.  */
  if (do_freqs_to_counts)
    clear_counts_path (rd);
}
/* Hash table traversal callback routine to create duplicate blocks.  */

int
ssa_create_duplicates (struct redirection_data **slot,
		       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* The second duplicated block in a jump threading path is specific
     to the path.  So it gets stored in RD rather than in LOCAL_DATA.

     Each time we're called, we have to look through the path and see
     if a second block needs to be duplicated.

     Note the search starts with the third edge on the path.  The first
     edge is the incoming edge, the second edge always has its source
     duplicated.  Thus we start our search with the third edge.  */
  vec<jump_thread_edge *> *path = rd->path;
  for (unsigned int i = 2; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  create_block_for_threading ((*path)[i]->e->src, rd, 1,
				      &local_info->duplicate_blocks);
	  break;
	}
    }

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading ((*path)[1]->e->src, rd, 0,
				  &local_info->duplicate_blocks);
      local_info->template_block = rd->dup_blocks[0];

      /* We do not create any outgoing edges for the template.  We will
	 take care of that in a later traversal.  That way we do not
	 create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd, 0,
				  &local_info->duplicate_blocks);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
	 block.  */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}
/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

int
ssa_fixup_template_block (struct redirection_data **slot,
			  ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}
/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

int
ssa_redirect_edges (struct redirection_data **slot,
		    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this hash table
     entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      /* Go ahead and free this element from the list.  Doing this now
	 avoids the need for another list walk when we destroy the hash
	 table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;

      if (rd->dup_blocks[0])
	{
	  edge e2;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
		     e->src->index, e->dest->index, rd->dup_blocks[0]->index);

	  /* If we redirect a loop latch edge cancel its loop.  */
	  if (e->src == e->src->loop_father->latch)
	    mark_loop_for_removal (e->src->loop_father);

	  /* Redirect the incoming edge (possibly to the joiner block) to the
	     appropriate duplicate block.  */
	  e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
	  gcc_assert (e == e2);
	  flush_pending_stmts (e2);
	}

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
	 to clear it will cause all kinds of unpleasant problems later.  */
      delete_jump_thread_path (path);
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}
/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     || is_gimple_debug (gsi_stmt (gsi))
	     || gimple_nop_p (gsi_stmt (gsi))
	     || gimple_clobber_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}
/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   If NOLOOP_ONLY is true, we only perform the threading as long as it
   does not affect the structure of the loops in a nontrivial way.

   If JOINERS is true, then thread through joiner blocks as well.  */
static bool
thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e, e2;
  edge_iterator ei;
  ssa_local_info_t local_info;

  local_info.duplicate_blocks = BITMAP_ALLOC (NULL);

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data
    = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->aux == NULL)
	continue;

      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
	  || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
	continue;

      e2 = path->last ()->e;
      if (!e2 || noloop_only)
	{
	  /* If NOLOOP_ONLY is true, we only allow threading through the
	     header of a loop to exit edges.  */

	  /* One case occurs when there was loop header buried in a jump
	     threading path that crosses loop boundaries.  We do not try
	     and thread this elsewhere, so just cancel the jump threading
	     request by clearing the AUX field now.  */
	  if ((bb->loop_father != e2->src->loop_father
	       && !loop_exit_edge_p (e2->src->loop_father, e2))
	      || (e2->src->loop_father != e2->dest->loop_father
		  && !loop_exit_edge_p (e2->src->loop_father, e2)))
	    {
	      /* Since this case is not handled by our special code
		 to thread through a loop header, we must explicitly
		 cancel the threading request here.  */
	      delete_jump_thread_path (path);
	      e->aux = NULL;
	      continue;
	    }

	  /* Another case occurs when trying to thread through our
	     own loop header, possibly from inside the loop.  We will
	     thread these later.  */
	  unsigned int i;
	  for (i = 1; i < path->length (); i++)
	    {
	      if ((*path)[i]->e->src == bb->loop_father->header
		  && (!loop_exit_edge_p (bb->loop_father, e2)
		      || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
		break;
	    }

	  if (i != path->length ())
	    continue;
	}

      /* Insert the outgoing edge into the hash table if it is not
	 already in the hash table.  */
      lookup_redirection_data (e, INSERT);
    }

  /* We do not update dominance info.  */
  free_dominance_info (CDI_DOMINATORS);

  /* We know we only thread through the loop header to loop exits.
     Let the basic block duplication hook know we are not creating
     a multiple entry loop.  */
  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
			    (&local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes as the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
			    (&local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
			    (&local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  delete redirection_data;
  redirection_data = NULL;

  if (noloop_only
      && bb == bb->loop_father->header)
    set_loop_copy (bb->loop_father, NULL);

  BITMAP_FREE (local_info.duplicate_blocks);
  local_info.duplicate_blocks = NULL;

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}
/* Wrapper for thread_block_1 so that we can first handle jump
   thread paths which do not involve copying joiner blocks, then
   handle jump thread paths which have joiner blocks.

   By doing things this way we can be as aggressive as possible and
   not worry that copying a joiner block will create a jump threading
   opportunity.  */

static bool
thread_block (basic_block bb, bool noloop_only)
{
  bool retval;
  retval = thread_block_1 (bb, noloop_only, false);
  retval |= thread_block_1 (bb, noloop_only, true);
  return retval;
}
/* Threads edge E through E->dest to the edge THREAD_TARGET (E).  Returns the
   copy of E->dest created during threading, or E->dest if it was not necessary
   to copy it (E is its single predecessor).  */

static basic_block
thread_single_edge (edge e)
{
  basic_block bb = e->dest;
  struct redirection_data rd;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge eto = (*path)[1]->e;

  delete_jump_thread_path (path);
  e->aux = NULL;

  thread_stats.num_threaded_edges++;

  if (single_pred_p (bb))
    {
      /* If BB has just a single predecessor, we should only remove the
         control statements at its end, and successors except for ETO.  */
      remove_ctrl_stmt_and_useless_edges (bb, eto->dest);

      /* And fixup the flags on the single remaining edge.  */
      eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
      eto->flags |= EDGE_FALLTHRU;

      return bb;
    }

  /* Otherwise, we need to create a copy.  */
  if (e->dest == eto->src)
    update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);

  vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
  jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
  npath->safe_push (x);

  x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
  npath->safe_push (x);
  rd.path = npath;

  create_block_for_threading (bb, &rd, 0, NULL);
  remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
  create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0], 0);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
             e->src->index, e->dest->index, rd.dup_blocks[0]->index);

  rd.dup_blocks[0]->count = e->count;
  rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
  single_succ_edge (rd.dup_blocks[0])->count = e->count;
  redirect_edge_and_branch (e, rd.dup_blocks[0]);
  flush_pending_stmts (e);

  delete_jump_thread_path (npath);
  return rd.dup_blocks[0];
}
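
/* Illustrative note (added commentary on the function above): NPATH is a
   minimal two-entry jump threading path meaning "start at E, then copy
   E->dest and leave it through ETO".  It exists only so that the
   block-copying helpers, which expect a path hanging off RD, can be reused
   for this simple case; it is deleted as soon as the copy has been wired
   up.  */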
/* Callback for dfs_enumerate_from.  Returns true if BB is different
   from STOP and DBDS_CE_STOP.  */

static basic_block dbds_ce_stop;
static bool
dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
{
  return (bb != (const_basic_block) stop
          && bb != dbds_ce_stop);
}
/* Evaluates the dominance relationship of the latch of the LOOP and BB, and
   returns the state.  */

enum bb_dom_status
{
  /* BB does not dominate latch of the LOOP.  */
  DOMST_NONDOMINATING,

  /* The LOOP is broken (there is no path from the header to its latch).  */
  DOMST_LOOP_BROKEN,

  /* BB dominates the latch of the LOOP.  */
  DOMST_DOMINATING
};
static enum bb_dom_status
determine_bb_domination_status (struct loop *loop, basic_block bb)
{
  basic_block *bblocks;
  unsigned nblocks, i;
  bool bb_reachable = false;
  edge_iterator ei;
  edge e;

  /* This function assumes BB is a successor of LOOP->header.
     If that is not the case return DOMST_NONDOMINATING which
     is always safe.  */
    {
      bool ok = false;

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->src == loop->header)
            {
              ok = true;
              break;
            }
        }

      if (!ok)
        return DOMST_NONDOMINATING;
    }

  if (bb == loop->latch)
    return DOMST_DOMINATING;

  /* Check that BB dominates LOOP->latch, and that it is back-reachable
     from it.  */

  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
  dbds_ce_stop = loop->header;
  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
                                bblocks, loop->num_nodes, bb);
  for (i = 0; i < nblocks; i++)
    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
      {
        if (e->src == loop->header)
          {
            free (bblocks);
            return DOMST_NONDOMINATING;
          }
        if (e->src == bb)
          bb_reachable = true;
      }

  free (bblocks);
  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
}
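
/* Illustrative sketch (added commentary, hypothetical CFG): in a
   diamond-shaped loop body

       header
        /  \
       A    B
        \  /
        latch

   neither A nor B dominates the latch, so both yield DOMST_NONDOMINATING;
   with a single path header -> A -> latch, A dominates the latch and the
   answer is DOMST_DOMINATING.  */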
/* Return true if BB is part of the new pre-header that is created
   when threading the latch to DATA.  */

static bool
def_split_header_continue_p (const_basic_block bb, const void *data)
{
  const_basic_block new_header = (const_basic_block) data;
  const struct loop *l;

  if (bb == new_header
      || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
    return false;
  for (l = bb->loop_father; l; l = loop_outer (l))
    if (l == new_header->loop_father)
      return true;
  return false;
}
/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
   to the inside of the loop.  */

static bool
thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
{
  basic_block header = loop->header;
  edge e, tgt_edge, latch = loop_latch_edge (loop);
  edge_iterator ei;
  basic_block tgt_bb, atgt_bb;
  enum bb_dom_status domst;

  /* We have already threaded through headers to exits, so all the threading
     requests now are to the inside of the loop.  We need to avoid creating
     irreducible regions (i.e., loops with more than one entry block), and
     also loops with several latch edges, or new subloops of the loop (although
     there are cases where it might be appropriate, it is difficult to decide,
     and doing it wrongly may confuse other optimizers).

     We could handle more general cases here.  However, the intention is to
     preserve some information about the loop, which is impossible if its
     structure changes significantly, in a way that is not well understood.
     Thus we only handle a few important special cases, in which also updating
     of the loop-carried information should be feasible:

     1) Propagation of latch edge to a block that dominates the latch block
        of a loop.  This aims to handle the following idiom:

        first = 1;
        while (1)
          {
            if (first)
              initialize;
            first = 0;
            body;
          }

        After threading the latch edge, this becomes

        first = 1;
        if (first)
          initialize;
        while (1)
          {
            first = 0;
            body;
          }

        The original header of the loop is moved out of it, and we may thread
        the remaining edges through it without further constraints.

     2) All entry edges are propagated to a single basic block that dominates
        the latch block of the loop.  This aims to handle the following idiom
        (normally created for "for" loops):

        i = 0;
        while (1)
          {
            if (i >= 100)
              break;
            body;
            i++;
          }

        After threading the entry edges, this becomes

        i = 0;
        if (i >= 100)
          goto end;
        while (1)
          {
            body;
            i++;
            if (i >= 100)
              break;
          }
        end: ;
  */
  /* Threading through the header won't improve the code if the header has just
     one successor.  */
  if (single_succ_p (header))
    goto fail;

  /* If we threaded the latch using a joiner block, we cancel the
     threading opportunity out of an abundance of caution.  However,
     still allow threading from outside to inside the loop.  */
  if (latch->aux)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (latch);
      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
        {
          delete_jump_thread_path (path);
          latch->aux = NULL;
        }
    }

  if (latch->aux)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (latch);
      tgt_edge = (*path)[1]->e;
      tgt_bb = tgt_edge->dest;
    }
  else if (!may_peel_loop_headers
           && !redirection_block_p (loop->header))
    goto fail;
  else
    {
      tgt_bb = NULL;
      tgt_edge = NULL;
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (!e->aux)
            {
              if (e == latch)
                continue;

              /* If latch is not threaded, and there is a header
                 edge that is not threaded, we would create a loop
                 with multiple entries.  */
              goto fail;
            }

          vec<jump_thread_edge *> *path = THREAD_PATH (e);

          if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
            goto fail;
          tgt_edge = (*path)[1]->e;
          atgt_bb = tgt_edge->dest;
          if (!tgt_bb)
            tgt_bb = atgt_bb;
          /* Two targets of threading would make us create a loop
             with multiple entries.  */
          else if (tgt_bb != atgt_bb)
            goto fail;
        }

      if (!tgt_bb)
        {
          /* There are no threading requests.  */
          return false;
        }

      /* Redirecting to an empty loop latch is useless.  */
      if (tgt_bb == loop->latch
          && empty_block_p (loop->latch))
        goto fail;
    }

  /* The target block must dominate the loop latch, otherwise we would be
     creating a subloop.  */
  domst = determine_bb_domination_status (loop, tgt_bb);
  if (domst == DOMST_NONDOMINATING)
    goto fail;
  if (domst == DOMST_LOOP_BROKEN)
    {
      /* If the loop ceased to exist, mark it as such, and thread through its
         original header.  */
      mark_loop_for_removal (loop);
      return thread_block (header, false);
    }

  if (tgt_bb->loop_father->header == tgt_bb)
    {
      /* If the target of the threading is a header of a subloop, we need
         to create a preheader for it, so that the headers of the two loops
         do not merge.  */
      if (EDGE_COUNT (tgt_bb->preds) > 2)
        {
          tgt_bb = create_preheader (tgt_bb->loop_father, 0);
          gcc_assert (tgt_bb != NULL);
        }
      else
        tgt_bb = split_edge (tgt_edge);
    }
  if (latch->aux)
    {
      basic_block *bblocks;
      unsigned nblocks, i;

      /* First handle the case where the latch edge is redirected.  We are
         copying the loop header but not creating a multiple entry loop.
         Make the cfg manipulation code aware of that fact.  */
      set_loop_copy (loop, loop);
      loop->latch = thread_single_edge (latch);
      set_loop_copy (loop, NULL);
      gcc_assert (single_succ (loop->latch) == tgt_bb);
      loop->header = tgt_bb;

      /* Remove the new pre-header blocks from our loop.  */
      bblocks = XCNEWVEC (basic_block, loop->num_nodes);
      nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
                                    bblocks, loop->num_nodes, tgt_bb);
      for (i = 0; i < nblocks; i++)
        if (bblocks[i]->loop_father == loop)
          {
            remove_bb_from_loops (bblocks[i]);
            add_bb_to_loop (bblocks[i], loop_outer (loop));
          }
      free (bblocks);

      /* If the new header has multiple latches mark it so.  */
      FOR_EACH_EDGE (e, ei, loop->header->preds)
        if (e->src->loop_father == loop
            && e->src != loop->latch)
          {
            loop->latch = NULL;
            loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
          }

      /* Cancel remaining threading requests that would make the
         loop a multiple entry loop.  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          edge e2;

          if (!e->aux)
            continue;

          vec<jump_thread_edge *> *path = THREAD_PATH (e);
          e2 = path->last ()->e;

          if (e->src->loop_father != e2->dest->loop_father
              && e2->dest != loop->header)
            {
              delete_jump_thread_path (path);
              e->aux = NULL;
            }
        }

      /* Thread the remaining edges through the former header.  */
      thread_block (header, false);
    }
  else
    {
      basic_block new_preheader;

      /* Now consider the case where entry edges are redirected to the new
         entry block.  Remember one entry edge, so that we can find the new
         preheader (its destination after threading).  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (e->aux)
            break;
        }

      /* The duplicate of the header is the new preheader of the loop.  Ensure
         that it is placed correctly in the loop hierarchy.  */
      set_loop_copy (loop, loop_outer (loop));

      thread_block (header, false);
      set_loop_copy (loop, NULL);
      new_preheader = e->dest;

      /* Create the new latch block.  This is always necessary, as the latch
         must have only a single successor, but the original header had at
         least two successors.  */
      loop->latch = NULL;
      mfb_kj_edge = single_succ_edge (new_preheader);
      loop->header = mfb_kj_edge->dest;
      latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  return true;

fail:
  /* We failed to thread anything.  Cancel the requests.  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      if (path)
        {
          delete_jump_thread_path (path);
          e->aux = NULL;
        }
    }
  return false;
}
/* E1 and E2 are edges into the same basic block.  Return TRUE if the
   PHI arguments associated with those edges are equal or there are no
   PHI arguments, otherwise return FALSE.  */

static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
  gphi_iterator gsi;
  int indx1 = e1->dest_idx;
  int indx2 = e2->dest_idx;

  for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
                            gimple_phi_arg_def (phi, indx2), 0))
        return false;
    }
  return true;
}
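
/* Illustrative example (added commentary, hypothetical PHI): if E1->dest
   contains x_1 = PHI <7 (via E1), 7 (via E2)> the two argument slots compare
   equal and the function returns true; for x_1 = PHI <7 (via E1), 9 (via E2)>
   it returns false, since substituting one edge for the other would change
   the value x_1 receives.  */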
/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCK set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */

static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;
  bitmap_iterator bi;
  bitmap tmp = BITMAP_ALLOC (NULL);
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* It is possible to have jump threads in which one is a subpath
     of the other.  I.e., (A, B), (B, C), (C, D) where B is a joiner
     block and (B, C), (C, D) where no joiner block exists.

     When this occurs ignore the jump thread request with the joiner
     block.  It's totally subsumed by the simpler jump thread request.

     This results in less block copying and simpler CFGs.  More importantly,
     when we duplicate the joiner block, B, in this case we will create
     a new threading opportunity that we wouldn't be able to optimize
     until the next jump threading iteration.

     So first convert the jump thread requests which do not require a
     joiner block.  */
  for (i = 0; i < paths.length (); i++)
    {
      vec<jump_thread_edge *> *path = paths[i];

      if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
        {
          edge e = (*path)[0]->e;
          e->aux = (void *) path;
          bitmap_set_bit (tmp, e->dest->index);
        }
    }
  /* Now iterate again, converting cases where we want to thread
     through a joiner block, but only if no other edge on the path
     already has a jump thread attached to it.  We do this in two passes,
     to avoid situations where the order in the paths vec can hide overlapping
     threads (the path is recorded on the incoming edge, so we would miss
     cases where the second path starts at a downstream edge on the same
     path).  First record all joiner paths, deleting any in the unexpected
     case where there is already a path for that incoming edge.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];

      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
        {
          /* Attach the path to the starting edge if none is yet recorded.  */
          if ((*path)[0]->e->aux == NULL)
            {
              (*path)[0]->e->aux = path;
              i++;
            }
          else
            {
              paths.unordered_remove (i);
              if (dump_file && (dump_flags & TDF_DETAILS))
                dump_jump_thread_path (dump_file, *path, false);
              delete_jump_thread_path (path);
            }
        }
      else
        i++;
    }
  /* Second, look for paths that have any other jump thread attached to
     them, and either finish converting them or cancel them.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge e = (*path)[0]->e;

      if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
        {
          unsigned int j;
          for (j = 1; j < path->length (); j++)
            if ((*path)[j]->e->aux != NULL)
              break;

          /* If we iterated through the entire path without exiting the loop,
             then we are good to go, record it.  */
          if (j == path->length ())
            {
              bitmap_set_bit (tmp, e->dest->index);
              i++;
            }
          else
            {
              e->aux = NULL;
              paths.unordered_remove (i);
              if (dump_file && (dump_flags & TDF_DETAILS))
                dump_jump_thread_path (dump_file, *path, false);
              delete_jump_thread_path (path);
            }
        }
      else
        i++;
    }
  /* If optimizing for size, only thread through a block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_function_for_size_p (cfun))
    {
      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
        {
          bb = BASIC_BLOCK_FOR_FN (cfun, i);
          if (EDGE_COUNT (bb->preds) > 1
              && !redirection_block_p (bb))
            {
              FOR_EACH_EDGE (e, ei, bb->preds)
                {
                  if (e->aux)
                    {
                      vec<jump_thread_edge *> *path = THREAD_PATH (e);
                      delete_jump_thread_path (path);
                      e->aux = NULL;
                    }
                }
            }
          else
            bitmap_set_bit (threaded_blocks, i);
        }
    }
  else
    bitmap_copy (threaded_blocks, tmp);
  /* Look for jump threading paths which cross multiple loop headers.

     The code to thread through loop headers will change the CFG in ways
     that break assumptions made by the loop optimization code.

     We don't want to blindly cancel the requests.  We can instead do better
     by trimming off the end of the jump thread path.  */
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->aux)
            {
              vec<jump_thread_edge *> *path = THREAD_PATH (e);

              for (unsigned int i = 0, crossed_headers = 0;
                   i < path->length ();
                   i++)
                {
                  basic_block dest = (*path)[i]->e->dest;
                  crossed_headers += (dest == dest->loop_father->header);
                  if (crossed_headers > 1)
                    {
                      /* Trim from entry I onwards.  */
                      for (unsigned int j = i; j < path->length (); j++)
                        delete (*path)[j];
                      path->truncate (i);

                      /* Now that we've truncated the path, make sure
                         what's left is still valid.  We need at least
                         two edges on the path and the last edge can not
                         be a joiner.  This should never happen, but let's
                         be safe.  */
                      if (path->length () < 2
                          || (path->last ()->type
                              == EDGE_COPY_SRC_JOINER_BLOCK))
                        {
                          delete_jump_thread_path (path);
                          e->aux = NULL;
                        }
                      break;
                    }
                }
            }
        }
    }
  /* If we have a joiner block (J) which has two successors S1 and S2 and
     we are threading through S1 and the final destination of the thread
     is S2, then we must verify that any PHI nodes in S2 have the same
     PHI arguments for the edge J->S2 and J->S1->...->S2.

     We used to detect this prior to registering the jump thread, but
     that prohibits propagation of edge equivalences into non-dominated
     PHI nodes as the equivalency test might occur before propagation.

     This must also occur after we truncate any jump threading paths
     as this scenario may only show up after truncation.

     This works for now, but will need improvement as part of the FSA
     optimization.

     Note since we've moved the thread request data to the edges,
     we have to iterate on those rather than the threaded_edges vector.  */
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->aux)
            {
              vec<jump_thread_edge *> *path = THREAD_PATH (e);
              bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);

              if (have_joiner)
                {
                  basic_block joiner = e->dest;
                  edge final_edge = path->last ()->e;
                  basic_block final_dest = final_edge->dest;
                  edge e2 = find_edge (joiner, final_dest);

                  if (e2 && !phi_args_equal_on_edges (e2, final_edge))
                    {
                      delete_jump_thread_path (path);
                      e->aux = NULL;
                    }
                }
            }
        }
    }

  BITMAP_FREE (tmp);
}
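
/* The joiner check above corresponds to a CFG of roughly this shape
   (an illustrative sketch, added commentary):

          J
         / \
        S1  \
        ...  \
         \    \
          v    v
            S2

   The thread enters J and leaves through S1, eventually reaching S2.
   If a PHI in S2 selects different values for the direct edge J->S2 and
   for the final edge of the threaded path, realizing the thread would
   change the value that PHI produces, so the request is removed.  */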
/* Return TRUE if BB ends with a switch statement or a computed goto.
   Otherwise return FALSE.  */

static bool
bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
{
  gimple *stmt = last_stmt (bb);
  if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
    return true;
  if (stmt && gimple_code (stmt) == GIMPLE_GOTO
      && TREE_CODE (gimple_goto_dest (stmt)) == SSA_NAME)
    return true;
  return false;
}
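
/* For instance (illustrative note, added commentary), a C
   "switch (x) { ... }" is lowered to a GIMPLE_SWITCH, and a computed goto
   such as "goto *p;" becomes a GIMPLE_GOTO whose destination is an SSA_NAME
   rather than a label, so both forms are recognized here.  */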
/* Verify that the REGION is a valid jump thread.  A jump thread is a special
   case of a SEME (Single Entry Multiple Exits) region in which all nodes in
   the REGION have exactly one incoming edge.  The only exception is the first
   block that may not have been connected to the rest of the cfg yet.  */

DEBUG_FUNCTION void
verify_jump_thread (basic_block *region, unsigned n_region)
{
  for (unsigned i = 0; i < n_region; i++)
    gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
}
/* Return true when BB is one of the first N items in BBS.  */

static inline bool
bb_in_bbs (basic_block bb, basic_block *bbs, int n)
{
  for (int i = 0; i < n; i++)
    if (bb == bbs[i])
      return true;

  return false;
}
/* Duplicates a jump-thread path of N_REGION basic blocks.
   The ENTRY edge is redirected to the duplicate of the region.

   Remove the last conditional statement in the last basic block in the REGION,
   and create a single fallthru edge pointing to the same destination as the
   EXIT edge.

   The new basic blocks are stored to REGION_COPY in the same order as they had
   in REGION, provided that REGION_COPY is not NULL.

   Returns false if it is unable to copy the region, true otherwise.  */

static bool
duplicate_thread_path (edge entry, edge exit,
                       basic_block *region, unsigned n_region,
                       basic_block *region_copy)
{
  unsigned i;
  bool free_region_copy = false;
  struct loop *loop = entry->dest->loop_father;
  edge exit_copy;
  edge redirected;
  int total_freq = 0, entry_freq = 0;
  gcov_type total_count = 0, entry_count = 0;

  if (!can_copy_bbs_p (region, n_region))
    return false;

  /* Some sanity checking.  Note that we do not check for all possible
     misuses of the functions.  I.e. if you ask to copy something weird,
     it will work, but the state of structures probably will not be
     correct.  */
  for (i = 0; i < n_region; i++)
    {
      /* We do not handle subloops, i.e. all the blocks must belong to the
         same loop.  */
      if (region[i]->loop_father != loop)
        return false;
    }
  initialize_original_copy_tables ();

  set_loop_copy (loop, loop);

  if (!region_copy)
    {
      region_copy = XNEWVEC (basic_block, n_region);
      free_region_copy = true;
    }

  if (entry->dest->count)
    {
      total_count = entry->dest->count;
      entry_count = entry->count;
      /* Fix up corner cases, to avoid division by zero or creation of negative
         frequencies.  */
      if (entry_count > total_count)
        entry_count = total_count;
    }
  else
    {
      total_freq = entry->dest->frequency;
      entry_freq = EDGE_FREQUENCY (entry);
      /* Fix up corner cases, to avoid division by zero or creation of negative
         frequencies.  */
      if (total_freq == 0)
        total_freq = 1;
      else if (entry_freq > total_freq)
        entry_freq = total_freq;
    }
  copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
            split_edge_bb_loc (entry), false);

  /* Fix up: copy_bbs redirects all edges pointing to copied blocks.  The
     following code ensures that all the edges exiting the jump-thread path are
     redirected back to the original code: these edges are exceptions
     invalidating the property that is propagated by executing all the blocks of
     the jump-thread path in order.  */

  for (i = 0; i < n_region; i++)
    {
      edge e;
      edge_iterator ei;
      basic_block bb = region_copy[i];

      if (single_succ_p (bb))
        {
          /* Make sure the successor is the next node in the path.  */
          gcc_assert (i + 1 == n_region
                      || region_copy[i + 1] == single_succ_edge (bb)->dest);
          continue;
        }

      /* Special case the last block on the path: make sure that it does not
         jump back on the copied path.  */
      if (i + 1 == n_region)
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (bb_in_bbs (e->dest, region_copy, n_region - 1))
              {
                basic_block orig = get_bb_original (e->dest);
                if (orig)
                  redirect_edge_and_branch_force (e, orig);
              }
          continue;
        }

      /* Redirect all other edges jumping to non-adjacent blocks back to the
         original code.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (region_copy[i + 1] != e->dest)
          {
            basic_block orig = get_bb_original (e->dest);
            if (orig)
              redirect_edge_and_branch_force (e, orig);
          }
    }
  if (total_count)
    {
      scale_bbs_frequencies_gcov_type (region, n_region,
                                       total_count - entry_count,
                                       total_count);
      scale_bbs_frequencies_gcov_type (region_copy, n_region, entry_count,
                                       total_count);
    }
  else
    {
      scale_bbs_frequencies_int (region, n_region, total_freq - entry_freq,
                                 total_freq);
      scale_bbs_frequencies_int (region_copy, n_region, entry_freq, total_freq);
    }

  if (flag_checking)
    verify_jump_thread (region_copy, n_region);

  /* Remove the last branch in the jump thread path.  */
  remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);

  /* And fixup the flags on the single remaining edge.  */
  edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
  fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
  fix_e->flags |= EDGE_FALLTHRU;

  edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);

  if (e)
    {
      rescan_loop_exit (e, true, false);
      e->probability = REG_BR_PROB_BASE;
      e->count = region_copy[n_region - 1]->count;
    }

  /* Redirect the entry and add the phi node arguments.  */
  if (entry->dest == loop->header)
    mark_loop_for_removal (loop);
  redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
  gcc_assert (redirected != NULL);
  flush_pending_stmts (entry);

  /* Add the other PHI node arguments.  */
  add_phi_args_after_copy (region_copy, n_region, NULL);

  if (free_region_copy)
    free (region_copy);

  free_original_copy_tables ();
  return true;
}
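
/* A worked example of the scaling above (illustrative, added commentary):
   if ENTRY is taken 40 times and ENTRY->dest is executed 100 times in the
   profile, the original REGION keeps 100 - 40 = 60 of the count (scaled by
   60/100) while REGION_COPY receives the remaining 40 (scaled by 40/100),
   so the combined profile is preserved.  */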
/* Return true when PATH is a valid jump-thread path.  */

static bool
valid_jump_thread_path (vec<jump_thread_edge *> *path)
{
  unsigned len = path->length ();
  bool multiway_branch = false;

  /* Check that the path is connected and see if there's a multi-way
     branch on the path.  */
  for (unsigned int j = 0; j < len - 1; j++)
    {
      if ((*path)[j]->e->dest != (*path)[j+1]->e->src)
        return false;
      gimple *last = last_stmt ((*path)[j]->e->dest);
      multiway_branch |= (last && gimple_code (last) == GIMPLE_SWITCH);
    }

  /* If we are trying to thread the loop latch to a block that does
     not dominate the loop latch, then that will create an irreducible
     loop.  We avoid that unless the jump thread has a multi-way
     branch, in which case we have deemed it worth losing other
     loop optimizations later if we can eliminate the multi-way branch.  */
  edge e = (*path)[0]->e;
  struct loop *loop = e->dest->loop_father;
  if (!multiway_branch
      && loop->latch
      && loop_latch_edge (loop) == e
      && (determine_bb_domination_status (loop, path->last ()->e->dest)
          == DOMST_NONDOMINATING))
    return false;

  return true;
}
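
/* Connectivity here means each edge on the path starts where the previous
   one ended; e.g. (illustrative) a path (A->B, B->C, C->D) is connected,
   while (A->B, C->D) is not and is rejected above.  */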
/* Remove any queued jump threads that include edge E.

   We don't actually remove them here, just record the edges into a
   hash table.  That way we can do the search once per iteration of
   DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR.  */

void
remove_jump_threads_including (edge_def *e)
{
  if (!paths.exists ())
    return;

  if (!removed_edges)
    removed_edges = new hash_table<struct removed_edges> (17);

  edge *slot = removed_edges->find_slot (e, INSERT);
  *slot = e;
}
/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each edge pair recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
   loop headers if it does not simplify the loop.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (bool may_peel_loop_headers)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  bitmap threaded_blocks;
  struct loop *loop;

  if (!paths.exists ())
    {
      retval = false;
      goto out;
    }

  threaded_blocks = BITMAP_ALLOC (NULL);
  memset (&thread_stats, 0, sizeof (thread_stats));

  /* Remove any paths that referenced removed edges.  */
  if (removed_edges)
    for (i = 0; i < paths.length (); )
      {
        unsigned int j;
        vec<jump_thread_edge *> *path = paths[i];

        for (j = 0; j < path->length (); j++)
          {
            edge e = (*path)[j]->e;
            if (removed_edges->find_slot (e, NO_INSERT))
              break;
          }

        if (j != path->length ())
          {
            delete_jump_thread_path (path);
            paths.unordered_remove (i);
            continue;
          }
        i++;
      }
  /* Jump-thread all FSM threads before other jump-threads.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge entry = (*path)[0]->e;

      /* Only code-generate FSM jump-threads in this loop.  */
      if ((*path)[0]->type != EDGE_FSM_THREAD)
        {
          i++;
          continue;
        }

      /* Do not jump-thread twice from the same block.  */
      if (bitmap_bit_p (threaded_blocks, entry->src->index)
          /* Verify that the jump thread path is still valid: a
             previous jump-thread may have changed the CFG, and
             invalidated the current path or the requested jump
             thread might create irreducible loops which should
             generally be avoided.  */
          || !valid_jump_thread_path (path))
        {
          /* Remove invalid FSM jump-thread paths.  */
          delete_jump_thread_path (path);
          paths.unordered_remove (i);
          continue;
        }

      unsigned len = path->length ();
      edge exit = (*path)[len - 1]->e;
      basic_block *region = XNEWVEC (basic_block, len - 1);

      for (unsigned int j = 0; j < len - 1; j++)
        region[j] = (*path)[j]->e->dest;

      if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
        {
          /* We do not update dominance info.  */
          free_dominance_info (CDI_DOMINATORS);
          bitmap_set_bit (threaded_blocks, entry->src->index);
          retval = true;
          thread_stats.num_threaded_edges++;
        }

      delete_jump_thread_path (path);
      paths.unordered_remove (i);
      free (region);
    }
  /* Remove from PATHS all the jump-threads starting with an edge already
     jump-threaded.  */
  for (i = 0; i < paths.length ();)
    {
      vec<jump_thread_edge *> *path = paths[i];
      edge entry = (*path)[0]->e;

      /* Do not jump-thread twice from the same block.  */
      if (bitmap_bit_p (threaded_blocks, entry->src->index))
        {
          delete_jump_thread_path (path);
          paths.unordered_remove (i);
        }
      else
        i++;
    }

  bitmap_clear (threaded_blocks);

  mark_threaded_blocks (threaded_blocks);

  initialize_original_copy_tables ();

  /* First perform the threading requests that do not affect
     loop structure.  */
  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);

      if (EDGE_COUNT (bb->preds) > 0)
        retval |= thread_block (bb, true);
    }

  /* Then perform the threading through loop headers.  We start with the
     innermost loop, so that the changes in cfg we perform won't affect
     further threading.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      if (!loop->header
          || !bitmap_bit_p (threaded_blocks, loop->header->index))
        continue;

      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
    }
  /* Any jump threading paths that are still attached to edges at this
     point must be one of two cases.

     First, we could have a jump threading path which went from outside
     a loop to inside a loop that was ignored because a prior jump thread
     across a backedge was realized (which indirectly causes the loop
     above to ignore the latter thread).  We can detect these because the
     loop structures will be different and we do not currently try to
     optimize this case.

     Second, we could be threading across a backedge to a point within the
     same loop.  This occurs for the FSA/FSM optimization and we would
     like to optimize it.  However, we have to be very careful as this
     may completely scramble the loop structures, with the result being
     irreducible loops causing us to throw away our loop structure.

     As a compromise for the latter case, if the thread path ends in
     a block where the last statement is a multiway branch, then go
     ahead and thread it, else ignore it.  */
  basic_block bb;
  edge e;
  FOR_EACH_BB_FN (bb, cfun)
    {
      /* If we do end up threading here, we can remove elements from
         BB->preds.  Thus we can not use the FOR_EACH_EDGE iterator.  */
      for (edge_iterator ei = ei_start (bb->preds);
           (e = ei_safe_edge (ei));)
        if (e->aux)
          {
            vec<jump_thread_edge *> *path = THREAD_PATH (e);

            /* Case 1, threading from outside to inside the loop
               after we'd already threaded through the header.  */
            if ((*path)[0]->e->dest->loop_father
                != path->last ()->e->src->loop_father)
              {
                delete_jump_thread_path (path);
                e->aux = NULL;
                ei_next (&ei);
              }
            else if (bb_ends_with_multiway_branch (path->last ()->e->src))
              {
                /* The code to thread through loop headers may have
                   split a block with jump threads attached to it.

                   We can identify this with a disjoint jump threading
                   path.  If found, just remove it.  */
                for (unsigned int i = 0; i < path->length () - 1; i++)
                  if ((*path)[i]->e->dest != (*path)[i + 1]->e->src)
                    {
                      delete_jump_thread_path (path);
                      e->aux = NULL;
                      ei_next (&ei);
                      break;
                    }

                /* Our path is still valid, thread it.  */
                if (e->aux)
                  {
                    if (thread_block ((*path)[0]->e->dest, false))
                      e->aux = NULL;
                    else
                      {
                        delete_jump_thread_path (path);
                        e->aux = NULL;
                        ei_next (&ei);
                      }
                  }
              }
            else
              {
                delete_jump_thread_path (path);
                e->aux = NULL;
                ei_next (&ei);
              }
          }
        else
          ei_next (&ei);
    }

  statistics_counter_event (cfun, "Jumps threaded",
                            thread_stats.num_threaded_edges);

  free_original_copy_tables ();

  BITMAP_FREE (threaded_blocks);
  threaded_blocks = NULL;
  paths.release ();

  if (retval)
    loops_state_set (LOOPS_NEED_FIXUP);

 out:
  delete removed_edges;
  removed_edges = NULL;
  return retval;
}
/* Delete the jump threading path PATH.  We have to explicitly delete
   each entry in the vector, then the container.  */

void
delete_jump_thread_path (vec<jump_thread_edge *> *path)
{
  for (unsigned int i = 0; i < path->length (); i++)
    delete (*path)[i];
  path->release ();
  delete path;
}
/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   E is the edge we can thread, E2 is the new target edge, i.e., we
   are effectively recording that E->dest can be changed to E2->dest
   after fixing the SSA graph.  */

void
register_jump_thread (vec<jump_thread_edge *> *path)
{
  if (!dbg_cnt (registered_jump_thread))
    {
      delete_jump_thread_path (path);
      return;
    }

  /* First make sure there are no NULL outgoing edges on the jump threading
     path.  That can happen for jumping to a constant address.  */
  for (unsigned int i = 0; i < path->length (); i++)
    if ((*path)[i]->e == NULL)
      {
        if (dump_file && (dump_flags & TDF_DETAILS))
          {
            fprintf (dump_file,
                     "Found NULL edge in jump threading path.  Cancelling jump thread:\n");
            dump_jump_thread_path (dump_file, *path, false);
          }

        delete_jump_thread_path (path);
        return;
      }

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_jump_thread_path (dump_file, *path, true);

  if (!paths.exists ())
    paths.create (5);

  paths.safe_push (path);
}