gcc/tree-ssa-threadupdate.c
1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "cfghooks.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "fold-const.h"
30 #include "cfganal.h"
31 #include "gimple-iterator.h"
32 #include "tree-ssa.h"
33 #include "tree-ssa-threadupdate.h"
34 #include "cfgloop.h"
35 #include "dbgcnt.h"
36 #include "tree-cfg.h"
37
38 /* Given a block B, update the CFG and SSA graph to reflect redirecting
39 one or more in-edges to B to instead reach the destination of an
40 out-edge from B while preserving any side effects in B.
41
42 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
43 side effects of executing B.
44
45 1. Make a copy of B (including its outgoing edges and statements). Call
46 the copy B'. Note B' has no incoming edges or PHIs at this time.
47
48 2. Remove the control statement at the end of B' and all outgoing edges
49 except B'->C.
50
51 3. Add a new argument to each PHI in C with the same value as the existing
52 argument associated with edge B->C. Associate the new PHI arguments
53 with the edge B'->C.
54
55 4. For each PHI in B, find or create a PHI in B' with an identical
56 PHI_RESULT. Add an argument to the PHI in B' which has the same
57 value as the PHI in B associated with the edge A->B. Associate
58 the new argument in the PHI in B' with the edge A->B.
59
60 5. Change the edge A->B to A->B'.
61
62 5a. This automatically deletes any PHI arguments associated with the
63 edge A->B in B.
64
65 5b. This automatically associates each new argument added in step 4
66 with the edge A->B'.
67
68 6. Repeat for other incoming edges into B.
69
70 7. Put the duplicated resources in B and all the B' blocks into SSA form.
71
72 Note that block duplication can be minimized by first collecting the
73 set of unique destination blocks that the incoming edges should
74 be threaded to.
75
76 We reduce the number of edges and statements we create by not copying all
77 the outgoing edges and the control statement in step #1. We instead create
78 a template block without the outgoing edges and duplicate the template.
79
80 Another case this code handles is threading through a "joiner" block. In
81 this case, we do not know the destination of the joiner block, but one
82 of the outgoing edges from the joiner block leads to a threadable path. This
83 case largely works as outlined above, except the duplicate of the joiner
84 block still contains a full set of outgoing edges and its control statement.
85 We just redirect one of its outgoing edges to our jump threading path. */
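/* As a concrete illustration of the non-joiner case described above
   (A, B and C are the blocks from the steps; D stands for any other
   predecessor of B that is not being threaded):

       Before threading A->B:        After threading A->B:

            A     D                      A           D
             \   /                       |           |
               B  (cond)                 B'          B  (cond)
              / \                        |          / \
             v   v                       |         v   v
             C   E                       +-------> C   E

   B' is the duplicate of B: it preserves B's side effects, has had its
   control statement removed, and falls through unconditionally to C,
   while D still reaches the original B and its condition.  */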
86
87
88 /* Steps #5 and #6 of the above algorithm are best implemented by walking
89 all the incoming edges which thread to the same destination edge at
90 the same time. That avoids lots of table lookups to get information
91 for the destination edge.
92
93 To realize that implementation we create a list of incoming edges
94 which thread to the same outgoing edge. Thus to implement steps
95 #5 and #6 we traverse our hash table of outgoing edge information.
96 For each entry we walk the list of incoming edges which thread to
97 the current outgoing edge. */
98
99 struct el
100 {
101 edge e;
102 struct el *next;
103 };
104
105 /* Main data structure recording information regarding B's duplicate
106 blocks. */
107
108 /* We need to efficiently record the unique thread destinations of this
109 block and specific information associated with those destinations. We
110 may have many incoming edges threaded to the same outgoing edge. This
111 can be naturally implemented with a hash table. */
112
113 struct redirection_data : free_ptr_hash<redirection_data>
114 {
115 /* We support wiring up two block duplicates in a jump threading path.
116
117 One is a normal block copy where we remove the control statement
118 and wire up its single remaining outgoing edge to the thread path.
119
120 The other is a joiner block where we leave the control statement
121 in place, but wire one of the outgoing edges to a thread path.
122
123 In theory we could have multiple block duplicates in a jump
124 threading path, but I haven't tried that.
125
126 The duplicate blocks appear in this array in the same order in
127 which they appear in the jump thread path. */
128 basic_block dup_blocks[2];
129
130 /* The jump threading path. */
131 vec<jump_thread_edge *> *path;
132
133 /* A list of incoming edges which we want to thread to the
134 same path. */
135 struct el *incoming_edges;
136
137 /* hash_table support. */
138 static inline hashval_t hash (const redirection_data *);
139 static inline int equal (const redirection_data *, const redirection_data *);
140 };
141
142 /* Dump a jump threading path, including annotations about each
143 edge in the path. */
144
145 static void
146 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
147 bool registering)
148 {
149 fprintf (dump_file,
150 " %s%s jump thread: (%d, %d) incoming edge; ",
151 (registering ? "Registering" : "Cancelling"),
152 (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
153 path[0]->e->src->index, path[0]->e->dest->index);
154
155 for (unsigned int i = 1; i < path.length (); i++)
156 {
157 /* We can get paths with a NULL edge when the final destination
158 of a jump thread turns out to be a constant address. We dump
159 those paths when debugging, so we have to be prepared for that
160 possibility here. */
161 if (path[i]->e == NULL)
162 continue;
163
164 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
165 fprintf (dump_file, " (%d, %d) joiner; ",
166 path[i]->e->src->index, path[i]->e->dest->index);
167 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
168 fprintf (dump_file, " (%d, %d) normal;",
169 path[i]->e->src->index, path[i]->e->dest->index);
170 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
171 fprintf (dump_file, " (%d, %d) nocopy;",
172 path[i]->e->src->index, path[i]->e->dest->index);
173 if (path[0]->type == EDGE_FSM_THREAD)
174 fprintf (dump_file, " (%d, %d) ",
175 path[i]->e->src->index, path[i]->e->dest->index);
176 }
177 fputc ('\n', dump_file);
178 }
179
180 /* Simple hashing function. For any given incoming edge E, we're going
181 to be most concerned with the final destination of its jump thread
182 path. So hash on the block index of the final edge in the path. */
183
184 inline hashval_t
185 redirection_data::hash (const redirection_data *p)
186 {
187 vec<jump_thread_edge *> *path = p->path;
188 return path->last ()->e->dest->index;
189 }
190
191 /* Given two hash table entries, return true if they have the same
192 jump threading path. */
193 inline int
194 redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
195 {
196 vec<jump_thread_edge *> *path1 = p1->path;
197 vec<jump_thread_edge *> *path2 = p2->path;
198
199 if (path1->length () != path2->length ())
200 return false;
201
202 for (unsigned int i = 1; i < path1->length (); i++)
203 {
204 if ((*path1)[i]->type != (*path2)[i]->type
205 || (*path1)[i]->e != (*path2)[i]->e)
206 return false;
207 }
208
209 return true;
210 }
211
212 /* Rather than search all the edges in jump thread paths each time
 213    DOM is able to simplify a control statement, we build a hash table
 214    of the deleted edges. We only care about the address of the edge,
215 not its contents. */
216 struct removed_edges : nofree_ptr_hash<edge_def>
217 {
218 static hashval_t hash (edge e) { return htab_hash_pointer (e); }
219 static bool equal (edge e1, edge e2) { return e1 == e2; }
220 };
221
222 static hash_table<removed_edges> *removed_edges;
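/* A minimal sketch of how this table would be used; the call sites are
   outside this excerpt, so treat this as an assumption about usage
   rather than a copy of it:

     edge *slot = removed_edges->find_slot (e, INSERT);
     *slot = e;
     ...
     if (removed_edges && removed_edges->find_slot (e, NO_INSERT))
       // E was deleted while simplifying a control statement.

   Only the pointer identity of E matters, as noted above.  */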
223
224 /* Data structure of information to pass to hash table traversal routines. */
225 struct ssa_local_info_t
226 {
227 /* The current block we are working on. */
228 basic_block bb;
229
230 /* We only create a template block for the first duplicated block in a
231 jump threading path as we may need many duplicates of that block.
232
233 The second duplicate block in a path is specific to that path. Creating
234 and sharing a template for that block is considerably more difficult. */
235 basic_block template_block;
236
237 /* TRUE if we thread one or more jumps, FALSE otherwise. */
238 bool jumps_threaded;
239
240 /* Blocks duplicated for the thread. */
241 bitmap duplicate_blocks;
242 };
243
244 /* Passes which use the jump threading code register jump threading
245 opportunities as they are discovered. We keep the registered
246 jump threading opportunities in this vector as edge pairs
247 (original_edge, target_edge). */
248 static vec<vec<jump_thread_edge *> *> paths;
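/* A hedged sketch of how a client pass fills this vector through the
   registration interface declared in tree-ssa-threadupdate.h; the names
   register_jump_thread, jump_thread_edge's constructor and
   EDGE_START_JUMP_THREAD are assumed from that header and are not
   visible in this excerpt:

     vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
     path->safe_push (new jump_thread_edge (e, EDGE_START_JUMP_THREAD));
     path->safe_push (new jump_thread_edge (e2, EDGE_COPY_SRC_BLOCK));
     register_jump_thread (path);  */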
249
250 /* When we start updating the CFG for threading, data necessary for jump
251 threading is attached to the AUX field for the incoming edge. Use these
252 macros to access the underlying structure attached to the AUX field. */
253 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
254
255 /* Jump threading statistics. */
256
257 struct thread_stats_d
258 {
259 unsigned long num_threaded_edges;
260 };
261
262 struct thread_stats_d thread_stats;
263
264
 265 /* Remove the last statement in block BB if it is a control statement.
266 Also remove all outgoing edges except the edge which reaches DEST_BB.
267 If DEST_BB is NULL, then remove all outgoing edges. */
268
269 void
270 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
271 {
272 gimple_stmt_iterator gsi;
273 edge e;
274 edge_iterator ei;
275
276 gsi = gsi_last_bb (bb);
277
278 /* If the duplicate ends with a control statement, then remove it.
279
280 Note that if we are duplicating the template block rather than the
281 original basic block, then the duplicate might not have any real
282 statements in it. */
283 if (!gsi_end_p (gsi)
284 && gsi_stmt (gsi)
285 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
286 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
287 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
288 gsi_remove (&gsi, true);
289
290 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
291 {
292 if (e->dest != dest_bb)
293 remove_edge (e);
294 else
295 ei_next (&ei);
296 }
297
 298   /* If the remaining edge is a loop exit, there must have been
 299      a removed edge that was not a loop exit.
300
301 In that case BB and possibly other blocks were previously
302 in the loop, but are now outside the loop. Thus, we need
303 to update the loop structures. */
304 if (single_succ_p (bb)
305 && loop_outer (bb->loop_father)
306 && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
307 loops_state_set (LOOPS_NEED_FIXUP);
308 }
309
310 /* Create a duplicate of BB. Record the duplicate block in an array
311 indexed by COUNT stored in RD. */
312
313 static void
314 create_block_for_threading (basic_block bb,
315 struct redirection_data *rd,
316 unsigned int count,
317 bitmap *duplicate_blocks)
318 {
319 edge_iterator ei;
320 edge e;
321
322 /* We can use the generic block duplication code and simply remove
323 the stuff we do not need. */
324 rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);
325
326 FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
327 e->aux = NULL;
328
329 /* Zero out the profile, since the block is unreachable for now. */
330 rd->dup_blocks[count]->frequency = 0;
331 rd->dup_blocks[count]->count = 0;
332 if (duplicate_blocks)
333 bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
334 }
335
336 /* Main data structure to hold information for duplicates of BB. */
337
338 static hash_table<redirection_data> *redirection_data;
339
 340 /* Given an outgoing edge E, look up and return its entry in our hash table.
341
342 If INSERT is true, then we insert the entry into the hash table if
343 it is not already present. INCOMING_EDGE is added to the list of incoming
344 edges associated with E in the hash table. */
345
346 static struct redirection_data *
347 lookup_redirection_data (edge e, enum insert_option insert)
348 {
349 struct redirection_data **slot;
350 struct redirection_data *elt;
351 vec<jump_thread_edge *> *path = THREAD_PATH (e);
352
353 /* Build a hash table element so we can see if E is already
354 in the table. */
355 elt = XNEW (struct redirection_data);
356 elt->path = path;
357 elt->dup_blocks[0] = NULL;
358 elt->dup_blocks[1] = NULL;
359 elt->incoming_edges = NULL;
360
361 slot = redirection_data->find_slot (elt, insert);
362
363 /* This will only happen if INSERT is false and the entry is not
364 in the hash table. */
365 if (slot == NULL)
366 {
367 free (elt);
368 return NULL;
369 }
370
371 /* This will only happen if E was not in the hash table and
372 INSERT is true. */
373 if (*slot == NULL)
374 {
375 *slot = elt;
376 elt->incoming_edges = XNEW (struct el);
377 elt->incoming_edges->e = e;
378 elt->incoming_edges->next = NULL;
379 return elt;
380 }
381 /* E was in the hash table. */
382 else
383 {
384 /* Free ELT as we do not need it anymore, we will extract the
385 relevant entry from the hash table itself. */
386 free (elt);
387
388 /* Get the entry stored in the hash table. */
389 elt = *slot;
390
391 /* If insertion was requested, then we need to add INCOMING_EDGE
392 to the list of incoming edges associated with E. */
393 if (insert)
394 {
395 struct el *el = XNEW (struct el);
396 el->next = elt->incoming_edges;
397 el->e = e;
398 elt->incoming_edges = el;
399 }
400
401 return elt;
402 }
403 }
404
 405 /* Similar to copy_phi_args, except that the PHI arg already exists;
 406    it just does not have a value associated with it. */
407
408 static void
409 copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
410 {
411 int src_idx = src_e->dest_idx;
412 int tgt_idx = tgt_e->dest_idx;
413
414 /* Iterate over each PHI in e->dest. */
415 for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
416 gsi2 = gsi_start_phis (tgt_e->dest);
417 !gsi_end_p (gsi);
418 gsi_next (&gsi), gsi_next (&gsi2))
419 {
420 gphi *src_phi = gsi.phi ();
421 gphi *dest_phi = gsi2.phi ();
422 tree val = gimple_phi_arg_def (src_phi, src_idx);
423 source_location locus = gimple_phi_arg_location (src_phi, src_idx);
424
425 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
426 gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
427 }
428 }
429
 430 /* Given SSA name DEF, backtrack the jump threading PATH from node IDX
 431    to see if it has a constant value in a flow-sensitive manner. Set
 432    LOCUS to the location of the constant PHI arg and return the value.
 433    Return DEF directly if either PATH or IDX is zero. */
434
435 static tree
436 get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
437 basic_block bb, int idx, source_location *locus)
438 {
439 tree arg;
440 gphi *def_phi;
441 basic_block def_bb;
442
443 if (path == NULL || idx == 0)
444 return def;
445
446 def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
447 if (!def_phi)
448 return def;
449
450 def_bb = gimple_bb (def_phi);
451 /* Don't propagate loop invariants into deeper loops. */
452 if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
453 return def;
454
455 /* Backtrack jump threading path from IDX to see if def has constant
456 value. */
457 for (int j = idx - 1; j >= 0; j--)
458 {
459 edge e = (*path)[j]->e;
460 if (e->dest == def_bb)
461 {
462 arg = gimple_phi_arg_def (def_phi, e->dest_idx);
463 if (is_gimple_min_invariant (arg))
464 {
465 *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
466 return arg;
467 }
468 break;
469 }
470 }
471
472 return def;
473 }
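/* An illustrative example with hypothetical names: suppose DEF is x_1,
   defined by the PHI

       x_1 = PHI <7 (A), y_2 (B)>

   in block J, and the path being examined enters J via the edge A->J at
   some index smaller than IDX.  Backtracking finds that path edge, whose
   destination is J, so the PHI argument selected by it is the constant 7.
   The caller can then copy 7 (and its location) instead of x_1 when
   filling in PHI arguments along the duplicated path.  */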
474
475 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
 476    Try to backtrack the jump threading PATH from node IDX to see if the
 477    arg has a constant value; if so, copy the constant value instead of
 478    the argument itself. */
479
480 static void
481 copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
482 vec<jump_thread_edge *> *path, int idx)
483 {
484 gphi_iterator gsi;
485 int src_indx = src_e->dest_idx;
486
487 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
488 {
489 gphi *phi = gsi.phi ();
490 tree def = gimple_phi_arg_def (phi, src_indx);
491 source_location locus = gimple_phi_arg_location (phi, src_indx);
492
493 if (TREE_CODE (def) == SSA_NAME
494 && !virtual_operand_p (gimple_phi_result (phi)))
495 def = get_value_locus_in_path (def, path, bb, idx, &locus);
496
497 add_phi_arg (phi, def, tgt_e, locus);
498 }
499 }
500
501 /* We have recently made a copy of ORIG_BB, including its outgoing
502 edges. The copy is NEW_BB. Every PHI node in every direct successor of
 503    ORIG_BB has a new argument associated with the edge from NEW_BB to the
504 successor. Initialize the PHI argument so that it is equal to the PHI
505 argument associated with the edge from ORIG_BB to the successor.
506 PATH and IDX are used to check if the new PHI argument has constant
507 value in a flow sensitive manner. */
508
509 static void
510 update_destination_phis (basic_block orig_bb, basic_block new_bb,
511 vec<jump_thread_edge *> *path, int idx)
512 {
513 edge_iterator ei;
514 edge e;
515
516 FOR_EACH_EDGE (e, ei, orig_bb->succs)
517 {
518 edge e2 = find_edge (new_bb, e->dest);
519 copy_phi_args (e->dest, e, e2, path, idx);
520 }
521 }
522
 523 /* Given a duplicate block and its single destination (both stored
 524    in RD), create an edge between the duplicate and its single
 525    destination.
526
527 Add an additional argument to any PHI nodes at the single
 528    destination. IDX is the node in the jump threading path at which we
 529    start checking whether the new PHI argument has a constant value
 530    along the jump threading path. */
531
532 static void
533 create_edge_and_update_destination_phis (struct redirection_data *rd,
534 basic_block bb, int idx)
535 {
536 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
537
538 rescan_loop_exit (e, true, false);
539 e->probability = REG_BR_PROB_BASE;
540 e->count = bb->count;
541
542 /* We used to copy the thread path here. That was added in 2007
543 and dutifully updated through the representation changes in 2013.
544
545 In 2013 we added code to thread from an interior node through
546 the backedge to another interior node. That runs after the code
547 to thread through loop headers from outside the loop.
548
549 The latter may delete edges in the CFG, including those
550 which appeared in the jump threading path we copied here. Thus
551 we'd end up using a dangling pointer.
552
553 After reviewing the 2007/2011 code, I can't see how anything
554 depended on copying the AUX field and clearly copying the jump
555 threading path is problematical due to embedded edge pointers.
556 It has been removed. */
557 e->aux = NULL;
558
559 /* If there are any PHI nodes at the destination of the outgoing edge
560 from the duplicate block, then we will need to add a new argument
561 to them. The argument should have the same value as the argument
562 associated with the outgoing edge stored in RD. */
563 copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
564 }
565
566 /* Look through PATH beginning at START and return TRUE if there are
567 any additional blocks that need to be duplicated. Otherwise,
568 return FALSE. */
569 static bool
570 any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
571 unsigned int start)
572 {
573 for (unsigned int i = start + 1; i < path->length (); i++)
574 {
575 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
576 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
577 return true;
578 }
579 return false;
580 }
581
582
583 /* Compute the amount of profile count/frequency coming into the jump threading
584 path stored in RD that we are duplicating, returned in PATH_IN_COUNT_PTR and
585 PATH_IN_FREQ_PTR, as well as the amount of counts flowing out of the
586 duplicated path, returned in PATH_OUT_COUNT_PTR. LOCAL_INFO is used to
587 identify blocks duplicated for jump threading, which have duplicated
588 edges that need to be ignored in the analysis. Return true if path contains
589 a joiner, false otherwise.
590
591 In the non-joiner case, this is straightforward - all the counts/frequency
592 flowing into the jump threading path should flow through the duplicated
593 block and out of the duplicated path.
594
595 In the joiner case, it is very tricky. Some of the counts flowing into
596 the original path go offpath at the joiner. The problem is that while
597 we know how much total count goes off-path in the original control flow,
598 we don't know how many of the counts corresponding to just the jump
599 threading path go offpath at the joiner.
600
601 For example, assume we have the following control flow and identified
602 jump threading paths:
603
604 A B C
605 \ | /
606 Ea \ |Eb / Ec
607 \ | /
608 v v v
609 J <-- Joiner
610 / \
611 Eoff/ \Eon
612 / \
613 v v
614 Soff Son <--- Normal
615 /\
616 Ed/ \ Ee
617 / \
618 v v
619 D E
620
621 Jump threading paths: A -> J -> Son -> D (path 1)
622 C -> J -> Son -> E (path 2)
623
624 Note that the control flow could be more complicated:
625 - Each jump threading path may have more than one incoming edge. I.e. A and
626 Ea could represent multiple incoming blocks/edges that are included in
627 path 1.
628 - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
629 before or after the "normal" copy block). These are not duplicated onto
630 the jump threading path, as they are single-successor.
631 - Any of the blocks along the path may have other incoming edges that
632 are not part of any jump threading path, but add profile counts along
633 the path.
634
 635    In the above example, after all jump threading is complete, we will
636 end up with the following control flow:
637
638 A B C
639 | | |
640 Ea| |Eb |Ec
641 | | |
642 v v v
643 Ja J Jc
644 / \ / \Eon' / \
645 Eona/ \ ---/---\-------- \Eonc
646 / \ / / \ \
647 v v v v v
648 Sona Soff Son Sonc
649 \ /\ /
650 \___________ / \ _____/
651 \ / \/
652 vv v
653 D E
654
655 The main issue to notice here is that when we are processing path 1
656 (A->J->Son->D) we need to figure out the outgoing edge weights to
657 the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
658 sum of the incoming weights to D remain Ed. The problem with simply
659 assuming that Ja (and Jc when processing path 2) has the same outgoing
660 probabilities to its successors as the original block J, is that after
661 all paths are processed and other edges/counts removed (e.g. none
662 of Ec will reach D after processing path 2), we may end up with not
663 enough count flowing along duplicated edge Sona->D.
664
665 Therefore, in the case of a joiner, we keep track of all counts
666 coming in along the current path, as well as from predecessors not
667 on any jump threading path (Eb in the above example). While we
668 first assume that the duplicated Eona for Ja->Sona has the same
669 probability as the original, we later compensate for other jump
 670    threading paths that may eliminate edges. We do that by keeping track
671 of all counts coming into the original path that are not in a jump
672 thread (Eb in the above example, but as noted earlier, there could
673 be other predecessors incoming to the path at various points, such
 674    as at Son). Refer to this cumulative non-path count coming into the
 675    path before D as Enonpath. We then ensure that the count from Sona->D
 676    is at least as big as (Ed - Enonpath), but no bigger than the minimum
677 weight along the jump threading path. The probabilities of both the
678 original and duplicated joiner block J and Ja will be adjusted
679 accordingly after the updates. */
680
681 static bool
682 compute_path_counts (struct redirection_data *rd,
683 ssa_local_info_t *local_info,
684 gcov_type *path_in_count_ptr,
685 gcov_type *path_out_count_ptr,
686 int *path_in_freq_ptr)
687 {
688 edge e = rd->incoming_edges->e;
689 vec<jump_thread_edge *> *path = THREAD_PATH (e);
690 edge elast = path->last ()->e;
691 gcov_type nonpath_count = 0;
692 bool has_joiner = false;
693 gcov_type path_in_count = 0;
694 int path_in_freq = 0;
695
696 /* Start by accumulating incoming edge counts to the path's first bb
697 into a couple buckets:
698 path_in_count: total count of incoming edges that flow into the
699 current path.
700 nonpath_count: total count of incoming edges that are not
701 flowing along *any* path. These are the counts
702 that will still flow along the original path after
703 all path duplication is done by potentially multiple
704 calls to this routine.
705 (any other incoming edge counts are for a different jump threading
706 path that will be handled by a later call to this routine.)
707 To make this easier, start by recording all incoming edges that flow into
708 the current path in a bitmap. We could add up the path's incoming edge
709 counts here, but we still need to walk all the first bb's incoming edges
710 below to add up the counts of the other edges not included in this jump
711 threading path. */
712 struct el *next, *el;
713 bitmap in_edge_srcs = BITMAP_ALLOC (NULL);
714 for (el = rd->incoming_edges; el; el = next)
715 {
716 next = el->next;
717 bitmap_set_bit (in_edge_srcs, el->e->src->index);
718 }
719 edge ein;
720 edge_iterator ei;
721 FOR_EACH_EDGE (ein, ei, e->dest->preds)
722 {
723 vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
724 /* Simply check the incoming edge src against the set captured above. */
725 if (ein_path
726 && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
727 {
728 /* It is necessary but not sufficient that the last path edges
729 are identical. There may be different paths that share the
730 same last path edge in the case where the last edge has a nocopy
731 source block. */
732 gcc_assert (ein_path->last ()->e == elast);
733 path_in_count += ein->count;
734 path_in_freq += EDGE_FREQUENCY (ein);
735 }
736 else if (!ein_path)
737 {
738 /* Keep track of the incoming edges that are not on any jump-threading
739 path. These counts will still flow out of original path after all
740 jump threading is complete. */
741 nonpath_count += ein->count;
742 }
743 }
744
745 /* This is needed due to insane incoming frequencies. */
746 if (path_in_freq > BB_FREQ_MAX)
747 path_in_freq = BB_FREQ_MAX;
748
749 BITMAP_FREE (in_edge_srcs);
750
751 /* Now compute the fraction of the total count coming into the first
752 path bb that is from the current threading path. */
753 gcov_type total_count = e->dest->count;
754 /* Handle incoming profile insanities. */
755 if (total_count < path_in_count)
756 path_in_count = total_count;
757 int onpath_scale = GCOV_COMPUTE_SCALE (path_in_count, total_count);
758
759 /* Walk the entire path to do some more computation in order to estimate
760 how much of the path_in_count will flow out of the duplicated threading
761 path. In the non-joiner case this is straightforward (it should be
762 the same as path_in_count, although we will handle incoming profile
763 insanities by setting it equal to the minimum count along the path).
764
765 In the joiner case, we need to estimate how much of the path_in_count
766 will stay on the threading path after the joiner's conditional branch.
767 We don't really know for sure how much of the counts
768 associated with this path go to each successor of the joiner, but we'll
 769    estimate based on the fraction of the total count coming into the path
 770    bb that was from the threading paths (computed above in onpath_scale).
771 Afterwards, we will need to do some fixup to account for other threading
772 paths and possible profile insanities.
773
774 In order to estimate the joiner case's counts we also need to update
775 nonpath_count with any additional counts coming into the path. Other
776 blocks along the path may have additional predecessors from outside
777 the path. */
778 gcov_type path_out_count = path_in_count;
779 gcov_type min_path_count = path_in_count;
780 for (unsigned int i = 1; i < path->length (); i++)
781 {
782 edge epath = (*path)[i]->e;
783 gcov_type cur_count = epath->count;
784 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
785 {
786 has_joiner = true;
787 cur_count = apply_probability (cur_count, onpath_scale);
788 }
789 /* In the joiner case we need to update nonpath_count for any edges
790 coming into the path that will contribute to the count flowing
791 into the path successor. */
792 if (has_joiner && epath != elast)
793 {
794 /* Look for other incoming edges after joiner. */
795 FOR_EACH_EDGE (ein, ei, epath->dest->preds)
796 {
797 if (ein != epath
798 /* Ignore in edges from blocks we have duplicated for a
799 threading path, which have duplicated edge counts until
800 they are redirected by an invocation of this routine. */
801 && !bitmap_bit_p (local_info->duplicate_blocks,
802 ein->src->index))
803 nonpath_count += ein->count;
804 }
805 }
806 if (cur_count < path_out_count)
807 path_out_count = cur_count;
808 if (epath->count < min_path_count)
809 min_path_count = epath->count;
810 }
811
812 /* We computed path_out_count above assuming that this path targeted
813 the joiner's on-path successor with the same likelihood as it
814 reached the joiner. However, other thread paths through the joiner
815 may take a different path through the normal copy source block
816 (i.e. they have a different elast), meaning that they do not
817 contribute any counts to this path's elast. As a result, it may
818 turn out that this path must have more count flowing to the on-path
819 successor of the joiner. Essentially, all of this path's elast
820 count must be contributed by this path and any nonpath counts
821 (since any path through the joiner with a different elast will not
822 include a copy of this elast in its duplicated path).
823 So ensure that this path's path_out_count is at least the
824 difference between elast->count and nonpath_count. Otherwise the edge
825 counts after threading will not be sane. */
826 if (has_joiner && path_out_count < elast->count - nonpath_count)
827 {
828 path_out_count = elast->count - nonpath_count;
829 /* But neither can we go above the minimum count along the path
830 we are duplicating. This can be an issue due to profile
831 insanities coming in to this pass. */
832 if (path_out_count > min_path_count)
833 path_out_count = min_path_count;
834 }
835
836 *path_in_count_ptr = path_in_count;
837 *path_out_count_ptr = path_out_count;
838 *path_in_freq_ptr = path_in_freq;
839 return has_joiner;
840 }
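/* A worked numeric sketch of the joiner-case computation above.  The
   numbers are made up, and GCOV_COMPUTE_SCALE / apply_probability are
   assumed to scale via REG_BR_PROB_BASE with rounding, so the results
   are approximate:

     total_count (e->dest->count)          = 1000
     path_in_count (edges on this path)    =  400
     nonpath_count (edges on no path)      =  100
     onpath_scale                          =  400/1000 = 40%

   If the joiner's on-path edge has count 700, the estimate for this
   path is apply_probability (700, 40%) = 280, so path_out_count becomes
   280.  With elast->count = 350, elast->count - nonpath_count = 250 is
   not larger than 280, so no further adjustment is needed; otherwise
   path_out_count would be raised to that difference, capped at
   min_path_count.  */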
841
842
843 /* Update the counts and frequencies for both an original path
844 edge EPATH and its duplicate EDUP. The duplicate source block
845 will get a count/frequency of PATH_IN_COUNT and PATH_IN_FREQ,
846 and the duplicate edge EDUP will have a count of PATH_OUT_COUNT. */
847 static void
848 update_profile (edge epath, edge edup, gcov_type path_in_count,
849 gcov_type path_out_count, int path_in_freq)
850 {
851
852 /* First update the duplicated block's count / frequency. */
853 if (edup)
854 {
855 basic_block dup_block = edup->src;
856 gcc_assert (dup_block->count == 0);
857 gcc_assert (dup_block->frequency == 0);
858 dup_block->count = path_in_count;
859 dup_block->frequency = path_in_freq;
860 }
861
862 /* Now update the original block's count and frequency in the
863 opposite manner - remove the counts/freq that will flow
864 into the duplicated block. Handle underflow due to precision/
865 rounding issues. */
866 epath->src->count -= path_in_count;
867 if (epath->src->count < 0)
868 epath->src->count = 0;
869 epath->src->frequency -= path_in_freq;
870 if (epath->src->frequency < 0)
871 epath->src->frequency = 0;
872
873 /* Next update this path edge's original and duplicated counts. We know
874 that the duplicated path will have path_out_count flowing
875 out of it (in the joiner case this is the count along the duplicated path
876 out of the duplicated joiner). This count can then be removed from the
877 original path edge. */
878 if (edup)
879 edup->count = path_out_count;
880 epath->count -= path_out_count;
881 gcc_assert (epath->count >= 0);
882 }
883
884
885 /* The duplicate and original joiner blocks may end up with different
886 probabilities (different from both the original and from each other).
887 Recompute the probabilities here once we have updated the edge
888 counts and frequencies. */
889
890 static void
891 recompute_probabilities (basic_block bb)
892 {
893 edge esucc;
894 edge_iterator ei;
895 FOR_EACH_EDGE (esucc, ei, bb->succs)
896 {
897 if (!bb->count)
898 continue;
899
900 /* Prevent overflow computation due to insane profiles. */
901 if (esucc->count < bb->count)
902 esucc->probability = GCOV_COMPUTE_SCALE (esucc->count,
903 bb->count);
904 else
905 /* Can happen with missing/guessed probabilities, since we
906 may determine that more is flowing along duplicated
907 path than joiner succ probabilities allowed.
908 Counts and freqs will be insane after jump threading,
909 at least make sure probability is sane or we will
910 get a flow verification error.
911 Not much we can do to make counts/freqs sane without
912 redoing the profile estimation. */
913 esucc->probability = REG_BR_PROB_BASE;
914 }
915 }
916
917
918 /* Update the counts of the original and duplicated edges from a joiner
919 that go off path, given that we have already determined that the
920 duplicate joiner DUP_BB has incoming count PATH_IN_COUNT and
921 outgoing count along the path PATH_OUT_COUNT. The original (on-)path
 922    edge from the joiner is EPATH. */
923
924 static void
925 update_joiner_offpath_counts (edge epath, basic_block dup_bb,
926 gcov_type path_in_count,
927 gcov_type path_out_count)
928 {
929 /* Compute the count that currently flows off path from the joiner.
930 In other words, the total count of joiner's out edges other than
931 epath. Compute this by walking the successors instead of
932 subtracting epath's count from the joiner bb count, since there
933 are sometimes slight insanities where the total out edge count is
934 larger than the bb count (possibly due to rounding/truncation
935 errors). */
936 gcov_type total_orig_off_path_count = 0;
937 edge enonpath;
938 edge_iterator ei;
939 FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
940 {
941 if (enonpath == epath)
942 continue;
943 total_orig_off_path_count += enonpath->count;
944 }
945
946 /* For the path that we are duplicating, the amount that will flow
947 off path from the duplicated joiner is the delta between the
948 path's cumulative in count and the portion of that count we
949 estimated above as flowing from the joiner along the duplicated
950 path. */
951 gcov_type total_dup_off_path_count = path_in_count - path_out_count;
952
953 /* Now do the actual updates of the off-path edges. */
954 FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
955 {
956 /* Look for edges going off of the threading path. */
957 if (enonpath == epath)
958 continue;
959
960 /* Find the corresponding edge out of the duplicated joiner. */
961 edge enonpathdup = find_edge (dup_bb, enonpath->dest);
962 gcc_assert (enonpathdup);
963
964 /* We can't use the original probability of the joiner's out
965 edges, since the probabilities of the original branch
966 and the duplicated branches may vary after all threading is
967 complete. But apportion the duplicated joiner's off-path
968 total edge count computed earlier (total_dup_off_path_count)
969 among the duplicated off-path edges based on their original
970 ratio to the full off-path count (total_orig_off_path_count).
971 */
972 int scale = GCOV_COMPUTE_SCALE (enonpath->count,
973 total_orig_off_path_count);
974 /* Give the duplicated offpath edge a portion of the duplicated
975 total. */
976 enonpathdup->count = apply_scale (scale,
977 total_dup_off_path_count);
978 /* Now update the original offpath edge count, handling underflow
979 due to rounding errors. */
980 enonpath->count -= enonpathdup->count;
981 if (enonpath->count < 0)
982 enonpath->count = 0;
983 }
984 }
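/* A small made-up example of the apportioning above: if the original
   joiner has two off-path edges with counts 300 and 100
   (total_orig_off_path_count = 400), and the duplicated joiner received
   path_in_count = 200 of which path_out_count = 120 stays on the path,
   then total_dup_off_path_count = 80.  The duplicated off-path edges get
   roughly 80 * 300/400 = 60 and 80 * 100/400 = 20 respectively, and the
   same amounts are subtracted from the corresponding original off-path
   edges, clamped at zero.  */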
985
986
987 /* Check if the paths through RD all have estimated frequencies but zero
988 profile counts. This is more accurate than checking the entry block
989 for a zero profile count, since profile insanities sometimes creep in. */
990
991 static bool
992 estimated_freqs_path (struct redirection_data *rd)
993 {
994 edge e = rd->incoming_edges->e;
995 vec<jump_thread_edge *> *path = THREAD_PATH (e);
996 edge ein;
997 edge_iterator ei;
998 bool non_zero_freq = false;
999 FOR_EACH_EDGE (ein, ei, e->dest->preds)
1000 {
1001 if (ein->count)
1002 return false;
1003 non_zero_freq |= ein->src->frequency != 0;
1004 }
1005
1006 for (unsigned int i = 1; i < path->length (); i++)
1007 {
1008 edge epath = (*path)[i]->e;
1009 if (epath->src->count)
1010 return false;
1011 non_zero_freq |= epath->src->frequency != 0;
1012 edge esucc;
1013 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
1014 {
1015 if (esucc->count)
1016 return false;
1017 non_zero_freq |= esucc->src->frequency != 0;
1018 }
1019 }
1020 return non_zero_freq;
1021 }
1022
1023
1024 /* Invoked for routines that have guessed frequencies and no profile
1025 counts to record the block and edge frequencies for paths through RD
1026 in the profile count fields of those blocks and edges. This is because
1027 ssa_fix_duplicate_block_edges incrementally updates the block and
1028 edge counts as edges are redirected, and it is difficult to do that
1029 for edge frequencies which are computed on the fly from the source
1030 block frequency and probability. When a block frequency is updated
1031 its outgoing edge frequencies are affected and become difficult to
1032 adjust. */
1033
1034 static void
1035 freqs_to_counts_path (struct redirection_data *rd)
1036 {
1037 edge e = rd->incoming_edges->e;
1038 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1039 edge ein;
1040 edge_iterator ei;
1041 FOR_EACH_EDGE (ein, ei, e->dest->preds)
1042 {
1043 /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
1044 errors applying the probability when the frequencies are very
1045 small. */
1046 ein->count = apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
1047 ein->probability);
1048 }
1049
1050 for (unsigned int i = 1; i < path->length (); i++)
1051 {
1052 edge epath = (*path)[i]->e;
1053 edge esucc;
1054 /* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
1055 errors applying the edge probability when the frequencies are very
1056 small. */
1057 epath->src->count = epath->src->frequency * REG_BR_PROB_BASE;
1058 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
1059 esucc->count = apply_probability (esucc->src->count,
1060 esucc->probability);
1061 }
1062 }
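/* For intuition, a made-up example of the scaling above, assuming
   REG_BR_PROB_BASE is 10000: a source block with frequency 50 and an
   outgoing edge probability of 2500 (25%) yields a temporary count of
   apply_probability (50 * 10000, 2500) = 125000, i.e. the edge frequency
   of 12.5 scaled up by REG_BR_PROB_BASE so that later scaling loses less
   precision.  clear_counts_path below zeroes these temporary counts once
   all updates for RD are complete.  */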
1063
1064
1065 /* For routines that have guessed frequencies and no profile counts, where we
1066 used freqs_to_counts_path to record block and edge frequencies for paths
1067 through RD, we clear the counts after completing all updates for RD.
1068 The updates in ssa_fix_duplicate_block_edges are based off the count fields,
1069 but the block frequencies and edge probabilities were updated as well,
1070 so we can simply clear the count fields. */
1071
1072 static void
1073 clear_counts_path (struct redirection_data *rd)
1074 {
1075 edge e = rd->incoming_edges->e;
1076 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1077 edge ein, esucc;
1078 edge_iterator ei;
1079 FOR_EACH_EDGE (ein, ei, e->dest->preds)
1080 ein->count = 0;
1081
1082 /* First clear counts along original path. */
1083 for (unsigned int i = 1; i < path->length (); i++)
1084 {
1085 edge epath = (*path)[i]->e;
1086 FOR_EACH_EDGE (esucc, ei, epath->src->succs)
1087 esucc->count = 0;
1088 epath->src->count = 0;
1089 }
1090 /* Also need to clear the counts along duplicated path. */
1091 for (unsigned int i = 0; i < 2; i++)
1092 {
1093 basic_block dup = rd->dup_blocks[i];
1094 if (!dup)
1095 continue;
1096 FOR_EACH_EDGE (esucc, ei, dup->succs)
1097 esucc->count = 0;
1098 dup->count = 0;
1099 }
1100 }
1101
1102 /* Wire up the outgoing edges from the duplicate blocks and
1103 update any PHIs as needed. Also update the profile counts
1104 on the original and duplicate blocks and edges. */
1105 void
1106 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
1107 ssa_local_info_t *local_info)
1108 {
1109 bool multi_incomings = (rd->incoming_edges->next != NULL);
1110 edge e = rd->incoming_edges->e;
1111 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1112 edge elast = path->last ()->e;
1113 gcov_type path_in_count = 0;
1114 gcov_type path_out_count = 0;
1115 int path_in_freq = 0;
1116
1117 /* This routine updates profile counts, frequencies, and probabilities
1118 incrementally. Since it is difficult to do the incremental updates
1119 using frequencies/probabilities alone, for routines without profile
1120 data we first take a snapshot of the existing block and edge frequencies
1121 by copying them into the empty profile count fields. These counts are
1122 then used to do the incremental updates, and cleared at the end of this
1123 routine. If the function is marked as having a profile, we still check
1124 to see if the paths through RD are using estimated frequencies because
1125 the routine had zero profile counts. */
1126 bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
1127 || estimated_freqs_path (rd));
1128 if (do_freqs_to_counts)
1129 freqs_to_counts_path (rd);
1130
1131 /* First determine how much profile count to move from original
1132 path to the duplicate path. This is tricky in the presence of
1133 a joiner (see comments for compute_path_counts), where some portion
1134 of the path's counts will flow off-path from the joiner. In the
1135 non-joiner case the path_in_count and path_out_count should be the
1136 same. */
1137 bool has_joiner = compute_path_counts (rd, local_info,
1138 &path_in_count, &path_out_count,
1139 &path_in_freq);
1140
1141 int cur_path_freq = path_in_freq;
1142 for (unsigned int count = 0, i = 1; i < path->length (); i++)
1143 {
1144 edge epath = (*path)[i]->e;
1145
 1146	  /* If we were threading through a joiner block, then we want
1147 to keep its control statement and redirect an outgoing edge.
1148 Else we want to remove the control statement & edges, then create
1149 a new outgoing edge. In both cases we may need to update PHIs. */
1150 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1151 {
1152 edge victim;
1153 edge e2;
1154
1155 gcc_assert (has_joiner);
1156
1157 /* This updates the PHIs at the destination of the duplicate
1158 block. Pass 0 instead of i if we are threading a path which
1159 has multiple incoming edges. */
1160 update_destination_phis (local_info->bb, rd->dup_blocks[count],
1161 path, multi_incomings ? 0 : i);
1162
1163 /* Find the edge from the duplicate block to the block we're
1164 threading through. That's the edge we want to redirect. */
1165 victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);
1166
1167 /* If there are no remaining blocks on the path to duplicate,
1168 then redirect VICTIM to the final destination of the jump
1169 threading path. */
1170 if (!any_remaining_duplicated_blocks (path, i))
1171 {
1172 e2 = redirect_edge_and_branch (victim, elast->dest);
1173 /* If we redirected the edge, then we need to copy PHI arguments
1174 at the target. If the edge already existed (e2 != victim
1175 case), then the PHIs in the target already have the correct
1176 arguments. */
1177 if (e2 == victim)
1178 copy_phi_args (e2->dest, elast, e2,
1179 path, multi_incomings ? 0 : i);
1180 }
1181 else
1182 {
1183 /* Redirect VICTIM to the next duplicated block in the path. */
1184 e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);
1185
1186 /* We need to update the PHIs in the next duplicated block. We
1187 want the new PHI args to have the same value as they had
1188 in the source of the next duplicate block.
1189
1190 Thus, we need to know which edge we traversed into the
1191 source of the duplicate. Furthermore, we may have
1192 traversed many edges to reach the source of the duplicate.
1193
1194 Walk through the path starting at element I until we
1195 hit an edge marked with EDGE_COPY_SRC_BLOCK. We want
1196 the edge from the prior element. */
1197 for (unsigned int j = i + 1; j < path->length (); j++)
1198 {
1199 if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
1200 {
1201 copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
1202 break;
1203 }
1204 }
1205 }
1206
1207 /* Update the counts and frequency of both the original block
1208 and path edge, and the duplicates. The path duplicate's
1209 incoming count and frequency are the totals for all edges
1210 incoming to this jump threading path computed earlier.
1211 And we know that the duplicated path will have path_out_count
1212 flowing out of it (i.e. along the duplicated path out of the
1213 duplicated joiner). */
1214 update_profile (epath, e2, path_in_count, path_out_count,
1215 path_in_freq);
1216
1217 /* Next we need to update the counts of the original and duplicated
1218 edges from the joiner that go off path. */
1219 update_joiner_offpath_counts (epath, e2->src, path_in_count,
1220 path_out_count);
1221
1222 /* Finally, we need to set the probabilities on the duplicated
1223 edges out of the duplicated joiner (e2->src). The probabilities
1224 along the original path will all be updated below after we finish
1225 processing the whole path. */
1226 recompute_probabilities (e2->src);
1227
1228 /* Record the frequency flowing to the downstream duplicated
1229 path blocks. */
1230 cur_path_freq = EDGE_FREQUENCY (e2);
1231 }
1232 else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
1233 {
1234 remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
1235 create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
1236 multi_incomings ? 0 : i);
1237 if (count == 1)
1238 single_succ_edge (rd->dup_blocks[1])->aux = NULL;
1239
1240 /* Update the counts and frequency of both the original block
1241 and path edge, and the duplicates. Since we are now after
1242 any joiner that may have existed on the path, the count
1243 flowing along the duplicated threaded path is path_out_count.
1244 If we didn't have a joiner, then cur_path_freq was the sum
1245 of the total frequencies along all incoming edges to the
1246 thread path (path_in_freq). If we had a joiner, it would have
1247 been updated at the end of that handling to the edge frequency
1248 along the duplicated joiner path edge. */
1249 update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
1250 path_out_count, path_out_count,
1251 cur_path_freq);
1252 }
1253 else
1254 {
1255 /* No copy case. In this case we don't have an equivalent block
1256 on the duplicated thread path to update, but we do need
1257 to remove the portion of the counts/freqs that were moved
1258 to the duplicated path from the counts/freqs flowing through
1259 this block on the original path. Since all the no-copy edges
1260 are after any joiner, the removed count is the same as
1261 path_out_count.
1262
1263 If we didn't have a joiner, then cur_path_freq was the sum
1264 of the total frequencies along all incoming edges to the
1265 thread path (path_in_freq). If we had a joiner, it would have
1266 been updated at the end of that handling to the edge frequency
1267 along the duplicated joiner path edge. */
1268 update_profile (epath, NULL, path_out_count, path_out_count,
1269 cur_path_freq);
1270 }
1271
1272 /* Increment the index into the duplicated path when we processed
1273 a duplicated block. */
1274 if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
1275 || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
1276 {
1277 count++;
1278 }
1279 }
1280
1281 /* Now walk orig blocks and update their probabilities, since the
 1282      counts and freqs should be updated properly by the above loop. */
1283 for (unsigned int i = 1; i < path->length (); i++)
1284 {
1285 edge epath = (*path)[i]->e;
1286 recompute_probabilities (epath->src);
1287 }
1288
1289 /* Done with all profile and frequency updates, clear counts if they
1290 were copied. */
1291 if (do_freqs_to_counts)
1292 clear_counts_path (rd);
1293 }
1294
1295 /* Hash table traversal callback routine to create duplicate blocks. */
1296
1297 int
1298 ssa_create_duplicates (struct redirection_data **slot,
1299 ssa_local_info_t *local_info)
1300 {
1301 struct redirection_data *rd = *slot;
1302
1303 /* The second duplicated block in a jump threading path is specific
1304 to the path. So it gets stored in RD rather than in LOCAL_DATA.
1305
1306 Each time we're called, we have to look through the path and see
1307 if a second block needs to be duplicated.
1308
1309 Note the search starts with the third edge on the path. The first
1310 edge is the incoming edge, the second edge always has its source
1311 duplicated. Thus we start our search with the third edge. */
1312 vec<jump_thread_edge *> *path = rd->path;
1313 for (unsigned int i = 2; i < path->length (); i++)
1314 {
1315 if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
1316 || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1317 {
1318 create_block_for_threading ((*path)[i]->e->src, rd, 1,
1319 &local_info->duplicate_blocks);
1320 break;
1321 }
1322 }
1323
1324 /* Create a template block if we have not done so already. Otherwise
1325 use the template to create a new block. */
1326 if (local_info->template_block == NULL)
1327 {
1328 create_block_for_threading ((*path)[1]->e->src, rd, 0,
1329 &local_info->duplicate_blocks);
1330 local_info->template_block = rd->dup_blocks[0];
1331
1332 /* We do not create any outgoing edges for the template. We will
1333 take care of that in a later traversal. That way we do not
1334 create edges that are going to just be deleted. */
1335 }
1336 else
1337 {
1338 create_block_for_threading (local_info->template_block, rd, 0,
1339 &local_info->duplicate_blocks);
1340
1341 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
1342 block. */
1343 ssa_fix_duplicate_block_edges (rd, local_info);
1344 }
1345
1346 /* Keep walking the hash table. */
1347 return 1;
1348 }
1349
1350 /* We did not create any outgoing edges for the template block during
1351 block creation. This hash table traversal callback creates the
1352 outgoing edge for the template block. */
1353
1354 inline int
1355 ssa_fixup_template_block (struct redirection_data **slot,
1356 ssa_local_info_t *local_info)
1357 {
1358 struct redirection_data *rd = *slot;
1359
 1360   /* If this is the template block, halt the traversal after updating
1361 it appropriately.
1362
 1363      If we were threading through a joiner block, then we want
1364 to keep its control statement and redirect an outgoing edge.
1365 Else we want to remove the control statement & edges, then create
1366 a new outgoing edge. In both cases we may need to update PHIs. */
1367 if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
1368 {
1369 ssa_fix_duplicate_block_edges (rd, local_info);
1370 return 0;
1371 }
1372
1373 return 1;
1374 }
1375
1376 /* Hash table traversal callback to redirect each incoming edge
1377 associated with this hash table element to its new destination. */
1378
1379 int
1380 ssa_redirect_edges (struct redirection_data **slot,
1381 ssa_local_info_t *local_info)
1382 {
1383 struct redirection_data *rd = *slot;
1384 struct el *next, *el;
1385
1386 /* Walk over all the incoming edges associated with this hash table
1387 entry. */
1388 for (el = rd->incoming_edges; el; el = next)
1389 {
1390 edge e = el->e;
1391 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1392
1393 /* Go ahead and free this element from the list. Doing this now
1394 avoids the need for another list walk when we destroy the hash
1395 table. */
1396 next = el->next;
1397 free (el);
1398
1399 thread_stats.num_threaded_edges++;
1400
1401 if (rd->dup_blocks[0])
1402 {
1403 edge e2;
1404
1405 if (dump_file && (dump_flags & TDF_DETAILS))
1406 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
1407 e->src->index, e->dest->index, rd->dup_blocks[0]->index);
1408
 1409	      /* If we redirect a loop latch edge, cancel its loop. */
1410 if (e->src == e->src->loop_father->latch)
1411 mark_loop_for_removal (e->src->loop_father);
1412
1413 /* Redirect the incoming edge (possibly to the joiner block) to the
1414 appropriate duplicate block. */
1415 e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
1416 gcc_assert (e == e2);
1417 flush_pending_stmts (e2);
1418 }
1419
1420 /* Go ahead and clear E->aux. It's not needed anymore and failure
1421 to clear it will cause all kinds of unpleasant problems later. */
1422 delete_jump_thread_path (path);
1423 e->aux = NULL;
1424
1425 }
1426
1427 /* Indicate that we actually threaded one or more jumps. */
1428 if (rd->incoming_edges)
1429 local_info->jumps_threaded = true;
1430
1431 return 1;
1432 }
1433
1434 /* Return true if this block has no executable statements other than
1435 a simple ctrl flow instruction. When the number of outgoing edges
1436 is one, this is equivalent to a "forwarder" block. */
1437
1438 static bool
1439 redirection_block_p (basic_block bb)
1440 {
1441 gimple_stmt_iterator gsi;
1442
1443 /* Advance to the first executable statement. */
1444 gsi = gsi_start_bb (bb);
1445 while (!gsi_end_p (gsi)
1446 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
1447 || is_gimple_debug (gsi_stmt (gsi))
1448 || gimple_nop_p (gsi_stmt (gsi))
1449 || gimple_clobber_p (gsi_stmt (gsi))))
1450 gsi_next (&gsi);
1451
1452 /* Check if this is an empty block. */
1453 if (gsi_end_p (gsi))
1454 return true;
1455
1456 /* Test that we've reached the terminating control statement. */
1457 return gsi_stmt (gsi)
1458 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
1459 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
1460 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
1461 }
1462
1463 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
1464 is reached via one or more specific incoming edges, we know which
1465 outgoing edge from BB will be traversed.
1466
1467 We want to redirect those incoming edges to the target of the
1468 appropriate outgoing edge. Doing so avoids a conditional branch
1469 and may expose new optimization opportunities. Note that we have
1470 to update dominator tree and SSA graph after such changes.
1471
1472 The key to keeping the SSA graph update manageable is to duplicate
1473 the side effects occurring in BB so that those side effects still
1474 occur on the paths which bypass BB after redirecting edges.
1475
1476 We accomplish this by creating duplicates of BB and arranging for
1477 the duplicates to unconditionally pass control to one specific
1478 successor of BB. We then revector the incoming edges into BB to
1479 the appropriate duplicate of BB.
1480
1481 If NOLOOP_ONLY is true, we only perform the threading as long as it
1482 does not affect the structure of the loops in a nontrivial way.
1483
1484 If JOINERS is true, then thread through joiner blocks as well. */
1485
1486 static bool
1487 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
1488 {
1489 /* E is an incoming edge into BB that we may or may not want to
1490 redirect to a duplicate of BB. */
1491 edge e, e2;
1492 edge_iterator ei;
1493 ssa_local_info_t local_info;
1494
1495 local_info.duplicate_blocks = BITMAP_ALLOC (NULL);
1496
 1497   /* To avoid scanning a linear array for the element we need, we instead
1498 use a hash table. For normal code there should be no noticeable
1499 difference. However, if we have a block with a large number of
1500 incoming and outgoing edges such linear searches can get expensive. */
1501 redirection_data
1502 = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));
1503
1504 /* Record each unique threaded destination into a hash table for
1505 efficient lookups. */
1506 FOR_EACH_EDGE (e, ei, bb->preds)
1507 {
1508 if (e->aux == NULL)
1509 continue;
1510
1511 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1512
1513 if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
1514 || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
1515 continue;
1516
1517 e2 = path->last ()->e;
1518 if (!e2 || noloop_only)
1519 {
1520 /* If NOLOOP_ONLY is true, we only allow threading through the
1521 header of a loop to exit edges. */
1522
 1523	  /* One case occurs when there was a loop header buried in a jump
 1524	     threading path that crosses loop boundaries. We do not try to
 1525	     thread this elsewhere, so just cancel the jump threading
1526 request by clearing the AUX field now. */
1527 if ((bb->loop_father != e2->src->loop_father
1528 && !loop_exit_edge_p (e2->src->loop_father, e2))
1529 || (e2->src->loop_father != e2->dest->loop_father
1530 && !loop_exit_edge_p (e2->src->loop_father, e2)))
1531 {
1532 /* Since this case is not handled by our special code
1533 to thread through a loop header, we must explicitly
1534 cancel the threading request here. */
1535 delete_jump_thread_path (path);
1536 e->aux = NULL;
1537 continue;
1538 }
1539
1540 /* Another case occurs when trying to thread through our
1541 own loop header, possibly from inside the loop. We will
1542 thread these later. */
1543 unsigned int i;
1544 for (i = 1; i < path->length (); i++)
1545 {
1546 if ((*path)[i]->e->src == bb->loop_father->header
1547 && (!loop_exit_edge_p (bb->loop_father, e2)
1548 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
1549 break;
1550 }
1551
1552 if (i != path->length ())
1553 continue;
1554 }
1555
1556 /* Insert the outgoing edge into the hash table if it is not
1557 already in the hash table. */
1558 lookup_redirection_data (e, INSERT);
1559 }
1560
1561 /* We do not update dominance info. */
1562 free_dominance_info (CDI_DOMINATORS);
1563
1564 /* We know we only thread through the loop header to loop exits.
1565 Let the basic block duplication hook know we are not creating
1566 a multiple entry loop. */
1567 if (noloop_only
1568 && bb == bb->loop_father->header)
1569 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
1570
1571 /* Now create duplicates of BB.
1572
1573 Note that for a block with a high outgoing degree we can waste
1574 a lot of time and memory creating and destroying useless edges.
1575
1576 So we first duplicate BB and remove the control structure at the
1577 tail of the duplicate as well as all outgoing edges from the
1578 duplicate. We then use that duplicate block as a template for
1579 the rest of the duplicates. */
1580 local_info.template_block = NULL;
1581 local_info.bb = bb;
1582 local_info.jumps_threaded = false;
1583 redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
1584 (&local_info);
1585
1586 /* The template does not have an outgoing edge. Create that outgoing
1587 edge and update PHI nodes at the edge's target as necessary.
1588
1589 We do this after creating all the duplicates to avoid creating
1590 unnecessary edges. */
1591 redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
1592 (&local_info);
1593
1594 /* The hash table traversals above created the duplicate blocks (and the
1595 statements within the duplicate blocks). This loop creates PHI nodes for
1596 the duplicated blocks and redirects the incoming edges into BB to reach
1597 the duplicates of BB. */
1598 redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
1599 (&local_info);
1600
1601 /* Done with this block. Clear REDIRECTION_DATA. */
1602 delete redirection_data;
1603 redirection_data = NULL;
1604
1605 if (noloop_only
1606 && bb == bb->loop_father->header)
1607 set_loop_copy (bb->loop_father, NULL);
1608
1609 BITMAP_FREE (local_info.duplicate_blocks);
1610 local_info.duplicate_blocks = NULL;
1611
1612 /* Indicate to our caller whether or not any jumps were threaded. */
1613 return local_info.jumps_threaded;
1614 }
1615
1616 /* Wrapper for thread_block_1 so that we can first handle jump
1617 thread paths which do not involve copying joiner blocks, then
1618 handle jump thread paths which have joiner blocks.
1619
1620 By doing things this way we can be as aggressive as possible and
1621 not worry that copying a joiner block will create a jump threading
1622 opportunity. */
1623
1624 static bool
1625 thread_block (basic_block bb, bool noloop_only)
1626 {
1627 bool retval;
1628 retval = thread_block_1 (bb, noloop_only, false);
1629 retval |= thread_block_1 (bb, noloop_only, true);
1630 return retval;
1631 }
1632
1633
1634 /* Threads edge E through E->dest to the second edge on the jump thread path
1635 associated with E. Returns the copy of E->dest created during threading,
1636 or E->dest if it was not necessary to copy it (E is its single predecessor). */
1637
1638 static basic_block
1639 thread_single_edge (edge e)
1640 {
1641 basic_block bb = e->dest;
1642 struct redirection_data rd;
1643 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1644 edge eto = (*path)[1]->e;
1645
1646 delete_jump_thread_path (path);
1647 e->aux = NULL;
1648
1649 thread_stats.num_threaded_edges++;
1650
1651 if (single_pred_p (bb))
1652 {
1653 /* If BB has just a single predecessor, we need only remove the control
1654 statement at its end and all successor edges except ETO. */
1655 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
1656
1657 /* And fixup the flags on the single remaining edge. */
1658 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
1659 eto->flags |= EDGE_FALLTHRU;
1660
1661 return bb;
1662 }
1663
1664 /* Otherwise, we need to create a copy. */
1665 if (e->dest == eto->src)
1666 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
1667
1668 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
1669 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
1670 npath->safe_push (x);
1671
1672 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
1673 npath->safe_push (x);
1674 rd.path = npath;
1675
1676 create_block_for_threading (bb, &rd, 0, NULL);
1677 remove_ctrl_stmt_and_useless_edges (rd.dup_blocks[0], NULL);
1678 create_edge_and_update_destination_phis (&rd, rd.dup_blocks[0], 0);
1679
1680 if (dump_file && (dump_flags & TDF_DETAILS))
1681 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
1682 e->src->index, e->dest->index, rd.dup_blocks[0]->index);
1683
1684 rd.dup_blocks[0]->count = e->count;
1685 rd.dup_blocks[0]->frequency = EDGE_FREQUENCY (e);
1686 single_succ_edge (rd.dup_blocks[0])->count = e->count;
1687 redirect_edge_and_branch (e, rd.dup_blocks[0]);
1688 flush_pending_stmts (e);
1689
1690 delete_jump_thread_path (npath);
1691 return rd.dup_blocks[0];
1692 }
1693
1694 /* Callback for dfs_enumerate_from. Returns true if BB is different
1695 from STOP and DBDS_CE_STOP. */
1696
1697 static basic_block dbds_ce_stop;
1698 static bool
1699 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
1700 {
1701 return (bb != (const_basic_block) stop
1702 && bb != dbds_ce_stop);
1703 }
1704
1705 /* Evaluates the dominance relationship between the latch of LOOP and BB,
1706 and returns the resulting state. */
1707
1708 enum bb_dom_status
1709 {
1710 /* BB does not dominate the latch of the LOOP. */
1711 DOMST_NONDOMINATING,
1712 /* The LOOP is broken (there is no path from the header to its latch). */
1713 DOMST_LOOP_BROKEN,
1714 /* BB dominates the latch of the LOOP. */
1715 DOMST_DOMINATING
1716 };
1717
1718 static enum bb_dom_status
1719 determine_bb_domination_status (struct loop *loop, basic_block bb)
1720 {
1721 basic_block *bblocks;
1722 unsigned nblocks, i;
1723 bool bb_reachable = false;
1724 edge_iterator ei;
1725 edge e;
1726
1727 /* This function assumes BB is a successor of LOOP->header.
1728 If that is not the case return DOMST_NONDOMINATING which
1729 is always safe. */
1730 {
1731 bool ok = false;
1732
1733 FOR_EACH_EDGE (e, ei, bb->preds)
1734 {
1735 if (e->src == loop->header)
1736 {
1737 ok = true;
1738 break;
1739 }
1740 }
1741
1742 if (!ok)
1743 return DOMST_NONDOMINATING;
1744 }
1745
1746 if (bb == loop->latch)
1747 return DOMST_DOMINATING;
1748
1749 /* Check that BB dominates LOOP->latch, and that it is back-reachable
1750 from it. */
1751
1752 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1753 dbds_ce_stop = loop->header;
1754 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1755 bblocks, loop->num_nodes, bb);
1756 for (i = 0; i < nblocks; i++)
1757 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1758 {
1759 if (e->src == loop->header)
1760 {
1761 free (bblocks);
1762 return DOMST_NONDOMINATING;
1763 }
1764 if (e->src == bb)
1765 bb_reachable = true;
1766 }
1767
1768 free (bblocks);
1769 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1770 }
1771
1772 /* Return true if BB is part of the new pre-header that is created
1773 when threading the latch to DATA. */
1774
1775 static bool
1776 def_split_header_continue_p (const_basic_block bb, const void *data)
1777 {
1778 const_basic_block new_header = (const_basic_block) data;
1779 const struct loop *l;
1780
1781 if (bb == new_header
1782 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
1783 return false;
1784 for (l = bb->loop_father; l; l = loop_outer (l))
1785 if (l == new_header->loop_father)
1786 return true;
1787 return false;
1788 }
1789
1790 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
1791 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1792 to the inside of the loop. */
1793
1794 static bool
1795 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1796 {
1797 basic_block header = loop->header;
1798 edge e, tgt_edge, latch = loop_latch_edge (loop);
1799 edge_iterator ei;
1800 basic_block tgt_bb, atgt_bb;
1801 enum bb_dom_status domst;
1802
1803 /* We have already threaded through headers to exits, so all the threading
1804 requests now are to the inside of the loop. We need to avoid creating
1805 irreducible regions (i.e., loops with more than one entry block), and
1806 also loops with several latch edges, or new subloops of the loop (although
1807 there are cases where it might be appropriate, it is difficult to decide,
1808 and doing it wrongly may confuse other optimizers).
1809
1810 We could handle more general cases here. However, the intention is to
1811 preserve some information about the loop, which is impossible if its
1812 structure changes significantly, in a way that is not well understood.
1813 Thus we only handle a few important special cases, in which updating
1814 the loop-carried information should also be feasible:
1815
1816 1) Propagation of the latch edge to a block that dominates the latch block
1817 of a loop. This aims to handle the following idiom:
1818
1819 first = 1;
1820 while (1)
1821 {
1822 if (first)
1823 initialize;
1824 first = 0;
1825 body;
1826 }
1827
1828 After threading the latch edge, this becomes
1829
1830 first = 1;
1831 if (first)
1832 initialize;
1833 while (1)
1834 {
1835 first = 0;
1836 body;
1837 }
1838
1839 The original header of the loop is moved out of it, and we may thread
1840 the remaining edges through it without further constraints.
1841
1842 2) All entry edges are propagated to a single basic block that dominates
1843 the latch block of the loop. This aims to handle the following idiom
1844 (normally created for "for" loops):
1845
1846 i = 0;
1847 while (1)
1848 {
1849 if (i >= 100)
1850 break;
1851 body;
1852 i++;
1853 }
1854
1855 This becomes
1856
1857 i = 0;
1858 while (1)
1859 {
1860 body;
1861 i++;
1862 if (i >= 100)
1863 break;
1864 }
1865 */
1866
1867 /* Threading through the header won't improve the code if the header has just
1868 one successor. */
1869 if (single_succ_p (header))
1870 goto fail;
1871
1872 /* If we threaded the latch using a joiner block, we cancel the
1873 threading opportunity out of an abundance of caution. However,
1874 still allow threading from outside to inside the loop. */
1875 if (latch->aux)
1876 {
1877 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1878 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1879 {
1880 delete_jump_thread_path (path);
1881 latch->aux = NULL;
1882 }
1883 }
1884
1885 if (latch->aux)
1886 {
1887 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1888 tgt_edge = (*path)[1]->e;
1889 tgt_bb = tgt_edge->dest;
1890 }
1891 else if (!may_peel_loop_headers
1892 && !redirection_block_p (loop->header))
1893 goto fail;
1894 else
1895 {
1896 tgt_bb = NULL;
1897 tgt_edge = NULL;
1898 FOR_EACH_EDGE (e, ei, header->preds)
1899 {
1900 if (!e->aux)
1901 {
1902 if (e == latch)
1903 continue;
1904
1905 /* If latch is not threaded, and there is a header
1906 edge that is not threaded, we would create a loop
1907 with multiple entries. */
1908 goto fail;
1909 }
1910
1911 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1912
1913 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1914 goto fail;
1915 tgt_edge = (*path)[1]->e;
1916 atgt_bb = tgt_edge->dest;
1917 if (!tgt_bb)
1918 tgt_bb = atgt_bb;
1919 /* Two targets of threading would make us create a loop
1920 with multiple entries. */
1921 else if (tgt_bb != atgt_bb)
1922 goto fail;
1923 }
1924
1925 if (!tgt_bb)
1926 {
1927 /* There are no threading requests. */
1928 return false;
1929 }
1930
1931 /* Redirecting to empty loop latch is useless. */
1932 if (tgt_bb == loop->latch
1933 && empty_block_p (loop->latch))
1934 goto fail;
1935 }
1936
1937 /* The target block must dominate the loop latch, otherwise we would be
1938 creating a subloop. */
1939 domst = determine_bb_domination_status (loop, tgt_bb);
1940 if (domst == DOMST_NONDOMINATING)
1941 goto fail;
1942 if (domst == DOMST_LOOP_BROKEN)
1943 {
1944 /* If the loop ceased to exist, mark it as such, and thread through its
1945 original header. */
1946 mark_loop_for_removal (loop);
1947 return thread_block (header, false);
1948 }
1949
1950 if (tgt_bb->loop_father->header == tgt_bb)
1951 {
1952 /* If the target of the threading is a header of a subloop, we need
1953 to create a preheader for it, so that the headers of the two loops
1954 do not merge. */
1955 if (EDGE_COUNT (tgt_bb->preds) > 2)
1956 {
1957 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1958 gcc_assert (tgt_bb != NULL);
1959 }
1960 else
1961 tgt_bb = split_edge (tgt_edge);
1962 }
1963
1964 if (latch->aux)
1965 {
1966 basic_block *bblocks;
1967 unsigned nblocks, i;
1968
1969 /* First handle the case where the latch edge is redirected. We are copying
1970 the loop header but not creating a multiple entry loop. Make the
1971 cfg manipulation code aware of that fact. */
1972 set_loop_copy (loop, loop);
1973 loop->latch = thread_single_edge (latch);
1974 set_loop_copy (loop, NULL);
1975 gcc_assert (single_succ (loop->latch) == tgt_bb);
1976 loop->header = tgt_bb;
1977
1978 /* Remove the new pre-header blocks from our loop. */
1979 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1980 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1981 bblocks, loop->num_nodes, tgt_bb);
1982 for (i = 0; i < nblocks; i++)
1983 if (bblocks[i]->loop_father == loop)
1984 {
1985 remove_bb_from_loops (bblocks[i]);
1986 add_bb_to_loop (bblocks[i], loop_outer (loop));
1987 }
1988 free (bblocks);
1989
1990 /* If the new header has multiple latches mark it so. */
1991 FOR_EACH_EDGE (e, ei, loop->header->preds)
1992 if (e->src->loop_father == loop
1993 && e->src != loop->latch)
1994 {
1995 loop->latch = NULL;
1996 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1997 }
1998
1999 /* Cancel remaining threading requests that would make the
2000 loop a multiple entry loop. */
2001 FOR_EACH_EDGE (e, ei, header->preds)
2002 {
2003 edge e2;
2004
2005 if (e->aux == NULL)
2006 continue;
2007
2008 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2009 e2 = path->last ()->e;
2010
2011 if (e->src->loop_father != e2->dest->loop_father
2012 && e2->dest != loop->header)
2013 {
2014 delete_jump_thread_path (path);
2015 e->aux = NULL;
2016 }
2017 }
2018
2019 /* Thread the remaining edges through the former header. */
2020 thread_block (header, false);
2021 }
2022 else
2023 {
2024 basic_block new_preheader;
2025
2026 /* Now consider the case where entry edges are redirected to the new entry
2027 block. Remember one entry edge, so that we can find the new
2028 preheader (its destination after threading). */
2029 FOR_EACH_EDGE (e, ei, header->preds)
2030 {
2031 if (e->aux)
2032 break;
2033 }
2034
2035 /* The duplicate of the header is the new preheader of the loop. Ensure
2036 that it is placed correctly in the loop hierarchy. */
2037 set_loop_copy (loop, loop_outer (loop));
2038
2039 thread_block (header, false);
2040 set_loop_copy (loop, NULL);
2041 new_preheader = e->dest;
2042
2043 /* Create the new latch block. This is always necessary, as the latch
2044 must have only a single successor, but the original header had at
2045 least two successors. */
2046 loop->latch = NULL;
2047 mfb_kj_edge = single_succ_edge (new_preheader);
2048 loop->header = mfb_kj_edge->dest;
2049 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
2050 loop->header = latch->dest;
2051 loop->latch = latch->src;
2052 }
2053
2054 return true;
2055
2056 fail:
2057 /* We failed to thread anything. Cancel the requests. */
2058 FOR_EACH_EDGE (e, ei, header->preds)
2059 {
2060 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2061
2062 if (path)
2063 {
2064 delete_jump_thread_path (path);
2065 e->aux = NULL;
2066 }
2067 }
2068 return false;
2069 }
2070
2071 /* E1 and E2 are edges into the same basic block. Return TRUE if the
2072 PHI arguments associated with those edges are equal or there are no
2073 PHI arguments, otherwise return FALSE. */
2074
2075 static bool
2076 phi_args_equal_on_edges (edge e1, edge e2)
2077 {
2078 gphi_iterator gsi;
2079 int indx1 = e1->dest_idx;
2080 int indx2 = e2->dest_idx;
2081
2082 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
2083 {
2084 gphi *phi = gsi.phi ();
2085
2086 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
2087 gimple_phi_arg_def (phi, indx2), 0))
2088 return false;
2089 }
2090 return true;
2091 }
2092
2093 /* Walk through the registered jump threads and convert them into a
2094 form convenient for this pass.
2095
2096 Any block which has incoming edges threaded to outgoing edges
2097 will have its entry in THREADED_BLOCKS set.
2098
2099 Any threaded edge will have its new outgoing edge stored in the
2100 original edge's AUX field.
2101
2102 This form avoids the need to walk all the edges in the CFG to
2103 discover blocks which need processing and avoids unnecessary
2104 hash table lookups to map from threaded edge to new target. */
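/* A hypothetical example of the encoding: for a registered path
   P = { A->B (EDGE_START_JUMP_THREAD), B->C (EDGE_COPY_SRC_BLOCK) }
   the AUX field of edge A->B is set to P and B's index is marked in
   THREADED_BLOCKS, so a later walk over B's predecessors recovers the
   whole path without any hash table lookup.  */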
2105
2106 static void
2107 mark_threaded_blocks (bitmap threaded_blocks)
2108 {
2109 unsigned int i;
2110 bitmap_iterator bi;
2111 bitmap tmp = BITMAP_ALLOC (NULL);
2112 basic_block bb;
2113 edge e;
2114 edge_iterator ei;
2115
2116 /* It is possible to have jump threads in which one is a subpath
2117 of the other, i.e., (A, B), (B, C), (C, D) where B is a joiner
2118 block and (B, C), (C, D) where no joiner block exists.
2119
2120 When this occurs ignore the jump thread request with the joiner
2121 block. It's totally subsumed by the simpler jump thread request.
2122
2123 This results in less block copying and simpler CFGs. More importantly,
2124 when we duplicate the joiner block, B, in this case we will create
2125 a new threading opportunity that we wouldn't be able to optimize
2126 until the next jump threading iteration.
2127
2128 So first convert the jump thread requests which do not require a
2129 joiner block. */
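  /* As a hypothetical instance of the overlap described above: given the
     joiner path (A,B), (B,C), (C,D) and the simpler path (B,C), (C,D),
     the simpler path is attached to edge B->C in the loop below, and the
     joiner path is later cancelled because one of its edges, B->C,
     already carries a thread.  */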
2130 for (i = 0; i < paths.length (); i++)
2131 {
2132 vec<jump_thread_edge *> *path = paths[i];
2133
2134 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
2135 {
2136 edge e = (*path)[0]->e;
2137 e->aux = (void *)path;
2138 bitmap_set_bit (tmp, e->dest->index);
2139 }
2140 }
2141
2142 /* Now iterate again, converting cases where we want to thread
2143 through a joiner block, but only if no other edge on the path
2144 already has a jump thread attached to it. We do this in two passes,
2145 to avoid situations where the order in the paths vec can hide overlapping
2146 threads (the path is recorded on the incoming edge, so we would miss
2147 cases where the second path starts at a downstream edge on the same
2148 path). First record all joiner paths, deleting any in the unexpected
2149 case where there is already a path for that incoming edge. */
2150 for (i = 0; i < paths.length ();)
2151 {
2152 vec<jump_thread_edge *> *path = paths[i];
2153
2154 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
2155 {
2156 /* Attach the path to the starting edge if none is yet recorded. */
2157 if ((*path)[0]->e->aux == NULL)
2158 {
2159 (*path)[0]->e->aux = path;
2160 i++;
2161 }
2162 else
2163 {
2164 paths.unordered_remove (i);
2165 if (dump_file && (dump_flags & TDF_DETAILS))
2166 dump_jump_thread_path (dump_file, *path, false);
2167 delete_jump_thread_path (path);
2168 }
2169 }
2170 else
2171 {
2172 i++;
2173 }
2174 }
2175
2176 /* Second, look for paths that have any other jump thread attached to
2177 them, and either finish converting them or cancel them. */
2178 for (i = 0; i < paths.length ();)
2179 {
2180 vec<jump_thread_edge *> *path = paths[i];
2181 edge e = (*path)[0]->e;
2182
2183 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
2184 {
2185 unsigned int j;
2186 for (j = 1; j < path->length (); j++)
2187 if ((*path)[j]->e->aux != NULL)
2188 break;
2189
2190 /* If we iterated through the entire path without exiting the loop,
2191 then we are good to go, record it. */
2192 if (j == path->length ())
2193 {
2194 bitmap_set_bit (tmp, e->dest->index);
2195 i++;
2196 }
2197 else
2198 {
2199 e->aux = NULL;
2200 paths.unordered_remove (i);
2201 if (dump_file && (dump_flags & TDF_DETAILS))
2202 dump_jump_thread_path (dump_file, *path, false);
2203 delete_jump_thread_path (path);
2204 }
2205 }
2206 else
2207 {
2208 i++;
2209 }
2210 }
2211
2212 /* If optimizing for size, only thread through a block if we don't have
2213 to duplicate it or it's an otherwise empty redirection block. */
2214 if (optimize_function_for_size_p (cfun))
2215 {
2216 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2217 {
2218 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2219 if (EDGE_COUNT (bb->preds) > 1
2220 && !redirection_block_p (bb))
2221 {
2222 FOR_EACH_EDGE (e, ei, bb->preds)
2223 {
2224 if (e->aux)
2225 {
2226 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2227 delete_jump_thread_path (path);
2228 e->aux = NULL;
2229 }
2230 }
2231 }
2232 else
2233 bitmap_set_bit (threaded_blocks, i);
2234 }
2235 }
2236 else
2237 bitmap_copy (threaded_blocks, tmp);
2238
2239 /* Look for jump threading paths which cross multiple loop headers.
2240
2241 The code to thread through loop headers will change the CFG in ways
2242 that break assumptions made by the loop optimization code.
2243
2244 We don't want to blindly cancel the requests. We can instead do better
2245 by trimming off the end of the jump thread path. */
2246 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2247 {
2248 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2249 FOR_EACH_EDGE (e, ei, bb->preds)
2250 {
2251 if (e->aux)
2252 {
2253 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2254
2255 for (unsigned int i = 0, crossed_headers = 0;
2256 i < path->length ();
2257 i++)
2258 {
2259 basic_block dest = (*path)[i]->e->dest;
2260 crossed_headers += (dest == dest->loop_father->header);
2261 if (crossed_headers > 1)
2262 {
2263 /* Trim from entry I onwards. */
2264 for (unsigned int j = i; j < path->length (); j++)
2265 delete (*path)[j];
2266 path->truncate (i);
2267
2268 /* Now that we've truncated the path, make sure
2269 what's left is still valid. We need at least
2270 two edges on the path and the last edge cannot
2271 be a joiner. This should never happen, but let's
2272 be safe. */
2273 if (path->length () < 2
2274 || (path->last ()->type
2275 == EDGE_COPY_SRC_JOINER_BLOCK))
2276 {
2277 delete_jump_thread_path (path);
2278 e->aux = NULL;
2279 }
2280 break;
2281 }
2282 }
2283 }
2284 }
2285 }
2286
2287 /* If we have a joiner block (J) which has two successors S1 and S2 and
2288 we are threading through S1 and the final destination of the thread
2289 is S2, then we must verify that any PHI nodes in S2 have the same
2290 PHI arguments for the edge J->S2 and J->S1->...->S2.
2291
2292 We used to detect this prior to registering the jump thread, but
2293 that prohibits propagation of edge equivalences into non-dominated
2294 PHI nodes as the equivalency test might occur before propagation.
2295
2296 This must also occur after we truncate any jump threading paths
2297 as this scenario may only show up after truncation.
2298
2299 This works for now, but will need improvement as part of the FSA
2300 optimization.
2301
2302 Note since we've moved the thread request data to the edges,
2303 we have to iterate on those rather than the threaded_edges vector. */
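  /* A hypothetical example of the PHI check: with joiner J, successors S1
     and S2, a path J->S1->...->S2, and S2 containing

         x_5 = PHI <0(J), 1(S1)>

     the threaded path would deliver 1 for x_5 where the direct edge J->S2
     delivers 0, so such a request has to be cancelled.  */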
2304 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
2305 {
2306 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2307 FOR_EACH_EDGE (e, ei, bb->preds)
2308 {
2309 if (e->aux)
2310 {
2311 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2312 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
2313
2314 if (have_joiner)
2315 {
2316 basic_block joiner = e->dest;
2317 edge final_edge = path->last ()->e;
2318 basic_block final_dest = final_edge->dest;
2319 edge e2 = find_edge (joiner, final_dest);
2320
2321 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
2322 {
2323 delete_jump_thread_path (path);
2324 e->aux = NULL;
2325 }
2326 }
2327 }
2328 }
2329 }
2330
2331 BITMAP_FREE (tmp);
2332 }
2333
2334
2335 /* Return TRUE if BB ends with a switch statement or a computed goto.
2336 Otherwise return false. */
2337 static bool
2338 bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
2339 {
2340 gimple *stmt = last_stmt (bb);
2341 if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
2342 return true;
2343 if (stmt && gimple_code (stmt) == GIMPLE_GOTO
2344 && TREE_CODE (gimple_goto_dest (stmt)) == SSA_NAME)
2345 return true;
2346 return false;
2347 }
2348
2349 /* Verify that the REGION is a valid jump thread. A jump thread is a special
2350 case of a SEME (Single Entry Multiple Exits) region in which all nodes in the
2351 REGION have exactly one incoming edge. The only exception is the first block
2352 that may not have been connected to the rest of the cfg yet. */
2353
2354 DEBUG_FUNCTION void
2355 verify_jump_thread (basic_block *region, unsigned n_region)
2356 {
2357 for (unsigned i = 0; i < n_region; i++)
2358 gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
2359 }
2360
2361 /* Return true when BB is one of the first N items in BBS. */
2362
2363 static inline bool
2364 bb_in_bbs (basic_block bb, basic_block *bbs, int n)
2365 {
2366 for (int i = 0; i < n; i++)
2367 if (bb == bbs[i])
2368 return true;
2369
2370 return false;
2371 }
2372
2373 /* Duplicates a jump-thread path of N_REGION basic blocks.
2374 The ENTRY edge is redirected to the duplicate of the region.
2375
2376 Remove the last conditional statement in the last basic block in the REGION,
2377 and create a single fallthru edge pointing to the same destination as the
2378 EXIT edge.
2379
2380 The new basic blocks are stored to REGION_COPY in the same order as they had
2381 in REGION, provided that REGION_COPY is not NULL.
2382
2383 Returns false if it is unable to copy the region, true otherwise. */
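/* A hypothetical example of how this is invoked: for a jump-thread path
   with edges A->B, B->C, C->D the caller passes ENTRY = A->B,
   EXIT = C->D and REGION = { B, C }.  B and C are copied, the branch at
   the end of C's copy is removed in favor of a fallthru edge to D, and
   A is redirected to B's copy.  */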
2384
2385 static bool
2386 duplicate_thread_path (edge entry, edge exit,
2387 basic_block *region, unsigned n_region,
2388 basic_block *region_copy)
2389 {
2390 unsigned i;
2391 bool free_region_copy = false;
2392 struct loop *loop = entry->dest->loop_father;
2393 edge exit_copy;
2394 edge redirected;
2395 int total_freq = 0, entry_freq = 0;
2396 gcov_type total_count = 0, entry_count = 0;
2397
2398 if (!can_copy_bbs_p (region, n_region))
2399 return false;
2400
2401 /* Some sanity checking. Note that we do not check for all possible
2402 misuses of this function; i.e., if you ask to copy something weird,
2403 it will work, but the state of structures probably will not be
2404 correct. */
2405 for (i = 0; i < n_region; i++)
2406 {
2407 /* We do not handle subloops, i.e. all the blocks must belong to the
2408 same loop. */
2409 if (region[i]->loop_father != loop)
2410 return false;
2411 }
2412
2413 initialize_original_copy_tables ();
2414
2415 set_loop_copy (loop, loop);
2416
2417 if (!region_copy)
2418 {
2419 region_copy = XNEWVEC (basic_block, n_region);
2420 free_region_copy = true;
2421 }
2422
2423 if (entry->dest->count)
2424 {
2425 total_count = entry->dest->count;
2426 entry_count = entry->count;
2427 /* Fix up corner cases, to avoid division by zero or creation of negative
2428 frequencies. */
2429 if (entry_count > total_count)
2430 entry_count = total_count;
2431 }
2432 else
2433 {
2434 total_freq = entry->dest->frequency;
2435 entry_freq = EDGE_FREQUENCY (entry);
2436 /* Fix up corner cases, to avoid division by zero or creation of negative
2437 frequencies. */
2438 if (total_freq == 0)
2439 total_freq = 1;
2440 else if (entry_freq > total_freq)
2441 entry_freq = total_freq;
2442 }
2443
2444 copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
2445 split_edge_bb_loc (entry), false);
2446
2447 /* Fix up: copy_bbs redirects all edges pointing to copied blocks. The
2448 following code ensures that all the edges exiting the jump-thread path are
2449 redirected back to the original code: these edges are exceptions
2450 invalidating the property that is propagated by executing all the blocks of
2451 the jump-thread path in order. */
2452
2453 for (i = 0; i < n_region; i++)
2454 {
2455 edge e;
2456 edge_iterator ei;
2457 basic_block bb = region_copy[i];
2458
2459 if (single_succ_p (bb))
2460 {
2461 /* Make sure the successor is the next node in the path. */
2462 gcc_assert (i + 1 == n_region
2463 || region_copy[i + 1] == single_succ_edge (bb)->dest);
2464 continue;
2465 }
2466
2467 /* Special case the last block on the path: make sure that it does not
2468 jump back on the copied path. */
2469 if (i + 1 == n_region)
2470 {
2471 FOR_EACH_EDGE (e, ei, bb->succs)
2472 if (bb_in_bbs (e->dest, region_copy, n_region - 1))
2473 {
2474 basic_block orig = get_bb_original (e->dest);
2475 if (orig)
2476 redirect_edge_and_branch_force (e, orig);
2477 }
2478 continue;
2479 }
2480
2481 /* Redirect all other edges jumping to non-adjacent blocks back to the
2482 original code. */
2483 FOR_EACH_EDGE (e, ei, bb->succs)
2484 if (region_copy[i + 1] != e->dest)
2485 {
2486 basic_block orig = get_bb_original (e->dest);
2487 if (orig)
2488 redirect_edge_and_branch_force (e, orig);
2489 }
2490 }
2491
2492 if (total_count)
2493 {
2494 scale_bbs_frequencies_gcov_type (region, n_region,
2495 total_count - entry_count,
2496 total_count);
2497 scale_bbs_frequencies_gcov_type (region_copy, n_region, entry_count,
2498 total_count);
2499 }
2500 else
2501 {
2502 scale_bbs_frequencies_int (region, n_region, total_freq - entry_freq,
2503 total_freq);
2504 scale_bbs_frequencies_int (region_copy, n_region, entry_freq, total_freq);
2505 }
2506
2507 if (flag_checking)
2508 verify_jump_thread (region_copy, n_region);
2509
2510 /* Remove the last branch in the jump thread path. */
2511 remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
2512
2513 /* And fixup the flags on the single remaining edge. */
2514 edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
2515 fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
2516 fix_e->flags |= EDGE_FALLTHRU;
2517
2518 edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
2519
2520 if (e) {
2521 rescan_loop_exit (e, true, false);
2522 e->probability = REG_BR_PROB_BASE;
2523 e->count = region_copy[n_region - 1]->count;
2524 }
2525
2526 /* Redirect the entry and add the phi node arguments. */
2527 if (entry->dest == loop->header)
2528 mark_loop_for_removal (loop);
2529 redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
2530 gcc_assert (redirected != NULL);
2531 flush_pending_stmts (entry);
2532
2533 /* Add the other PHI node arguments. */
2534 add_phi_args_after_copy (region_copy, n_region, NULL);
2535
2536 if (free_region_copy)
2537 free (region_copy);
2538
2539 free_original_copy_tables ();
2540 return true;
2541 }
2542
2543 /* Return true when PATH is a valid jump-thread path. */
2544
2545 static bool
2546 valid_jump_thread_path (vec<jump_thread_edge *> *path)
2547 {
2548 unsigned len = path->length ();
2549 bool multiway_branch = false;
2550
2551 /* Check that the path is connected and see if there's a multi-way
2552 branch on the path. */
2553 for (unsigned int j = 0; j < len - 1; j++)
2554 {
2555 if ((*path)[j]->e->dest != (*path)[j+1]->e->src)
2556 return false;
2557 gimple *last = last_stmt ((*path)[j]->e->dest);
2558 multiway_branch |= (last && gimple_code (last) == GIMPLE_SWITCH);
2559 }
2560
2561 /* If we are trying to thread the loop latch to a block that does
2562 not dominate the loop latch, then that will create an irreducible
2563 loop. We avoid that unless the jump thread has a multi-way
2564 branch, in which case we have deemed it worth losing other
2565 loop optimizations later if we can eliminate the multi-way branch. */
2566 edge e = (*path)[0]->e;
2567 struct loop *loop = e->dest->loop_father;
2568 if (!multiway_branch
2569 && loop->latch
2570 && loop_latch_edge (loop) == e
2571 && (determine_bb_domination_status (loop, path->last ()->e->dest)
2572 == DOMST_NONDOMINATING))
2573 return false;
2574
2575 return true;
2576 }
2577
2578 /* Remove any queued jump threads that include edge E.
2579
2580 We don't actually remove them here, just record the edges into a
2581 hash table. That way we can do the search once per iteration of
2582 DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR. */
2583
2584 void
2585 remove_jump_threads_including (edge_def *e)
2586 {
2587 if (!paths.exists ())
2588 return;
2589
2590 if (!removed_edges)
2591 removed_edges = new hash_table<struct removed_edges> (17);
2592
2593 edge *slot = removed_edges->find_slot (e, INSERT);
2594 *slot = e;
2595 }
2596
2597 /* Walk through all blocks and thread incoming edges to the appropriate
2598 outgoing edge for each registered jump threading path.
2599
2600 It is the caller's responsibility to fix the dominance information
2601 and rewrite duplicated SSA_NAMEs back into SSA form.
2602
2603 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
2604 loop headers if it does not simplify the loop.
2605
2606 Returns true if one or more edges were threaded, false otherwise. */
2607
2608 bool
2609 thread_through_all_blocks (bool may_peel_loop_headers)
2610 {
2611 bool retval = false;
2612 unsigned int i;
2613 bitmap_iterator bi;
2614 bitmap threaded_blocks;
2615 struct loop *loop;
2616
2617 if (!paths.exists ())
2618 {
2619 retval = false;
2620 goto out;
2621 }
2622
2623 threaded_blocks = BITMAP_ALLOC (NULL);
2624 memset (&thread_stats, 0, sizeof (thread_stats));
2625
2626 /* Remove any paths that referenced removed edges. */
2627 if (removed_edges)
2628 for (i = 0; i < paths.length (); )
2629 {
2630 unsigned int j;
2631 vec<jump_thread_edge *> *path = paths[i];
2632
2633 for (j = 0; j < path->length (); j++)
2634 {
2635 edge e = (*path)[j]->e;
2636 if (removed_edges->find_slot (e, NO_INSERT))
2637 break;
2638 }
2639
2640 if (j != path->length ())
2641 {
2642 delete_jump_thread_path (path);
2643 paths.unordered_remove (i);
2644 continue;
2645 }
2646 i++;
2647 }
2648
2649 /* Jump-thread all FSM threads before other jump-threads. */
2650 for (i = 0; i < paths.length ();)
2651 {
2652 vec<jump_thread_edge *> *path = paths[i];
2653 edge entry = (*path)[0]->e;
2654
2655 /* Only code-generate FSM jump-threads in this loop. */
2656 if ((*path)[0]->type != EDGE_FSM_THREAD)
2657 {
2658 i++;
2659 continue;
2660 }
2661
2662 /* Do not jump-thread twice from the same block. */
2663 if (bitmap_bit_p (threaded_blocks, entry->src->index)
2664 /* Verify that the jump thread path is still valid: a
2665 previous jump-thread may have changed the CFG, and
2666 invalidated the current path or the requested jump
2667 thread might create irreducible loops which should
2668 generally be avoided. */
2669 || !valid_jump_thread_path (path))
2670 {
2671 /* Remove invalid FSM jump-thread paths. */
2672 delete_jump_thread_path (path);
2673 paths.unordered_remove (i);
2674 continue;
2675 }
2676
2677 unsigned len = path->length ();
2678 edge exit = (*path)[len - 1]->e;
2679 basic_block *region = XNEWVEC (basic_block, len - 1);
2680
2681 for (unsigned int j = 0; j < len - 1; j++)
2682 region[j] = (*path)[j]->e->dest;
2683
2684 if (duplicate_thread_path (entry, exit, region, len - 1, NULL))
2685 {
2686 /* We do not update dominance info. */
2687 free_dominance_info (CDI_DOMINATORS);
2688 bitmap_set_bit (threaded_blocks, entry->src->index);
2689 retval = true;
2690 thread_stats.num_threaded_edges++;
2691 }
2692
2693 delete_jump_thread_path (path);
2694 paths.unordered_remove (i);
2695 }
2696
2697 /* Remove from PATHS all the jump-threads starting with an edge already
2698 jump-threaded. */
2699 for (i = 0; i < paths.length ();)
2700 {
2701 vec<jump_thread_edge *> *path = paths[i];
2702 edge entry = (*path)[0]->e;
2703
2704 /* Do not jump-thread twice from the same block. */
2705 if (bitmap_bit_p (threaded_blocks, entry->src->index))
2706 {
2707 delete_jump_thread_path (path);
2708 paths.unordered_remove (i);
2709 }
2710 else
2711 i++;
2712 }
2713
2714 bitmap_clear (threaded_blocks);
2715
2716 mark_threaded_blocks (threaded_blocks);
2717
2718 initialize_original_copy_tables ();
2719
2720 /* First perform the threading requests that do not affect
2721 loop structure. */
2722 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
2723 {
2724 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2725
2726 if (EDGE_COUNT (bb->preds) > 0)
2727 retval |= thread_block (bb, true);
2728 }
2729
2730 /* Then perform the threading through loop headers. We start with the
2731 innermost loop, so that the changes in cfg we perform won't affect
2732 further threading. */
2733 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2734 {
2735 if (!loop->header
2736 || !bitmap_bit_p (threaded_blocks, loop->header->index))
2737 continue;
2738
2739 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
2740 }
2741
2742 /* Any jump threading paths that are still attached to edges at this
2743 point must be one of two cases.
2744
2745 First, we could have a jump threading path which went from outside
2746 a loop to inside a loop that was ignored because a prior jump thread
2747 across a backedge was realized (which indirectly causes the loop
2748 above to ignore the latter thread). We can detect these because the
2749 loop structures will be different and we do not currently try to
2750 optimize this case.
2751
2752 Second, we could be threading across a backedge to a point within the
2753 same loop. This occurs for the FSA/FSM optimization and we would
2754 like to optimize it. However, we have to be very careful as this
2755 may completely scramble the loop structures, with the result being
2756 irreducible loops causing us to throw away our loop structure.
2757
2758 As a compromise for the latter case, if the thread path ends in
2759 a block where the last statement is a multiway branch, then go
2760 ahead and thread it, else ignore it. */
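  /* A hypothetical sketch of the compromise: a path that follows a
     backedge and ends in a block whose last statement is

         switch (state_1) { case 0: ...; case 1: ...; default: ...; }

     (or a computed goto) is still threaded, because removing the multiway
     branch is judged worth the risk to the loop structure; a leftover
     path whose endpoints sit in different loops is simply cancelled.  */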
2761 basic_block bb;
2762 edge e;
2763 FOR_EACH_BB_FN (bb, cfun)
2764 {
2765 /* If we do end up threading here, we can remove elements from
2766 BB->preds. Thus we cannot use the FOR_EACH_EDGE iterator. */
2767 for (edge_iterator ei = ei_start (bb->preds);
2768 (e = ei_safe_edge (ei));)
2769 if (e->aux)
2770 {
2771 vec<jump_thread_edge *> *path = THREAD_PATH (e);
2772
2773 /* Case 1, threading from outside to inside the loop
2774 after we'd already threaded through the header. */
2775 if ((*path)[0]->e->dest->loop_father
2776 != path->last ()->e->src->loop_father)
2777 {
2778 delete_jump_thread_path (path);
2779 e->aux = NULL;
2780 ei_next (&ei);
2781 }
2782 else if (bb_ends_with_multiway_branch (path->last ()->e->src))
2783 {
2784 /* The code to thread through loop headers may have
2785 split a block with jump threads attached to it.
2786
2787 We can identify this with a disjoint jump threading
2788 path. If found, just remove it. */
2789 for (unsigned int i = 0; i < path->length () - 1; i++)
2790 if ((*path)[i]->e->dest != (*path)[i + 1]->e->src)
2791 {
2792 delete_jump_thread_path (path);
2793 e->aux = NULL;
2794 ei_next (&ei);
2795 break;
2796 }
2797
2798 /* Our path is still valid, thread it. */
2799 if (e->aux)
2800 {
2801 if (thread_block ((*path)[0]->e->dest, false))
2802 e->aux = NULL;
2803 else
2804 {
2805 delete_jump_thread_path (path);
2806 e->aux = NULL;
2807 ei_next (&ei);
2808 }
2809 }
2810 }
2811 else
2812 {
2813 delete_jump_thread_path (path);
2814 e->aux = NULL;
2815 ei_next (&ei);
2816 }
2817 }
2818 else
2819 ei_next (&ei);
2820 }
2821
2822 statistics_counter_event (cfun, "Jumps threaded",
2823 thread_stats.num_threaded_edges);
2824
2825 free_original_copy_tables ();
2826
2827 BITMAP_FREE (threaded_blocks);
2828 threaded_blocks = NULL;
2829 paths.release ();
2830
2831 if (retval)
2832 loops_state_set (LOOPS_NEED_FIXUP);
2833
2834 out:
2835 delete removed_edges;
2836 removed_edges = NULL;
2837 return retval;
2838 }
2839
2840 /* Delete the jump threading path PATH. We have to explicitly delete
2841 each entry in the vector, then the container. */
2842
2843 void
2844 delete_jump_thread_path (vec<jump_thread_edge *> *path)
2845 {
2846 for (unsigned int i = 0; i < path->length (); i++)
2847 delete (*path)[i];
2848 path->release ();
2849 delete path;
2850 }
2851
2852 /* Register a jump threading opportunity. We queue up all the jump
2853 threading opportunities discovered by a pass and update the CFG
2854 and SSA form all at once.
2855
2856 PATH records the sequence of edges to thread; in effect we record
2857 that the destination of the first edge on PATH can be changed to the
2858 destination of its last edge after fixing the SSA graph. */
2859
2860 void
2861 register_jump_thread (vec<jump_thread_edge *> *path)
2862 {
2863 if (!dbg_cnt (registered_jump_thread))
2864 {
2865 delete_jump_thread_path (path);
2866 return;
2867 }
2868
2869 /* First make sure there are no NULL outgoing edges on the jump threading
2870 path. That can happen for jumping to a constant address. */
2871 for (unsigned int i = 0; i < path->length (); i++)
2872 if ((*path)[i]->e == NULL)
2873 {
2874 if (dump_file && (dump_flags & TDF_DETAILS))
2875 {
2876 fprintf (dump_file,
2877 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
2878 dump_jump_thread_path (dump_file, *path, false);
2879 }
2880
2881 delete_jump_thread_path (path);
2882 return;
2883 }
2884
2885 if (dump_file && (dump_flags & TDF_DETAILS))
2886 dump_jump_thread_path (dump_file, *path, true);
2887
2888 if (!paths.exists ())
2889 paths.create (5);
2890
2891 paths.safe_push (path);
2892 }