1 /* Thread edges through blocks and update the control flow and SSA graphs.
2 Copyright (C) 2004-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "flags.h"
25 #include "basic-block.h"
26 #include "function.h"
27 #include "tree-ssa.h"
28 #include "tree-ssa-threadupdate.h"
29 #include "dumpfile.h"
30 #include "cfgloop.h"
31 #include "hash-table.h"
32 #include "dbgcnt.h"
33
34 /* Given a block B, update the CFG and SSA graph to reflect redirecting
35 one or more in-edges to B to instead reach the destination of an
36 out-edge from B while preserving any side effects in B.
37
38 i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
39 side effects of executing B.
40
41 1. Make a copy of B (including its outgoing edges and statements). Call
42 the copy B'. Note B' has no incoming edges or PHIs at this time.
43
44 2. Remove the control statement at the end of B' and all outgoing edges
45 except B'->C.
46
47 3. Add a new argument to each PHI in C with the same value as the existing
48 argument associated with edge B->C. Associate the new PHI arguments
49 with the edge B'->C.
50
51 4. For each PHI in B, find or create a PHI in B' with an identical
52 PHI_RESULT. Add an argument to the PHI in B' which has the same
53 value as the PHI in B associated with the edge A->B. Associate
54 the new argument in the PHI in B' with the edge A->B.
55
56 5. Change the edge A->B to A->B'.
57
58 5a. This automatically deletes any PHI arguments associated with the
59 edge A->B in B.
60
61 5b. This automatically associates each new argument added in step 4
62 with the edge A->B'.
63
64 6. Repeat for other incoming edges into B.
65
66 7. Put the duplicated resources in B and all the B' blocks into SSA form.
67
68 Note that block duplication can be minimized by first collecting the
69 set of unique destination blocks that the incoming edges should
70 be threaded to.
71
72 Block duplication can be further minimized by using B instead of
73 creating B' for one destination if all edges into B are going to be
74 threaded to a successor of B. We had code to do this at one time, but
75 I'm not convinced it is correct with the changes to avoid mucking up
76 the loop structure (which may cancel threading requests, thus a block
77 which we thought was going to become unreachable may still be reachable).
78 This code was also going to get ugly with the introduction of the ability
79 for a single jump thread request to bypass multiple blocks.
80
81 We further reduce the number of edges and statements we create by
82 not copying all the outgoing edges and the control statement in
83 step #1. We instead create a template block without the outgoing
84 edges and duplicate the template. */
85
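/* As a purely illustrative sketch (not compiled, and not part of this
   file's logic): a client pass typically builds a path of jump_thread_edge
   records, registers it, and later asks for all queued requests to be
   applied at once.  The helper name example_register_and_apply below is
   hypothetical.  */
#if 0
static void
example_register_and_apply (edge incoming_edge, edge outgoing_edge)
{
  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  path->safe_push (new jump_thread_edge (incoming_edge,
					 EDGE_START_JUMP_THREAD));
  path->safe_push (new jump_thread_edge (outgoing_edge,
					 EDGE_COPY_SRC_BLOCK));
  register_jump_thread (path);

  /* Normally done once, after the pass has registered all its requests.  */
  thread_through_all_blocks (false);
}
#endif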
86
87 /* Steps #5 and #6 of the above algorithm are best implemented by walking
88 all the incoming edges which thread to the same destination edge at
89 the same time. That avoids lots of table lookups to get information
90 for the destination edge.
91
92 To realize that implementation we create a list of incoming edges
93 which thread to the same outgoing edge. Thus to implement steps
94 #5 and #6 we traverse our hash table of outgoing edge information.
95 For each entry we walk the list of incoming edges which thread to
96 the current outgoing edge. */
97
98 struct el
99 {
100 edge e;
101 struct el *next;
102 };
103
104 /* Main data structure recording information regarding B's duplicate
105 blocks. */
106
107 /* We need to efficiently record the unique thread destinations of this
108 block and specific information associated with those destinations. We
109 may have many incoming edges threaded to the same outgoing edge. This
110 can be naturally implemented with a hash table. */
111
112 struct redirection_data : typed_free_remove<redirection_data>
113 {
114 /* A duplicate of B with the trailing control statement removed and which
115 targets a single successor of B. */
116 basic_block dup_block;
117
118 /* The jump threading path. */
119 vec<jump_thread_edge *> *path;
120
121 /* A list of incoming edges which we want to thread to the
122 same path. */
123 struct el *incoming_edges;
124
125 /* hash_table support. */
126 typedef redirection_data value_type;
127 typedef redirection_data compare_type;
128 static inline hashval_t hash (const value_type *);
129 static inline int equal (const value_type *, const compare_type *);
130 };
131
132 /* Simple hashing function. For any given incoming edge E, we're going
133 to be most concerned with the final destination of its jump thread
134    path.  So hash on the index of the final edge's destination block.  */
135
136 inline hashval_t
137 redirection_data::hash (const value_type *p)
138 {
139 vec<jump_thread_edge *> *path = p->path;
140 return path->last ()->e->dest->index;
141 }
142
143 /* Given two hash table entries, return true if they have the same
144 jump threading path. */
145 inline int
146 redirection_data::equal (const value_type *p1, const compare_type *p2)
147 {
148 vec<jump_thread_edge *> *path1 = p1->path;
149 vec<jump_thread_edge *> *path2 = p2->path;
150
151 if (path1->length () != path2->length ())
152 return false;
153
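  /* Entry 0 is the incoming edge, which may differ between requests that
     share the rest of the path, so start the comparison at entry 1.  */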
154 for (unsigned int i = 1; i < path1->length (); i++)
155 {
156 if ((*path1)[i]->type != (*path2)[i]->type
157 || (*path1)[i]->e != (*path2)[i]->e)
158 return false;
159 }
160
161 return true;
162 }
163
164 /* Data structure of information to pass to hash table traversal routines. */
165 struct ssa_local_info_t
166 {
167 /* The current block we are working on. */
168 basic_block bb;
169
170 /* A template copy of BB with no outgoing edges or control statement that
171 we use for creating copies. */
172 basic_block template_block;
173
174 /* TRUE if we thread one or more jumps, FALSE otherwise. */
175 bool jumps_threaded;
176 };
177
178 /* Passes which use the jump threading code register jump threading
179 opportunities as they are discovered. We keep the registered
180    jump threading opportunities in this vector.  Each element is a
181    jump thread path, i.e., a vector of jump_thread_edge records.  */
182 static vec<vec<jump_thread_edge *> *> paths;
183
184 /* When we start updating the CFG for threading, data necessary for jump
185 threading is attached to the AUX field for the incoming edge. Use these
186 macros to access the underlying structure attached to the AUX field. */
187 #define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
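
/* For instance (purely illustrative, not from any particular test case),
   a request to thread A->B->C->D, where B is a joiner block whose control
   statement must be preserved, might be recorded on edge A->B roughly as

     { (A->B, EDGE_START_JUMP_THREAD),
       (B->C, EDGE_COPY_SRC_JOINER_BLOCK),
       (C->D, EDGE_NO_COPY_SRC_BLOCK) }

   and would then be retrieved with THREAD_PATH (A->B).  */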
188
189 /* Jump threading statistics. */
190
191 struct thread_stats_d
192 {
193 unsigned long num_threaded_edges;
194 };
195
196 struct thread_stats_d thread_stats;
197
198
199 /* Remove the last statement in block BB if it is a control statement.
200 Also remove all outgoing edges except the edge which reaches DEST_BB.
201 If DEST_BB is NULL, then remove all outgoing edges. */
202
203 static void
204 remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
205 {
206 gimple_stmt_iterator gsi;
207 edge e;
208 edge_iterator ei;
209
210 gsi = gsi_last_bb (bb);
211
212 /* If the duplicate ends with a control statement, then remove it.
213
214 Note that if we are duplicating the template block rather than the
215 original basic block, then the duplicate might not have any real
216 statements in it. */
217 if (!gsi_end_p (gsi)
218 && gsi_stmt (gsi)
219 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
220 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
221 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
222 gsi_remove (&gsi, true);
223
224 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
225 {
226 if (e->dest != dest_bb)
227 remove_edge (e);
228 else
229 ei_next (&ei);
230 }
231 }
232
233 /* Create a duplicate of BB. Record the duplicate block in RD. */
234
235 static void
236 create_block_for_threading (basic_block bb, struct redirection_data *rd)
237 {
238 edge_iterator ei;
239 edge e;
240
241 /* We can use the generic block duplication code and simply remove
242 the stuff we do not need. */
243 rd->dup_block = duplicate_block (bb, NULL, NULL);
244
245 FOR_EACH_EDGE (e, ei, rd->dup_block->succs)
246 e->aux = NULL;
247
248 /* Zero out the profile, since the block is unreachable for now. */
249 rd->dup_block->frequency = 0;
250 rd->dup_block->count = 0;
251 }
252
253 /* Main data structure to hold information for duplicates of BB. */
254
255 static hash_table <redirection_data> redirection_data;
256
257 /* Given an outgoing edge E lookup and return its entry in our hash table.
258
259 If INSERT is true, then we insert the entry into the hash table if
260 it is not already present. INCOMING_EDGE is added to the list of incoming
261 edges associated with E in the hash table. */
262
263 static struct redirection_data *
264 lookup_redirection_data (edge e, enum insert_option insert)
265 {
266 struct redirection_data **slot;
267 struct redirection_data *elt;
268 vec<jump_thread_edge *> *path = THREAD_PATH (e);
269
270 /* Build a hash table element so we can see if E is already
271 in the table. */
272 elt = XNEW (struct redirection_data);
273 elt->path = path;
274 elt->dup_block = NULL;
275 elt->incoming_edges = NULL;
276
277 slot = redirection_data.find_slot (elt, insert);
278
279 /* This will only happen if INSERT is false and the entry is not
280 in the hash table. */
281 if (slot == NULL)
282 {
283 free (elt);
284 return NULL;
285 }
286
287 /* This will only happen if E was not in the hash table and
288 INSERT is true. */
289 if (*slot == NULL)
290 {
291 *slot = elt;
292 elt->incoming_edges = XNEW (struct el);
293 elt->incoming_edges->e = e;
294 elt->incoming_edges->next = NULL;
295 return elt;
296 }
297 /* E was in the hash table. */
298 else
299 {
300 /* Free ELT as we do not need it anymore, we will extract the
301 relevant entry from the hash table itself. */
302 free (elt);
303
304 /* Get the entry stored in the hash table. */
305 elt = *slot;
306
307 /* If insertion was requested, then we need to add INCOMING_EDGE
308 to the list of incoming edges associated with E. */
309 if (insert)
310 {
311 struct el *el = XNEW (struct el);
312 el->next = elt->incoming_edges;
313 el->e = e;
314 elt->incoming_edges = el;
315 }
316
317 return elt;
318 }
319 }
320
321 /* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
322
323 static void
324 copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
325 {
326 gimple_stmt_iterator gsi;
327 int src_indx = src_e->dest_idx;
328
329 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
330 {
331 gimple phi = gsi_stmt (gsi);
332 source_location locus = gimple_phi_arg_location (phi, src_indx);
333 add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
334 }
335 }
336
337 /* We have recently made a copy of ORIG_BB, including its outgoing
338 edges. The copy is NEW_BB. Every PHI node in every direct successor of
339    ORIG_BB has a new argument associated with the edge from NEW_BB to the
340 successor. Initialize the PHI argument so that it is equal to the PHI
341 argument associated with the edge from ORIG_BB to the successor. */
342
343 static void
344 update_destination_phis (basic_block orig_bb, basic_block new_bb)
345 {
346 edge_iterator ei;
347 edge e;
348
349 FOR_EACH_EDGE (e, ei, orig_bb->succs)
350 {
351 edge e2 = find_edge (new_bb, e->dest);
352 copy_phi_args (e->dest, e, e2);
353 }
354 }
355
356 /* Given a duplicate block and its single destination (both stored
357    in RD), create an edge between the duplicate and its single
358 destination.
359
360 Add an additional argument to any PHI nodes at the single
361 destination. */
362
363 static void
364 create_edge_and_update_destination_phis (struct redirection_data *rd,
365 basic_block bb)
366 {
367 edge e = make_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);
368
369 rescan_loop_exit (e, true, false);
370 e->probability = REG_BR_PROB_BASE;
371 e->count = bb->count;
372
373 /* We have to copy path -- which means creating a new vector as well
374 as all the jump_thread_edge entries. */
375 if (rd->path->last ()->e->aux)
376 {
377 vec<jump_thread_edge *> *path = THREAD_PATH (rd->path->last ()->e);
378 vec<jump_thread_edge *> *copy = new vec<jump_thread_edge *> ();
379
380 /* Sadly, the elements of the vector are pointers and need to
381 be copied as well. */
382 for (unsigned int i = 0; i < path->length (); i++)
383 {
384 jump_thread_edge *x
385 = new jump_thread_edge ((*path)[i]->e, (*path)[i]->type);
386 copy->safe_push (x);
387 }
388 e->aux = (void *)copy;
389 }
390 else
391 {
392 e->aux = NULL;
393 }
394
395 /* If there are any PHI nodes at the destination of the outgoing edge
396 from the duplicate block, then we will need to add a new argument
397 to them. The argument should have the same value as the argument
398 associated with the outgoing edge stored in RD. */
399 copy_phi_args (e->dest, rd->path->last ()->e, e);
400 }
401
402 /* Wire up the outgoing edges from the duplicate block and
403 update any PHIs as needed. */
404 void
405 ssa_fix_duplicate_block_edges (struct redirection_data *rd,
406 ssa_local_info_t *local_info)
407 {
408 edge e = rd->incoming_edges->e;
409 vec<jump_thread_edge *> *path = THREAD_PATH (e);
410
411   /* If we were threading through a joiner block, then we want
412 to keep its control statement and redirect an outgoing edge.
413 Else we want to remove the control statement & edges, then create
414 a new outgoing edge. In both cases we may need to update PHIs. */
415 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
416 {
417 edge victim;
418 edge e2;
419
420 /* This updates the PHIs at the destination of the duplicate
421 block. */
422 update_destination_phis (local_info->bb, rd->dup_block);
423
424 /* Find the edge from the duplicate block to the block we're
425 threading through. That's the edge we want to redirect. */
426 victim = find_edge (rd->dup_block, (*path)[1]->e->dest);
427 e2 = redirect_edge_and_branch (victim, path->last ()->e->dest);
428 e2->count = path->last ()->e->count;
429
430 /* If we redirected the edge, then we need to copy PHI arguments
431 at the target. If the edge already existed (e2 != victim case),
432 then the PHIs in the target already have the correct arguments. */
433 if (e2 == victim)
434 copy_phi_args (e2->dest, path->last ()->e, e2);
435 }
436 else
437 {
438 remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
439 create_edge_and_update_destination_phis (rd, rd->dup_block);
440 }
441 }
442 /* Hash table traversal callback routine to create duplicate blocks. */
443
444 int
445 ssa_create_duplicates (struct redirection_data **slot,
446 ssa_local_info_t *local_info)
447 {
448 struct redirection_data *rd = *slot;
449
450 /* Create a template block if we have not done so already. Otherwise
451 use the template to create a new block. */
452 if (local_info->template_block == NULL)
453 {
454 create_block_for_threading (local_info->bb, rd);
455 local_info->template_block = rd->dup_block;
456
457 /* We do not create any outgoing edges for the template. We will
458 take care of that in a later traversal. That way we do not
459 create edges that are going to just be deleted. */
460 }
461 else
462 {
463 create_block_for_threading (local_info->template_block, rd);
464
465 /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
466 block. */
467 ssa_fix_duplicate_block_edges (rd, local_info);
468 }
469
470 /* Keep walking the hash table. */
471 return 1;
472 }
473
474 /* We did not create any outgoing edges for the template block during
475 block creation. This hash table traversal callback creates the
476 outgoing edge for the template block. */
477
478 inline int
479 ssa_fixup_template_block (struct redirection_data **slot,
480 ssa_local_info_t *local_info)
481 {
482 struct redirection_data *rd = *slot;
483
484   /* If this is the template block, halt the traversal after updating
485 it appropriately.
486
487      If we were threading through a joiner block, then we want
488 to keep its control statement and redirect an outgoing edge.
489 Else we want to remove the control statement & edges, then create
490 a new outgoing edge. In both cases we may need to update PHIs. */
491 if (rd->dup_block && rd->dup_block == local_info->template_block)
492 {
493 ssa_fix_duplicate_block_edges (rd, local_info);
494 return 0;
495 }
496
497 return 1;
498 }
499
500 /* Hash table traversal callback to redirect each incoming edge
501 associated with this hash table element to its new destination. */
502
503 int
504 ssa_redirect_edges (struct redirection_data **slot,
505 ssa_local_info_t *local_info)
506 {
507 struct redirection_data *rd = *slot;
508 struct el *next, *el;
509
510   /* Walk over all the incoming edges associated with this
511 hash table entry. */
512 for (el = rd->incoming_edges; el; el = next)
513 {
514 edge e = el->e;
515 vec<jump_thread_edge *> *path = THREAD_PATH (e);
516
517 /* Go ahead and free this element from the list. Doing this now
518 avoids the need for another list walk when we destroy the hash
519 table. */
520 next = el->next;
521 free (el);
522
523 thread_stats.num_threaded_edges++;
524
525 if (rd->dup_block)
526 {
527 edge e2;
528
529 if (dump_file && (dump_flags & TDF_DETAILS))
530 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
531 e->src->index, e->dest->index, rd->dup_block->index);
532
533 rd->dup_block->count += e->count;
534
535 /* Excessive jump threading may make frequencies large enough so
536 the computation overflows. */
537 if (rd->dup_block->frequency < BB_FREQ_MAX * 2)
538 rd->dup_block->frequency += EDGE_FREQUENCY (e);
539
540 /* In the case of threading through a joiner block, the outgoing
541 edges from the duplicate block were updated when they were
542 redirected during ssa_fix_duplicate_block_edges. */
543 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
544 EDGE_SUCC (rd->dup_block, 0)->count += e->count;
545
546 /* Redirect the incoming edge (possibly to the joiner block) to the
547 appropriate duplicate block. */
548 e2 = redirect_edge_and_branch (e, rd->dup_block);
549 gcc_assert (e == e2);
550 flush_pending_stmts (e2);
551 }
552
553 /* Go ahead and clear E->aux. It's not needed anymore and failure
554 to clear it will cause all kinds of unpleasant problems later. */
555 for (unsigned int i = 0; i < path->length (); i++)
556 delete (*path)[i];
557 path->release ();
558 e->aux = NULL;
559
560 }
561
562 /* Indicate that we actually threaded one or more jumps. */
563 if (rd->incoming_edges)
564 local_info->jumps_threaded = true;
565
566 return 1;
567 }
568
569 /* Return true if this block has no executable statements other than
570 a simple ctrl flow instruction. When the number of outgoing edges
571 is one, this is equivalent to a "forwarder" block. */
572
573 static bool
574 redirection_block_p (basic_block bb)
575 {
576 gimple_stmt_iterator gsi;
577
578 /* Advance to the first executable statement. */
579 gsi = gsi_start_bb (bb);
580 while (!gsi_end_p (gsi)
581 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
582 || is_gimple_debug (gsi_stmt (gsi))
583 || gimple_nop_p (gsi_stmt (gsi))))
584 gsi_next (&gsi);
585
586 /* Check if this is an empty block. */
587 if (gsi_end_p (gsi))
588 return true;
589
590 /* Test that we've reached the terminating control statement. */
591 return gsi_stmt (gsi)
592 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
593 || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
594 || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
595 }
596
597 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
598 is reached via one or more specific incoming edges, we know which
599 outgoing edge from BB will be traversed.
600
601 We want to redirect those incoming edges to the target of the
602 appropriate outgoing edge. Doing so avoids a conditional branch
603 and may expose new optimization opportunities. Note that we have
604 to update dominator tree and SSA graph after such changes.
605
606 The key to keeping the SSA graph update manageable is to duplicate
607 the side effects occurring in BB so that those side effects still
608 occur on the paths which bypass BB after redirecting edges.
609
610 We accomplish this by creating duplicates of BB and arranging for
611 the duplicates to unconditionally pass control to one specific
612 successor of BB. We then revector the incoming edges into BB to
613 the appropriate duplicate of BB.
614
615 If NOLOOP_ONLY is true, we only perform the threading as long as it
616 does not affect the structure of the loops in a nontrivial way. */
617
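/* A concrete sketch (not from any particular test case): if BB ends in
   "if (x_1 > 0) goto C; else goto D;" and x_1 is known to be positive
   whenever BB is entered via edge E, then E can be redirected to a
   duplicate of BB whose only successor is C, eliminating the runtime
   test on that path.  */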
618 static bool
619 thread_block (basic_block bb, bool noloop_only)
620 {
621 /* E is an incoming edge into BB that we may or may not want to
622 redirect to a duplicate of BB. */
623 edge e, e2;
624 edge_iterator ei;
625 ssa_local_info_t local_info;
626 struct loop *loop = bb->loop_father;
627
628   /* To avoid scanning a linear array for the element we need, we instead
629 use a hash table. For normal code there should be no noticeable
630 difference. However, if we have a block with a large number of
631 incoming and outgoing edges such linear searches can get expensive. */
632 redirection_data.create (EDGE_COUNT (bb->succs));
633
634 /* If we thread the latch of the loop to its exit, the loop ceases to
635 exist. Make sure we do not restrict ourselves in order to preserve
636 this loop. */
637 if (loop->header == bb)
638 {
639 e = loop_latch_edge (loop);
640 vec<jump_thread_edge *> *path = THREAD_PATH (e);
641
642 if (path)
643 {
644 for (unsigned int i = 1; i < path->length (); i++)
645 {
646 edge e2 = (*path)[i]->e;
647
648 if (loop_exit_edge_p (loop, e2))
649 {
650 loop->header = NULL;
651 loop->latch = NULL;
652 loops_state_set (LOOPS_NEED_FIXUP);
653 }
654 }
655 }
656 }
657
658 /* Record each unique threaded destination into a hash table for
659 efficient lookups. */
660 FOR_EACH_EDGE (e, ei, bb->preds)
661 {
662 if (e->aux == NULL)
663 continue;
664
665 vec<jump_thread_edge *> *path = THREAD_PATH (e);
666 e2 = path->last ()->e;
667 if (!e2 || noloop_only)
668 {
669 /* If NOLOOP_ONLY is true, we only allow threading through the
670 header of a loop to exit edges.
671
672 	     There are two cases to consider.  The first is when BB is the
673 loop header. We will attempt to thread this elsewhere, so
674 we can just continue here. */
675
676 if (bb == bb->loop_father->header
677 && (!loop_exit_edge_p (bb->loop_father, e2)
678 || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
679 continue;
680
681
682 	  /* The second occurs when there was a loop header buried in a jump
683 	     threading path.  We do not try to thread this elsewhere, so
684 just cancel the jump threading request by clearing the AUX
685 field now. */
686 if ((bb->loop_father != e2->src->loop_father
687 && !loop_exit_edge_p (e2->src->loop_father, e2))
688 || (e2->src->loop_father != e2->dest->loop_father
689 && !loop_exit_edge_p (e2->src->loop_father, e2)))
690 {
691 /* Since this case is not handled by our special code
692 to thread through a loop header, we must explicitly
693 cancel the threading request here. */
694 for (unsigned int i = 0; i < path->length (); i++)
695 delete (*path)[i];
696 path->release ();
697 e->aux = NULL;
698 continue;
699 }
700 }
701
702 if (e->dest == e2->src)
703 update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
704 e->count, (*THREAD_PATH (e))[1]->e);
705
706 /* Insert the outgoing edge into the hash table if it is not
707 already in the hash table. */
708 lookup_redirection_data (e, INSERT);
709 }
710
711 /* We do not update dominance info. */
712 free_dominance_info (CDI_DOMINATORS);
713
714 /* We know we only thread through the loop header to loop exits.
715 Let the basic block duplication hook know we are not creating
716 a multiple entry loop. */
717 if (noloop_only
718 && bb == bb->loop_father->header)
719 set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
720
721 /* Now create duplicates of BB.
722
723 Note that for a block with a high outgoing degree we can waste
724 a lot of time and memory creating and destroying useless edges.
725
726 So we first duplicate BB and remove the control structure at the
727 tail of the duplicate as well as all outgoing edges from the
728 duplicate. We then use that duplicate block as a template for
729 the rest of the duplicates. */
730 local_info.template_block = NULL;
731 local_info.bb = bb;
732 local_info.jumps_threaded = false;
733 redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
734 (&local_info);
735
736 /* The template does not have an outgoing edge. Create that outgoing
737      edge and update PHI nodes at the edge's target as necessary.
738
739 We do this after creating all the duplicates to avoid creating
740 unnecessary edges. */
741 redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
742 (&local_info);
743
744 /* The hash table traversals above created the duplicate blocks (and the
745 statements within the duplicate blocks). This loop creates PHI nodes for
746 the duplicated blocks and redirects the incoming edges into BB to reach
747 the duplicates of BB. */
748 redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
749 (&local_info);
750
751 /* Done with this block. Clear REDIRECTION_DATA. */
752 redirection_data.dispose ();
753
754 if (noloop_only
755 && bb == bb->loop_father->header)
756 set_loop_copy (bb->loop_father, NULL);
757
758 /* Indicate to our caller whether or not any jumps were threaded. */
759 return local_info.jumps_threaded;
760 }
761
762 /* Thread edge E through E->dest to the next edge on E's jump thread path.
763    Returns the copy of E->dest created during threading, or E->dest if it
764    was not necessary to copy it (E is its single predecessor).  */
765
766 static basic_block
767 thread_single_edge (edge e)
768 {
769 basic_block bb = e->dest;
770 struct redirection_data rd;
771 vec<jump_thread_edge *> *path = THREAD_PATH (e);
772 edge eto = (*path)[1]->e;
773
774 for (unsigned int i = 0; i < path->length (); i++)
775 delete (*path)[i];
776 delete path;
777 e->aux = NULL;
778
779 thread_stats.num_threaded_edges++;
780
781 if (single_pred_p (bb))
782 {
783       /* If BB has just a single predecessor, we should only remove the
784 	 control statement at its end and all successor edges except ETO.  */
785 remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
786
787 /* And fixup the flags on the single remaining edge. */
788 eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
789 eto->flags |= EDGE_FALLTHRU;
790
791 return bb;
792 }
793
794 /* Otherwise, we need to create a copy. */
795 if (e->dest == eto->src)
796 update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
797
798 vec<jump_thread_edge *> *npath = new vec<jump_thread_edge *> ();
799 jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
800 npath->safe_push (x);
801
802 x = new jump_thread_edge (eto, EDGE_COPY_SRC_BLOCK);
803 npath->safe_push (x);
804 rd.path = npath;
805
806 create_block_for_threading (bb, &rd);
807 remove_ctrl_stmt_and_useless_edges (rd.dup_block, NULL);
808 create_edge_and_update_destination_phis (&rd, rd.dup_block);
809
810 if (dump_file && (dump_flags & TDF_DETAILS))
811 fprintf (dump_file, " Threaded jump %d --> %d to %d\n",
812 e->src->index, e->dest->index, rd.dup_block->index);
813
814 rd.dup_block->count = e->count;
815 rd.dup_block->frequency = EDGE_FREQUENCY (e);
816 single_succ_edge (rd.dup_block)->count = e->count;
817 redirect_edge_and_branch (e, rd.dup_block);
818 flush_pending_stmts (e);
819
820 return rd.dup_block;
821 }
822
823 /* Callback for dfs_enumerate_from. Returns true if BB is different
824 from STOP and DBDS_CE_STOP. */
825
826 static basic_block dbds_ce_stop;
827 static bool
828 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
829 {
830 return (bb != (const_basic_block) stop
831 && bb != dbds_ce_stop);
832 }
833
834 /* Evaluates the dominance relationship of the latch of LOOP and BB, and
835 returns the state. */
836
837 enum bb_dom_status
838 {
839 /* BB does not dominate latch of the LOOP. */
840 DOMST_NONDOMINATING,
841   /* The LOOP is broken (there is no path from the header to its latch).  */
842 DOMST_LOOP_BROKEN,
843 /* BB dominates the latch of the LOOP. */
844 DOMST_DOMINATING
845 };
846
847 static enum bb_dom_status
848 determine_bb_domination_status (struct loop *loop, basic_block bb)
849 {
850 basic_block *bblocks;
851 unsigned nblocks, i;
852 bool bb_reachable = false;
853 edge_iterator ei;
854 edge e;
855
856 /* This function assumes BB is a successor of LOOP->header.
857 If that is not the case return DOMST_NONDOMINATING which
858 is always safe. */
859 {
860 bool ok = false;
861
862 FOR_EACH_EDGE (e, ei, bb->preds)
863 {
864 if (e->src == loop->header)
865 {
866 ok = true;
867 break;
868 }
869 }
870
871 if (!ok)
872 return DOMST_NONDOMINATING;
873 }
874
875 if (bb == loop->latch)
876 return DOMST_DOMINATING;
877
878 /* Check that BB dominates LOOP->latch, and that it is back-reachable
879 from it. */
880
881 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
882 dbds_ce_stop = loop->header;
883 nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
884 bblocks, loop->num_nodes, bb);
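  /* If any block visited on this backward walk (which stops at BB and at
     the loop header) has an edge coming from the header, then there is a
     header->latch path that avoids BB, so BB does not dominate the latch.
     Seeing an edge from BB on the walk shows the latch is reachable
     from BB.  */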
885 for (i = 0; i < nblocks; i++)
886 FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
887 {
888 if (e->src == loop->header)
889 {
890 free (bblocks);
891 return DOMST_NONDOMINATING;
892 }
893 if (e->src == bb)
894 bb_reachable = true;
895 }
896
897 free (bblocks);
898 return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
899 }
900
901 /* Return true if BB is part of the new pre-header that is created
902 when threading the latch to DATA. */
903
904 static bool
905 def_split_header_continue_p (const_basic_block bb, const void *data)
906 {
907 const_basic_block new_header = (const_basic_block) data;
908 const struct loop *l;
909
910 if (bb == new_header
911 || loop_depth (bb->loop_father) < loop_depth (new_header->loop_father))
912 return false;
913 for (l = bb->loop_father; l; l = loop_outer (l))
914 if (l == new_header->loop_father)
915 return true;
916 return false;
917 }
918
919 /* Thread jumps through the header of LOOP. Returns true if cfg changes.
920 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
921 to the inside of the loop. */
922
923 static bool
924 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
925 {
926 basic_block header = loop->header;
927 edge e, tgt_edge, latch = loop_latch_edge (loop);
928 edge_iterator ei;
929 basic_block tgt_bb, atgt_bb;
930 enum bb_dom_status domst;
931
932 /* We have already threaded through headers to exits, so all the threading
933 requests now are to the inside of the loop. We need to avoid creating
934 irreducible regions (i.e., loops with more than one entry block), and
935      also loops with several latch edges, or new subloops of the loop (although
936 there are cases where it might be appropriate, it is difficult to decide,
937 and doing it wrongly may confuse other optimizers).
938
939 We could handle more general cases here. However, the intention is to
940 preserve some information about the loop, which is impossible if its
941 structure changes significantly, in a way that is not well understood.
942      Thus we only handle a few important special cases, in which updating
943      the loop-carried information should also be feasible:
944
945      1) Propagation of the latch edge to a block that dominates the latch block
946 of a loop. This aims to handle the following idiom:
947
948 first = 1;
949 while (1)
950 {
951 if (first)
952 initialize;
953 first = 0;
954 body;
955 }
956
957 After threading the latch edge, this becomes
958
959 first = 1;
960 if (first)
961 initialize;
962 while (1)
963 {
964 first = 0;
965 body;
966 }
967
968 The original header of the loop is moved out of it, and we may thread
969 the remaining edges through it without further constraints.
970
971 2) All entry edges are propagated to a single basic block that dominates
972 the latch block of the loop. This aims to handle the following idiom
973 (normally created for "for" loops):
974
975 i = 0;
976 while (1)
977 {
978 if (i >= 100)
979 break;
980 body;
981 i++;
982 }
983
984 This becomes
985
986 i = 0;
987 while (1)
988 {
989 body;
990 i++;
991 if (i >= 100)
992 break;
993 }
994 */
995
996 /* Threading through the header won't improve the code if the header has just
997 one successor. */
998 if (single_succ_p (header))
999 goto fail;
1000
1001 if (latch->aux)
1002 {
1003 vec<jump_thread_edge *> *path = THREAD_PATH (latch);
1004 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1005 goto fail;
1006 tgt_edge = (*path)[1]->e;
1007 tgt_bb = tgt_edge->dest;
1008 }
1009 else if (!may_peel_loop_headers
1010 && !redirection_block_p (loop->header))
1011 goto fail;
1012 else
1013 {
1014 tgt_bb = NULL;
1015 tgt_edge = NULL;
1016 FOR_EACH_EDGE (e, ei, header->preds)
1017 {
1018 if (!e->aux)
1019 {
1020 if (e == latch)
1021 continue;
1022
1023 	      /* If the latch is not threaded, and there is a header
1024 		 edge that is not threaded, we would create a loop
1025 with multiple entries. */
1026 goto fail;
1027 }
1028
1029 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1030
1031 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1032 goto fail;
1033 tgt_edge = (*path)[1]->e;
1034 atgt_bb = tgt_edge->dest;
1035 if (!tgt_bb)
1036 tgt_bb = atgt_bb;
1037 	  /* Two targets of threading would make us create a loop
1038 with multiple entries. */
1039 else if (tgt_bb != atgt_bb)
1040 goto fail;
1041 }
1042
1043 if (!tgt_bb)
1044 {
1045 /* There are no threading requests. */
1046 return false;
1047 }
1048
1049 /* Redirecting to empty loop latch is useless. */
1050 if (tgt_bb == loop->latch
1051 && empty_block_p (loop->latch))
1052 goto fail;
1053 }
1054
1055 /* The target block must dominate the loop latch, otherwise we would be
1056 creating a subloop. */
1057 domst = determine_bb_domination_status (loop, tgt_bb);
1058 if (domst == DOMST_NONDOMINATING)
1059 goto fail;
1060 if (domst == DOMST_LOOP_BROKEN)
1061 {
1062 /* If the loop ceased to exist, mark it as such, and thread through its
1063 original header. */
1064 loop->header = NULL;
1065 loop->latch = NULL;
1066 loops_state_set (LOOPS_NEED_FIXUP);
1067 return thread_block (header, false);
1068 }
1069
1070 if (tgt_bb->loop_father->header == tgt_bb)
1071 {
1072 /* If the target of the threading is a header of a subloop, we need
1073 to create a preheader for it, so that the headers of the two loops
1074 do not merge. */
1075 if (EDGE_COUNT (tgt_bb->preds) > 2)
1076 {
1077 tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1078 gcc_assert (tgt_bb != NULL);
1079 }
1080 else
1081 tgt_bb = split_edge (tgt_edge);
1082 }
1083
1084 if (latch->aux)
1085 {
1086 basic_block *bblocks;
1087 unsigned nblocks, i;
1088
1089       /* First handle the case where the latch edge is redirected.  We are copying
1090 the loop header but not creating a multiple entry loop. Make the
1091 cfg manipulation code aware of that fact. */
1092 set_loop_copy (loop, loop);
1093 loop->latch = thread_single_edge (latch);
1094 set_loop_copy (loop, NULL);
1095 gcc_assert (single_succ (loop->latch) == tgt_bb);
1096 loop->header = tgt_bb;
1097
1098 /* Remove the new pre-header blocks from our loop. */
1099 bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1100 nblocks = dfs_enumerate_from (header, 0, def_split_header_continue_p,
1101 bblocks, loop->num_nodes, tgt_bb);
1102 for (i = 0; i < nblocks; i++)
1103 if (bblocks[i]->loop_father == loop)
1104 {
1105 remove_bb_from_loops (bblocks[i]);
1106 add_bb_to_loop (bblocks[i], loop_outer (loop));
1107 }
1108 free (bblocks);
1109
1110       /* If the new header has multiple latches, mark it so.  */
1111 FOR_EACH_EDGE (e, ei, loop->header->preds)
1112 if (e->src->loop_father == loop
1113 && e->src != loop->latch)
1114 {
1115 loop->latch = NULL;
1116 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
1117 }
1118
1119 /* Cancel remaining threading requests that would make the
1120 loop a multiple entry loop. */
1121 FOR_EACH_EDGE (e, ei, header->preds)
1122 {
1123 edge e2;
1124
1125 if (e->aux == NULL)
1126 continue;
1127
1128 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1129 e2 = path->last ()->e;
1130
1131 if (e->src->loop_father != e2->dest->loop_father
1132 && e2->dest != loop->header)
1133 {
1134 for (unsigned int i = 0; i < path->length (); i++)
1135 delete (*path)[i];
1136 path->release ();
1137 e->aux = NULL;
1138 }
1139 }
1140
1141 /* Thread the remaining edges through the former header. */
1142 thread_block (header, false);
1143 }
1144 else
1145 {
1146 basic_block new_preheader;
1147
1148       /* Now consider the case where entry edges are redirected to the new entry
1149 block. Remember one entry edge, so that we can find the new
1150 preheader (its destination after threading). */
1151 FOR_EACH_EDGE (e, ei, header->preds)
1152 {
1153 if (e->aux)
1154 break;
1155 }
1156
1157 /* The duplicate of the header is the new preheader of the loop. Ensure
1158 that it is placed correctly in the loop hierarchy. */
1159 set_loop_copy (loop, loop_outer (loop));
1160
1161 thread_block (header, false);
1162 set_loop_copy (loop, NULL);
1163 new_preheader = e->dest;
1164
1165 /* Create the new latch block. This is always necessary, as the latch
1166 must have only a single successor, but the original header had at
1167 least two successors. */
1168 loop->latch = NULL;
1169 mfb_kj_edge = single_succ_edge (new_preheader);
1170 loop->header = mfb_kj_edge->dest;
1171 latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1172 loop->header = latch->dest;
1173 loop->latch = latch->src;
1174 }
1175
1176 return true;
1177
1178 fail:
1179 /* We failed to thread anything. Cancel the requests. */
1180 FOR_EACH_EDGE (e, ei, header->preds)
1181 {
1182 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1183
1184 if (path)
1185 {
1186 for (unsigned int i = 0; i < path->length (); i++)
1187 delete (*path)[i];
1188 path->release ();
1189 e->aux = NULL;
1190 }
1191 }
1192 return false;
1193 }
1194
1195 /* E1 and E2 are edges into the same basic block. Return TRUE if the
1196 PHI arguments associated with those edges are equal or there are no
1197 PHI arguments, otherwise return FALSE. */
1198
1199 static bool
1200 phi_args_equal_on_edges (edge e1, edge e2)
1201 {
1202 gimple_stmt_iterator gsi;
1203 int indx1 = e1->dest_idx;
1204 int indx2 = e2->dest_idx;
1205
1206 for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1207 {
1208 gimple phi = gsi_stmt (gsi);
1209
1210 if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1211 gimple_phi_arg_def (phi, indx2), 0))
1212 return false;
1213 }
1214 return true;
1215 }
1216
1217 /* Walk through the registered jump threads and convert them into a
1218 form convenient for this pass.
1219
1220 Any block which has incoming edges threaded to outgoing edges
1221    will have its entry in THREADED_BLOCKS set.
1222
1223    Any threaded edge will have its jump thread path stored in its
1224    AUX field.
1225
1226 This form avoids the need to walk all the edges in the CFG to
1227 discover blocks which need processing and avoids unnecessary
1228 hash table lookups to map from threaded edge to new target. */
1229
1230 static void
1231 mark_threaded_blocks (bitmap threaded_blocks)
1232 {
1233 unsigned int i;
1234 bitmap_iterator bi;
1235 bitmap tmp = BITMAP_ALLOC (NULL);
1236 basic_block bb;
1237 edge e;
1238 edge_iterator ei;
1239
1240 /* It is possible to have jump threads in which one is a subpath
1241      of the other.  I.e., (A, B), (B, C), (C, D) where B is a joiner
1242 block and (B, C), (C, D) where no joiner block exists.
1243
1244 When this occurs ignore the jump thread request with the joiner
1245 block. It's totally subsumed by the simpler jump thread request.
1246
1247      This results in less block copying and simpler CFGs.  More importantly,
1248 when we duplicate the joiner block, B, in this case we will create
1249 a new threading opportunity that we wouldn't be able to optimize
1250 until the next jump threading iteration.
1251
1252 So first convert the jump thread requests which do not require a
1253 joiner block. */
1254 for (i = 0; i < paths.length (); i++)
1255 {
1256 vec<jump_thread_edge *> *path = paths[i];
1257
1258 if ((*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
1259 {
1260 edge e = (*path)[0]->e;
1261 e->aux = (void *)path;
1262 bitmap_set_bit (tmp, e->dest->index);
1263 }
1264 }
1265
1266
1267 /* Now iterate again, converting cases where we threaded through
1268 a joiner block, but ignoring those where we have already
1269 threaded through the joiner block. */
1270 for (i = 0; i < paths.length (); i++)
1271 {
1272 vec<jump_thread_edge *> *path = paths[i];
1273
1274 if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK
1275 && (*path)[0]->e->aux == NULL)
1276 {
1277 edge e = (*path)[0]->e;
1278 e->aux = path;
1279 bitmap_set_bit (tmp, e->dest->index);
1280 }
1281 }
1282
1283 /* If we have a joiner block (J) which has two successors S1 and S2 and
1284      we are threading through S1 and the final destination of the thread
1285 is S2, then we must verify that any PHI nodes in S2 have the same
1286 PHI arguments for the edge J->S2 and J->S1->...->S2.
1287
1288 We used to detect this prior to registering the jump thread, but
1289 that prohibits propagation of edge equivalences into non-dominated
1290 PHI nodes as the equivalency test might occur before propagation.
1291
1292 This works for now, but will need improvement as part of the FSA
1293 optimization.
1294
1295 Note since we've moved the thread request data to the edges,
1296      we have to iterate on those rather than the PATHS vector.  */
1297 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1298 {
1299 bb = BASIC_BLOCK (i);
1300 FOR_EACH_EDGE (e, ei, bb->preds)
1301 {
1302 if (e->aux)
1303 {
1304 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1305 bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
1306
1307 if (have_joiner)
1308 {
1309 basic_block joiner = e->dest;
1310 edge final_edge = path->last ()->e;
1311 basic_block final_dest = final_edge->dest;
1312 edge e2 = find_edge (joiner, final_dest);
1313
1314 if (e2 && !phi_args_equal_on_edges (e2, final_edge))
1315 {
1316 for (unsigned int i = 0; i < path->length (); i++)
1317 delete (*path)[i];
1318 path->release ();
1319 e->aux = NULL;
1320 }
1321 }
1322 }
1323 }
1324 }
1325
1326
1327 /* If optimizing for size, only thread through block if we don't have
1328 to duplicate it or it's an otherwise empty redirection block. */
1329 if (optimize_function_for_size_p (cfun))
1330 {
1331 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1332 {
1333 bb = BASIC_BLOCK (i);
1334 if (EDGE_COUNT (bb->preds) > 1
1335 && !redirection_block_p (bb))
1336 {
1337 FOR_EACH_EDGE (e, ei, bb->preds)
1338 {
1339 if (e->aux)
1340 {
1341 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1342 for (unsigned int i = 0; i < path->length (); i++)
1343 delete (*path)[i];
1344 path->release ();
1345 e->aux = NULL;
1346 }
1347 }
1348 }
1349 else
1350 bitmap_set_bit (threaded_blocks, i);
1351 }
1352 }
1353 else
1354 bitmap_copy (threaded_blocks, tmp);
1355
1356 /* Look for jump threading paths which cross multiple loop headers.
1357
1358 The code to thread through loop headers will change the CFG in ways
1359 that break assumptions made by the loop optimization code.
1360
1361 We don't want to blindly cancel the requests. We can instead do better
1362 by trimming off the end of the jump thread path. */
1363 EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1364 {
1365 basic_block bb = BASIC_BLOCK (i);
1366 FOR_EACH_EDGE (e, ei, bb->preds)
1367 {
1368 if (e->aux)
1369 {
1370 vec<jump_thread_edge *> *path = THREAD_PATH (e);
1371
1372 /* Basically we're looking for a situation where we can see
1373 3 or more loop structures on a jump threading path. */
1374
1375 struct loop *first_father = (*path)[0]->e->src->loop_father;
1376 struct loop *second_father = NULL;
1377 for (unsigned int i = 0; i < path->length (); i++)
1378 {
1379 /* See if this is a loop father we have not seen before. */
1380 if ((*path)[i]->e->dest->loop_father != first_father
1381 && (*path)[i]->e->dest->loop_father != second_father)
1382 {
1383 /* We've already seen two loop fathers, so we
1384 need to trim this jump threading path. */
1385 if (second_father != NULL)
1386 {
1387 /* Trim from entry I onwards. */
1388 for (unsigned int j = i; j < path->length (); j++)
1389 delete (*path)[j];
1390 path->truncate (i);
1391
1392 /* Now that we've truncated the path, make sure
1393 what's left is still valid. We need at least
1394 two edges on the path and the last edge can not
1395 be a joiner. This should never happen, but let's
1396 be safe. */
1397 if (path->length () < 2
1398 || (path->last ()->type
1399 == EDGE_COPY_SRC_JOINER_BLOCK))
1400 {
1401 for (unsigned int i = 0; i < path->length (); i++)
1402 delete (*path)[i];
1403 path->release ();
1404 e->aux = NULL;
1405 }
1406 break;
1407 }
1408 else
1409 {
1410 second_father = (*path)[i]->e->dest->loop_father;
1411 }
1412 }
1413 }
1414 }
1415 }
1416 }
1417
1418 BITMAP_FREE (tmp);
1419 }
1420
1421
1422 /* Walk through all blocks and thread incoming edges to the appropriate
1423    outgoing edge for each registered jump thread path.
1424
1425 It is the caller's responsibility to fix the dominance information
1426 and rewrite duplicated SSA_NAMEs back into SSA form.
1427
1428 If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
1429 loop headers if it does not simplify the loop.
1430
1431 Returns true if one or more edges were threaded, false otherwise. */
1432
1433 bool
1434 thread_through_all_blocks (bool may_peel_loop_headers)
1435 {
1436 bool retval = false;
1437 unsigned int i;
1438 bitmap_iterator bi;
1439 bitmap threaded_blocks;
1440 struct loop *loop;
1441 loop_iterator li;
1442
1443 /* We must know about loops in order to preserve them. */
1444 gcc_assert (current_loops != NULL);
1445
1446 if (!paths.exists ())
1447 return false;
1448
1449 threaded_blocks = BITMAP_ALLOC (NULL);
1450 memset (&thread_stats, 0, sizeof (thread_stats));
1451
1452 mark_threaded_blocks (threaded_blocks);
1453
1454 initialize_original_copy_tables ();
1455
1456 /* First perform the threading requests that do not affect
1457 loop structure. */
1458 EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
1459 {
1460 basic_block bb = BASIC_BLOCK (i);
1461
1462 if (EDGE_COUNT (bb->preds) > 0)
1463 retval |= thread_block (bb, true);
1464 }
1465
1466 /* Then perform the threading through loop headers. We start with the
1467 innermost loop, so that the changes in cfg we perform won't affect
1468 further threading. */
1469 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1470 {
1471 if (!loop->header
1472 || !bitmap_bit_p (threaded_blocks, loop->header->index))
1473 continue;
1474
1475 retval |= thread_through_loop_header (loop, may_peel_loop_headers);
1476 }
1477
1478 statistics_counter_event (cfun, "Jumps threaded",
1479 thread_stats.num_threaded_edges);
1480
1481 free_original_copy_tables ();
1482
1483 BITMAP_FREE (threaded_blocks);
1484 threaded_blocks = NULL;
1485 paths.release ();
1486
1487 if (retval)
1488 loops_state_set (LOOPS_NEED_FIXUP);
1489
1490 return retval;
1491 }
1492
1493 /* Dump a jump threading path, including annotations about each
1494 edge in the path. */
1495
1496 static void
1497 dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path)
1498 {
1499 fprintf (dump_file,
1500 " Registering jump thread: (%d, %d) incoming edge; ",
1501 path[0]->e->src->index, path[0]->e->dest->index);
1502
1503 for (unsigned int i = 1; i < path.length (); i++)
1504 {
1505 /* We can get paths with a NULL edge when the final destination
1506 of a jump thread turns out to be a constant address. We dump
1507 those paths when debugging, so we have to be prepared for that
1508 possibility here. */
1509 if (path[i]->e == NULL)
1510 continue;
1511
1512 if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1513 fprintf (dump_file, " (%d, %d) joiner; ",
1514 path[i]->e->src->index, path[i]->e->dest->index);
1515 if (path[i]->type == EDGE_COPY_SRC_BLOCK)
1516 fprintf (dump_file, " (%d, %d) normal;",
1517 path[i]->e->src->index, path[i]->e->dest->index);
1518 if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
1519 fprintf (dump_file, " (%d, %d) nocopy;",
1520 path[i]->e->src->index, path[i]->e->dest->index);
1521 }
1522 fputc ('\n', dump_file);
1523 }
1524
1525 /* Register a jump threading opportunity. We queue up all the jump
1526 threading opportunities discovered by a pass and update the CFG
1527 and SSA form all at once.
1528
1529    PATH describes the jump thread: its first entry is the edge we can
1530    thread, and its remaining entries describe the edges that are threaded
1531    through on the way to the final destination.  */
1532
1533 void
1534 register_jump_thread (vec<jump_thread_edge *> *path)
1535 {
1536 if (!dbg_cnt (registered_jump_thread))
1537 {
1538 for (unsigned int i = 0; i < path->length (); i++)
1539 delete (*path)[i];
1540 path->release ();
1541 return;
1542 }
1543
1544 /* First make sure there are no NULL outgoing edges on the jump threading
1545 path. That can happen for jumping to a constant address. */
1546 for (unsigned int i = 0; i < path->length (); i++)
1547 if ((*path)[i]->e == NULL)
1548 {
1549 if (dump_file && (dump_flags & TDF_DETAILS))
1550 {
1551 fprintf (dump_file,
1552 "Found NULL edge in jump threading path. Cancelling jump thread:\n");
1553 dump_jump_thread_path (dump_file, *path);
1554 }
1555
1556 for (unsigned int i = 0; i < path->length (); i++)
1557 delete (*path)[i];
1558 path->release ();
1559 return;
1560 }
1561
1562 if (dump_file && (dump_flags & TDF_DETAILS))
1563 dump_jump_thread_path (dump_file, *path);
1564
1565 if (!paths.exists ())
1566 paths.create (5);
1567
1568 paths.safe_push (path);
1569 }