gcc/bb-reorder.c
1 /* Basic block reordering routines for the GNU compiler.
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This (greedy) algorithm constructs traces in several rounds.
21 The construction starts from "seeds". The seed for the first round
22 is the entry point of the function. When there is more than one seed,
23 the one with the lowest key in the heap is selected first (see bb_to_key).
24 Then the algorithm repeatedly adds the most probable successor to the end
25 of a trace. Finally it connects the traces.
26
27 There are two parameters: Branch Threshold and Exec Threshold.
28 If the probability of an edge to a successor of the current basic block is
29 lower than Branch Threshold or its frequency is lower than Exec Threshold,
30 then the successor will be the seed in one of the next rounds.
31 Each round has these parameters lower than the previous one.
32 The last round has to have these parameters set to zero so that the
33 remaining blocks are picked up.
34
35 The algorithm selects the most probable successor from all unvisited
36 successors and successors that have been added to this trace.
37 The other successors (that have not been "sent" to the next round) will be
38 other seeds for this round and the secondary traces will start from them.
39 If the successor has not been visited in this trace, it is added to the
40 trace (however, there is some heuristic for simple branches).
41 If the successor has been visited in this trace, a loop has been found.
42 If the loop has many iterations, the loop is rotated so that the source
43 block of the most probable edge going out of the loop is the last block
44 of the trace.
45 If the loop has few iterations and there is no edge from the last block of
46 the loop going out of the loop, the loop header is duplicated.
47
48 When connecting traces, the algorithm first checks whether there is an edge
49 from the last block of a trace to the first block of another trace.
50 When there are still some unconnected traces it checks whether there exists
51 a basic block BB such that BB is a successor of the last block of a trace
52 and BB is a predecessor of the first block of another trace. In this case,
53 BB is duplicated, added at the end of the first trace and the traces are
54 connected through it.
55 The remaining traces are simply connected, so there will be a jump to the
56 beginning of each remaining trace.
57
58 The above description is for the full algorithm, which is used when the
59 function is optimized for speed. When the function is optimized for size,
60 in order to reduce long jumps and connect more fallthru edges, the
61 algorithm is modified as follows:
62 (1) Break long traces to short ones. A trace is broken at a block that has
63 multiple predecessors/successors during trace discovery. When connecting
64 traces, only connect Trace n with Trace n + 1. This change reduces most
65 long jumps compared with the above algorithm.
66 (2) Ignore the edge probability and frequency for fallthru edges.
67 (3) Keep the original order of blocks when there is no chance to fall
68 through. We rely on the results of cfg_cleanup.
69
70 To implement the change for code size optimization, block's index is
71 selected as the key and all traces are found in one round.
72
73 References:
74
75 "Software Trace Cache"
76 A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
77 http://citeseer.nj.nec.com/15361.html
78
79 */
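/* A condensed sketch of the driver loop described above (a reading aid, not
   part of the original source; see find_traces, find_traces_1_round and
   connect_traces below for the real code):

     heap = successors of the entry block;              (the first seeds)
     for (round = 0; round < number_of_rounds; round++)
       find_traces_1_round (branch_threshold[round], exec_threshold[round],
                            ..., &heap, ...);
     connect_traces (n_traces, traces);

   Blocks that are too cold for the current round are moved to a fresh heap
   and become the seeds of the next round; the final round uses zero
   thresholds, so every remaining block ends up in some trace.  */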
80
81 #include "config.h"
82 #include "system.h"
83 #include "coretypes.h"
84 #include "tm.h"
85 #include "tree.h"
86 #include "rtl.h"
87 #include "regs.h"
88 #include "flags.h"
89 #include "output.h"
90 #include "fibheap.h"
91 #include "target.h"
92 #include "hashtab.h"
93 #include "hash-set.h"
94 #include "vec.h"
95 #include "machmode.h"
96 #include "hard-reg-set.h"
97 #include "input.h"
98 #include "function.h"
99 #include "tm_p.h"
100 #include "obstack.h"
101 #include "expr.h"
102 #include "params.h"
103 #include "diagnostic-core.h"
104 #include "toplev.h" /* user_defined_section_attribute */
105 #include "tree-pass.h"
106 #include "dominance.h"
107 #include "cfg.h"
108 #include "cfgrtl.h"
109 #include "cfganal.h"
110 #include "cfgbuild.h"
111 #include "cfgcleanup.h"
112 #include "predict.h"
113 #include "basic-block.h"
114 #include "df.h"
115 #include "bb-reorder.h"
116 #include "cgraph.h"
117 #include "except.h"
118
119 /* The number of rounds. In most cases there will only be 4 rounds, but
120 when partitioning hot and cold basic blocks into separate sections of
121 the object file there will be an extra round. */
122 #define N_ROUNDS 5
123
124 /* Stubs in case we don't have a return insn.
125 We have to check at run time too, not only compile time. */
126
127 #ifndef HAVE_return
128 #define HAVE_return 0
129 #define gen_return() NULL_RTX
130 #endif
131
132
133 struct target_bb_reorder default_target_bb_reorder;
134 #if SWITCHABLE_TARGET
135 struct target_bb_reorder *this_target_bb_reorder = &default_target_bb_reorder;
136 #endif
137
138 #define uncond_jump_length \
139 (this_target_bb_reorder->x_uncond_jump_length)
140
141 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
142 static const int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
143
144 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */
145 static const int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
146
147 /* If the edge frequency is lower than DUPLICATION_THRESHOLD per mille of the
148 entry block's frequency, the edge destination is not duplicated while connecting traces. */
149 #define DUPLICATION_THRESHOLD 100
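/* Illustrative arithmetic (not part of the original source): with
   max_entry_frequency == 10000, the per-round frequency cut-offs derived
   from exec_threshold are 5000, 2000, 500, 0 and 0, and an edge needs a
   frequency of at least 10000 * DUPLICATION_THRESHOLD / 1000 == 1000 before
   its destination is considered for duplication while connecting traces.  */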
150
151 /* Structure to hold needed information for each basic block. */
152 typedef struct bbro_basic_block_data_def
153 {
154 /* Which trace is the bb start of (-1 means it is not a start of any). */
155 int start_of_trace;
156
157 /* Which trace is the bb end of (-1 means it is not an end of any). */
158 int end_of_trace;
159
160 /* Which trace is the bb in? */
161 int in_trace;
162
163 /* Which trace was this bb visited in? */
164 int visited;
165
166 /* Which heap is BB in (if any)? */
167 fibheap_t heap;
168
169 /* Which heap node is BB in (if any)? */
170 fibnode_t node;
171 } bbro_basic_block_data;
172
173 /* The current size of the following dynamic array. */
174 static int array_size;
175
176 /* The array which holds needed information for basic blocks. */
177 static bbro_basic_block_data *bbd;
178
179 /* To avoid frequent reallocation the size of the arrays is greater than
180 needed; the number of elements is (not less than) 1.25 * size_wanted. */
181 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
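/* A worked example of the sizing macro (illustrative only, not part of the
   original source; the helper name is hypothetical).  The macro rounds the
   request up to a multiple of 5 that is at least 1.25 * X.  */
#if 0
static void
bbro_array_size_example (void)
{
  gcc_assert (GET_ARRAY_SIZE (100) == 130);  /* ((100 / 4) + 1) * 5 */
  gcc_assert (GET_ARRAY_SIZE (7) == 10);     /* 1.25 * 7 rounded up to a
                                                multiple of 5.  */
}
#endif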
182
183 /* Free the memory and set the pointer to NULL. */
184 #define FREE(P) (gcc_assert (P), free (P), P = 0)
185
186 /* Structure for holding information about a trace. */
187 struct trace
188 {
189 /* First and last basic block of the trace. */
190 basic_block first, last;
191
192 /* The round of STC creation in which this trace was found. */
193 int round;
194
195 /* The length (i.e. the number of basic blocks) of the trace. */
196 int length;
197 };
198
199 /* Maximum frequency and count of one of the entry blocks. */
200 static int max_entry_frequency;
201 static gcov_type max_entry_count;
202
203 /* Local function prototypes. */
204 static void find_traces (int *, struct trace *);
205 static basic_block rotate_loop (edge, struct trace *, int);
206 static void mark_bb_visited (basic_block, int);
207 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
208 int, fibheap_t *, int);
209 static basic_block copy_bb (basic_block, edge, basic_block, int);
210 static fibheapkey_t bb_to_key (basic_block);
211 static bool better_edge_p (const_basic_block, const_edge, int, int, int, int,
212 const_edge);
213 static bool connect_better_edge_p (const_edge, bool, int, const_edge,
214 struct trace *);
215 static void connect_traces (int, struct trace *);
216 static bool copy_bb_p (const_basic_block, int);
217 static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);
218 \f
219 /* Return the trace number in which BB was visited. */
220
221 static int
222 bb_visited_trace (const_basic_block bb)
223 {
224 gcc_assert (bb->index < array_size);
225 return bbd[bb->index].visited;
226 }
227
228 /* Mark BB as having been visited in trace number TRACE. */
229
230 static void
231 mark_bb_visited (basic_block bb, int trace)
232 {
233 bbd[bb->index].visited = trace;
234 if (bbd[bb->index].heap)
235 {
236 fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
237 bbd[bb->index].heap = NULL;
238 bbd[bb->index].node = NULL;
239 }
240 }
241
242 /* Check to see if bb should be pushed into the next round of trace
243 collection or not. Reasons for pushing the block forward are: 1) the
244 block is cold, we are doing partitioning, and there will be another
245 round (cold partition blocks are not supposed to be collected into
246 traces until the very last round); or 2) there will be another round,
247 and the basic block is not "hot enough" for the current round of
248 trace collection. */
249
250 static bool
251 push_to_next_round_p (const_basic_block bb, int round, int number_of_rounds,
252 int exec_th, gcov_type count_th)
253 {
254 bool there_exists_another_round;
255 bool block_not_hot_enough;
256
257 there_exists_another_round = round < number_of_rounds - 1;
258
259 block_not_hot_enough = (bb->frequency < exec_th
260 || bb->count < count_th
261 || probably_never_executed_bb_p (cfun, bb));
262
263 if (there_exists_another_round
264 && block_not_hot_enough)
265 return true;
266 else
267 return false;
268 }
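/* Example (illustrative, not from the original source): in round 0 with
   exec_th == 5000 and count_th == 0, a block of frequency 300 is pushed to
   round 1; in the very last round THERE_EXISTS_ANOTHER_ROUND is false, so
   even the coldest blocks are kept and turned into traces.  */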
269
270 /* Find the traces for Software Trace Cache. Chain each trace through the
271 basic block's aux field. Store the number of traces in N_TRACES and the
272 descriptions of the traces in TRACES. */
273
274 static void
275 find_traces (int *n_traces, struct trace *traces)
276 {
277 int i;
278 int number_of_rounds;
279 edge e;
280 edge_iterator ei;
281 fibheap_t heap;
282
283 /* Add one extra round of trace collection when partitioning hot/cold
284 basic blocks into separate sections. The last round is for all the
285 cold blocks (and ONLY the cold blocks). */
286
287 number_of_rounds = N_ROUNDS - 1;
288
289 /* Insert entry points of function into heap. */
290 heap = fibheap_new ();
291 max_entry_frequency = 0;
292 max_entry_count = 0;
293 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
294 {
295 bbd[e->dest->index].heap = heap;
296 bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
297 e->dest);
298 if (e->dest->frequency > max_entry_frequency)
299 max_entry_frequency = e->dest->frequency;
300 if (e->dest->count > max_entry_count)
301 max_entry_count = e->dest->count;
302 }
303
304 /* Find the traces. */
305 for (i = 0; i < number_of_rounds; i++)
306 {
307 gcov_type count_threshold;
308
309 if (dump_file)
310 fprintf (dump_file, "STC - round %d\n", i + 1);
311
312 if (max_entry_count < INT_MAX / 1000)
313 count_threshold = max_entry_count * exec_threshold[i] / 1000;
314 else
315 count_threshold = max_entry_count / 1000 * exec_threshold[i];
316
317 find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
318 max_entry_frequency * exec_threshold[i] / 1000,
319 count_threshold, traces, n_traces, i, &heap,
320 number_of_rounds);
321 }
322 fibheap_delete (heap);
323
324 if (dump_file)
325 {
326 for (i = 0; i < *n_traces; i++)
327 {
328 basic_block bb;
329 fprintf (dump_file, "Trace %d (round %d): ", i + 1,
330 traces[i].round + 1);
331 for (bb = traces[i].first;
332 bb != traces[i].last;
333 bb = (basic_block) bb->aux)
334 fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
335 fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
336 }
337 fflush (dump_file);
338 }
339 }
340
341 /* Rotate the loop whose back edge is BACK_EDGE, in the tail of trace TRACE
342 (with sequential number TRACE_N). Return the new last block of the trace. */
343
344 static basic_block
345 rotate_loop (edge back_edge, struct trace *trace, int trace_n)
346 {
347 basic_block bb;
348
349 /* Information about the best end (end after rotation) of the loop. */
350 basic_block best_bb = NULL;
351 edge best_edge = NULL;
352 int best_freq = -1;
353 gcov_type best_count = -1;
354 /* The best edge is preferred when its destination is not visited yet
355 or is a start block of some trace. */
356 bool is_preferred = false;
357
358 /* Find the most frequent edge that goes out from current trace. */
359 bb = back_edge->dest;
360 do
361 {
362 edge e;
363 edge_iterator ei;
364
365 FOR_EACH_EDGE (e, ei, bb->succs)
366 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
367 && bb_visited_trace (e->dest) != trace_n
368 && (e->flags & EDGE_CAN_FALLTHRU)
369 && !(e->flags & EDGE_COMPLEX))
370 {
371 if (is_preferred)
372 {
373 /* The best edge is preferred. */
374 if (!bb_visited_trace (e->dest)
375 || bbd[e->dest->index].start_of_trace >= 0)
376 {
377 /* The current edge E is also preferred. */
378 int freq = EDGE_FREQUENCY (e);
379 if (freq > best_freq || e->count > best_count)
380 {
381 best_freq = freq;
382 best_count = e->count;
383 best_edge = e;
384 best_bb = bb;
385 }
386 }
387 }
388 else
389 {
390 if (!bb_visited_trace (e->dest)
391 || bbd[e->dest->index].start_of_trace >= 0)
392 {
393 /* The current edge E is preferred. */
394 is_preferred = true;
395 best_freq = EDGE_FREQUENCY (e);
396 best_count = e->count;
397 best_edge = e;
398 best_bb = bb;
399 }
400 else
401 {
402 int freq = EDGE_FREQUENCY (e);
403 if (!best_edge || freq > best_freq || e->count > best_count)
404 {
405 best_freq = freq;
406 best_count = e->count;
407 best_edge = e;
408 best_bb = bb;
409 }
410 }
411 }
412 }
413 bb = (basic_block) bb->aux;
414 }
415 while (bb != back_edge->dest);
416
417 if (best_bb)
418 {
419 /* Rotate the loop so that the BEST_EDGE goes out from the last block of
420 the trace. */
421 if (back_edge->dest == trace->first)
422 {
423 trace->first = (basic_block) best_bb->aux;
424 }
425 else
426 {
427 basic_block prev_bb;
428
429 for (prev_bb = trace->first;
430 prev_bb->aux != back_edge->dest;
431 prev_bb = (basic_block) prev_bb->aux)
432 ;
433 prev_bb->aux = best_bb->aux;
434
435 /* Try to get rid of uncond jump to cond jump. */
436 if (single_succ_p (prev_bb))
437 {
438 basic_block header = single_succ (prev_bb);
439
440 /* Duplicate HEADER if it is a small block containing cond jump
441 in the end. */
442 if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
443 && !CROSSING_JUMP_P (BB_END (header)))
444 copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
445 }
446 }
447 }
448 else
449 {
450 /* We have not found a suitable loop tail, so do no rotation. */
451 best_bb = back_edge->src;
452 }
453 best_bb->aux = NULL;
454 return best_bb;
455 }
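/* Illustration (not from the original source): for a trace H -> A -> B with
   back edge B -> H, where A has the most frequent edge leaving the loop, the
   rotation above re-links the trace as B -> H -> A, so that A becomes the
   last block and its exit edge can fall through out of the trace.  */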
456
457 /* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH i.e. do
458 not include basic blocks whose probability is lower than BRANCH_TH or whose
459 frequency is lower than EXEC_TH into traces (or whose count is lower than
460 COUNT_TH). Store the new traces into TRACES and modify the number of
461 traces *N_TRACES. Set the round (which the trace belongs to) to ROUND.
462 The function expects starting basic blocks to be in *HEAP and will delete
463 *HEAP and store starting points for the next round into new *HEAP. */
464
465 static void
466 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
467 struct trace *traces, int *n_traces, int round,
468 fibheap_t *heap, int number_of_rounds)
469 {
470 /* Heap for discarded basic blocks which are possible starting points for
471 the next round. */
472 fibheap_t new_heap = fibheap_new ();
473 bool for_size = optimize_function_for_size_p (cfun);
474
475 while (!fibheap_empty (*heap))
476 {
477 basic_block bb;
478 struct trace *trace;
479 edge best_edge, e;
480 fibheapkey_t key;
481 edge_iterator ei;
482
483 bb = (basic_block) fibheap_extract_min (*heap);
484 bbd[bb->index].heap = NULL;
485 bbd[bb->index].node = NULL;
486
487 if (dump_file)
488 fprintf (dump_file, "Getting bb %d\n", bb->index);
489
490 /* If the BB's frequency is too low, send BB to the next round. When
491 partitioning hot/cold blocks into separate sections, make sure all
492 the cold blocks (and ONLY the cold blocks) go into the (extra) final
493 round. When optimizing for size, do not push to next round. */
494
495 if (!for_size
496 && push_to_next_round_p (bb, round, number_of_rounds, exec_th,
497 count_th))
498 {
499 int key = bb_to_key (bb);
500 bbd[bb->index].heap = new_heap;
501 bbd[bb->index].node = fibheap_insert (new_heap, key, bb);
502
503 if (dump_file)
504 fprintf (dump_file,
505 " Possible start point of next round: %d (key: %d)\n",
506 bb->index, key);
507 continue;
508 }
509
510 trace = traces + *n_traces;
511 trace->first = bb;
512 trace->round = round;
513 trace->length = 0;
514 bbd[bb->index].in_trace = *n_traces;
515 (*n_traces)++;
516
517 do
518 {
519 int prob, freq;
520 bool ends_in_call;
521
522 /* The probability and frequency of the best edge. */
523 int best_prob = INT_MIN / 2;
524 int best_freq = INT_MIN / 2;
525
526 best_edge = NULL;
527 mark_bb_visited (bb, *n_traces);
528 trace->length++;
529
530 if (dump_file)
531 fprintf (dump_file, "Basic block %d was visited in trace %d\n",
532 bb->index, *n_traces - 1);
533
534 ends_in_call = block_ends_with_call_p (bb);
535
536 /* Select the successor that will be placed after BB. */
537 FOR_EACH_EDGE (e, ei, bb->succs)
538 {
539 gcc_assert (!(e->flags & EDGE_FAKE));
540
541 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
542 continue;
543
544 if (bb_visited_trace (e->dest)
545 && bb_visited_trace (e->dest) != *n_traces)
546 continue;
547
548 if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
549 continue;
550
551 prob = e->probability;
552 freq = e->dest->frequency;
553
554 /* The only sensible preference for a call instruction is the
555 fallthru edge. Don't bother selecting anything else. */
556 if (ends_in_call)
557 {
558 if (e->flags & EDGE_CAN_FALLTHRU)
559 {
560 best_edge = e;
561 best_prob = prob;
562 best_freq = freq;
563 }
564 continue;
565 }
566
567 /* Skip an edge that cannot be fallthru, is complex, or leads to an
568 improbable or infrequent successor (i.e. an unsuitable successor).
569 When optimizing for size, ignore the probability and frequency. */
570 if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
571 || ((prob < branch_th || EDGE_FREQUENCY (e) < exec_th
572 || e->count < count_th) && (!for_size)))
573 continue;
574
575 /* If partitioning hot/cold basic blocks, don't consider edges
576 that cross section boundaries. */
577
578 if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
579 best_edge))
580 {
581 best_edge = e;
582 best_prob = prob;
583 best_freq = freq;
584 }
585 }
586
587 /* If the best destination has multiple predecessors and can be
588 duplicated more cheaply than a jump, don't allow it to be added
589 to a trace. We'll duplicate it when connecting traces. */
590 if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
591 && copy_bb_p (best_edge->dest, 0))
592 best_edge = NULL;
593
594 /* If the best destination has multiple successors or predecessors,
595 don't allow it to be added when optimizing for size. This makes
596 sure predecessors with a smaller index are handled before the best
597 destination. It breaks long traces and reduces long jumps.
598
599 Take if-then-else as an example.
600 A
601 / \
602 B C
603 \ /
604 D
605 If we do not remove the best edge B->D/C->D, the final order might
606 be A B D ... C. C is at the end of the program. If D's successors
607 and D are complicated, we might need long jumps for A->C and C->D.
608 A similar issue arises for the order A C D ... B.
609
610 After removing the best edge, the final result will be ABCD/ACBD.
611 It does not add a jump compared with the previous order. But it
612 reduces the possibility of long jumps. */
613 if (best_edge && for_size
614 && (EDGE_COUNT (best_edge->dest->succs) > 1
615 || EDGE_COUNT (best_edge->dest->preds) > 1))
616 best_edge = NULL;
617
618 /* Add all non-selected successors to the heaps. */
619 FOR_EACH_EDGE (e, ei, bb->succs)
620 {
621 if (e == best_edge
622 || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
623 || bb_visited_trace (e->dest))
624 continue;
625
626 key = bb_to_key (e->dest);
627
628 if (bbd[e->dest->index].heap)
629 {
630 /* E->DEST is already in some heap. */
631 if (key != bbd[e->dest->index].node->key)
632 {
633 if (dump_file)
634 {
635 fprintf (dump_file,
636 "Changing key for bb %d from %ld to %ld.\n",
637 e->dest->index,
638 (long) bbd[e->dest->index].node->key,
639 key);
640 }
641 fibheap_replace_key (bbd[e->dest->index].heap,
642 bbd[e->dest->index].node, key);
643 }
644 }
645 else
646 {
647 fibheap_t which_heap = *heap;
648
649 prob = e->probability;
650 freq = EDGE_FREQUENCY (e);
651
652 if (!(e->flags & EDGE_CAN_FALLTHRU)
653 || (e->flags & EDGE_COMPLEX)
654 || prob < branch_th || freq < exec_th
655 || e->count < count_th)
656 {
657 /* When partitioning hot/cold basic blocks, make sure
658 the cold blocks (and only the cold blocks) all get
659 pushed to the last round of trace collection. When
660 optimizing for size, do not push to next round. */
661
662 if (!for_size && push_to_next_round_p (e->dest, round,
663 number_of_rounds,
664 exec_th, count_th))
665 which_heap = new_heap;
666 }
667
668 bbd[e->dest->index].heap = which_heap;
669 bbd[e->dest->index].node = fibheap_insert (which_heap,
670 key, e->dest);
671
672 if (dump_file)
673 {
674 fprintf (dump_file,
675 " Possible start of %s round: %d (key: %ld)\n",
676 (which_heap == new_heap) ? "next" : "this",
677 e->dest->index, (long) key);
678 }
679
680 }
681 }
682
683 if (best_edge) /* Suitable successor was found. */
684 {
685 if (bb_visited_trace (best_edge->dest) == *n_traces)
686 {
687 /* We do nothing with one-basic-block loops. */
688 if (best_edge->dest != bb)
689 {
690 if (EDGE_FREQUENCY (best_edge)
691 > 4 * best_edge->dest->frequency / 5)
692 {
693 /* The loop has at least 4 iterations. If the loop
694 header is not the first block of the function
695 we can rotate the loop. */
696
697 if (best_edge->dest
698 != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
699 {
700 if (dump_file)
701 {
702 fprintf (dump_file,
703 "Rotating loop %d - %d\n",
704 best_edge->dest->index, bb->index);
705 }
706 bb->aux = best_edge->dest;
707 bbd[best_edge->dest->index].in_trace =
708 (*n_traces) - 1;
709 bb = rotate_loop (best_edge, trace, *n_traces);
710 }
711 }
712 else
713 {
714 /* The loop has fewer than 4 iterations. */
715
716 if (single_succ_p (bb)
717 && copy_bb_p (best_edge->dest,
718 optimize_edge_for_speed_p
719 (best_edge)))
720 {
721 bb = copy_bb (best_edge->dest, best_edge, bb,
722 *n_traces);
723 trace->length++;
724 }
725 }
726 }
727
728 /* Terminate the trace. */
729 break;
730 }
731 else
732 {
733 /* Check for a situation
734
735 A
736 /|
737 B |
738 \|
739 C
740
741 where
742 EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
743 >= EDGE_FREQUENCY (AC).
744 (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
745 Best ordering is then A B C.
746
747 When optimizing for size, A B C is always the best order.
748
749 This situation is created for example by:
750
751 if (A) B;
752 C;
753
754 */
755
756 FOR_EACH_EDGE (e, ei, bb->succs)
757 if (e != best_edge
758 && (e->flags & EDGE_CAN_FALLTHRU)
759 && !(e->flags & EDGE_COMPLEX)
760 && !bb_visited_trace (e->dest)
761 && single_pred_p (e->dest)
762 && !(e->flags & EDGE_CROSSING)
763 && single_succ_p (e->dest)
764 && (single_succ_edge (e->dest)->flags
765 & EDGE_CAN_FALLTHRU)
766 && !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
767 && single_succ (e->dest) == best_edge->dest
768 && (2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)
769 || for_size))
770 {
771 best_edge = e;
772 if (dump_file)
773 fprintf (dump_file, "Selecting BB %d\n",
774 best_edge->dest->index);
775 break;
776 }
777
778 bb->aux = best_edge->dest;
779 bbd[best_edge->dest->index].in_trace = (*n_traces) - 1;
780 bb = best_edge->dest;
781 }
782 }
783 }
784 while (best_edge);
785 trace->last = bb;
786 bbd[trace->first->index].start_of_trace = *n_traces - 1;
787 bbd[trace->last->index].end_of_trace = *n_traces - 1;
788
789 /* The trace is terminated so we have to recount the keys in the heap
790 (some blocks can now have a lower key because one of their predecessors
791 is an end of the trace). */
792 FOR_EACH_EDGE (e, ei, bb->succs)
793 {
794 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
795 || bb_visited_trace (e->dest))
796 continue;
797
798 if (bbd[e->dest->index].heap)
799 {
800 key = bb_to_key (e->dest);
801 if (key != bbd[e->dest->index].node->key)
802 {
803 if (dump_file)
804 {
805 fprintf (dump_file,
806 "Changing key for bb %d from %ld to %ld.\n",
807 e->dest->index,
808 (long) bbd[e->dest->index].node->key, key);
809 }
810 fibheap_replace_key (bbd[e->dest->index].heap,
811 bbd[e->dest->index].node,
812 key);
813 }
814 }
815 }
816 }
817
818 fibheap_delete (*heap);
819
820 /* "Return" the new heap. */
821 *heap = new_heap;
822 }
823
824 /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
825 it to the trace after BB, mark OLD_BB visited and update the pass' data
826 structures (TRACE is the number of the trace to which OLD_BB is duplicated). */
827
828 static basic_block
829 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
830 {
831 basic_block new_bb;
832
833 new_bb = duplicate_block (old_bb, e, bb);
834 BB_COPY_PARTITION (new_bb, old_bb);
835
836 gcc_assert (e->dest == new_bb);
837
838 if (dump_file)
839 fprintf (dump_file,
840 "Duplicated bb %d (created bb %d)\n",
841 old_bb->index, new_bb->index);
842
843 if (new_bb->index >= array_size
844 || last_basic_block_for_fn (cfun) > array_size)
845 {
846 int i;
847 int new_size;
848
849 new_size = MAX (last_basic_block_for_fn (cfun), new_bb->index + 1);
850 new_size = GET_ARRAY_SIZE (new_size);
851 bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size);
852 for (i = array_size; i < new_size; i++)
853 {
854 bbd[i].start_of_trace = -1;
855 bbd[i].end_of_trace = -1;
856 bbd[i].in_trace = -1;
857 bbd[i].visited = 0;
858 bbd[i].heap = NULL;
859 bbd[i].node = NULL;
860 }
861 array_size = new_size;
862
863 if (dump_file)
864 {
865 fprintf (dump_file,
866 "Growing the dynamic array to %d elements.\n",
867 array_size);
868 }
869 }
870
871 gcc_assert (!bb_visited_trace (e->dest));
872 mark_bb_visited (new_bb, trace);
873 new_bb->aux = bb->aux;
874 bb->aux = new_bb;
875
876 bbd[new_bb->index].in_trace = trace;
877
878 return new_bb;
879 }
880
881 /* Compute and return the key (for the heap) of the basic block BB. */
882
883 static fibheapkey_t
884 bb_to_key (basic_block bb)
885 {
886 edge e;
887 edge_iterator ei;
888 int priority = 0;
889
890 /* Use index as key to align with its original order. */
891 if (optimize_function_for_size_p (cfun))
892 return bb->index;
893
894 /* Do not start in probably never executed blocks. */
895
896 if (BB_PARTITION (bb) == BB_COLD_PARTITION
897 || probably_never_executed_bb_p (cfun, bb))
898 return BB_FREQ_MAX;
899
900 /* Prefer blocks whose predecessor is an end of some trace
901 or whose predecessor edge is EDGE_DFS_BACK. */
902 FOR_EACH_EDGE (e, ei, bb->preds)
903 {
904 if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
905 && bbd[e->src->index].end_of_trace >= 0)
906 || (e->flags & EDGE_DFS_BACK))
907 {
908 int edge_freq = EDGE_FREQUENCY (e);
909
910 if (edge_freq > priority)
911 priority = edge_freq;
912 }
913 }
914
915 if (priority)
916 /* The block with priority should have a significantly lower key. */
917 return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
918
919 return -bb->frequency;
920 }
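/* Illustrative key values (not part of the original source), assuming
   BB_FREQ_MAX == 10000: a cold-partition block gets key BB_FREQ_MAX; an
   ordinary block of frequency 700 gets key -700; the same block whose most
   frequent trace-ending (or DFS back edge) predecessor edge has frequency
   400 gets key -(100 * 10000 + 100 * 400 + 700) == -1040700, so
   fibheap_extract_min hands it out much earlier.  */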
921
922 /* Return true when the edge E from basic block BB is better than the temporary
923 best edge (details are given in the function body). The probability of edge E is PROB. The
924 frequency of the successor is FREQ. The current best probability is
925 BEST_PROB, the best frequency is BEST_FREQ.
926 The edge is considered to be equivalent when PROB does not differ much from
927 BEST_PROB; similarly for frequency. */
928
929 static bool
930 better_edge_p (const_basic_block bb, const_edge e, int prob, int freq,
931 int best_prob, int best_freq, const_edge cur_best_edge)
932 {
933 bool is_better_edge;
934
935 /* The BEST_* values do not have to be best, but can be a bit smaller than
936 maximum values. */
937 int diff_prob = best_prob / 10;
938 int diff_freq = best_freq / 10;
939
940 /* The smaller one is better to keep the original order. */
941 if (optimize_function_for_size_p (cfun))
942 return !cur_best_edge
943 || cur_best_edge->dest->index > e->dest->index;
944
945 if (prob > best_prob + diff_prob)
946 /* The edge has higher probability than the temporary best edge. */
947 is_better_edge = true;
948 else if (prob < best_prob - diff_prob)
949 /* The edge has lower probability than the temporary best edge. */
950 is_better_edge = false;
951 else if (freq < best_freq - diff_freq)
952 /* The edge and the temporary best edge have almost equivalent
953 probabilities. The higher frequency of a successor now means
954 that there is another edge going into that successor.
955 This successor has lower frequency so it is better. */
956 is_better_edge = true;
957 else if (freq > best_freq + diff_freq)
958 /* This successor has higher frequency so it is worse. */
959 is_better_edge = false;
960 else if (e->dest->prev_bb == bb)
961 /* The edges have equivalent probabilities and the successors
962 have equivalent frequencies. Select the previous successor. */
963 is_better_edge = true;
964 else
965 is_better_edge = false;
966
967 /* If we are doing hot/cold partitioning, make sure that we always favor
968 non-crossing edges over crossing edges. */
969
970 if (!is_better_edge
971 && flag_reorder_blocks_and_partition
972 && cur_best_edge
973 && (cur_best_edge->flags & EDGE_CROSSING)
974 && !(e->flags & EDGE_CROSSING))
975 is_better_edge = true;
976
977 return is_better_edge;
978 }
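/* The 10% tolerance above in numbers (illustrative only, not part of the
   original source; the helper name is hypothetical), written as always-true
   assertions.  */
#if 0
static void
bbro_better_edge_example (void)
{
  int best_prob = 5000;
  int diff_prob = best_prob / 10;              /* 500 */
  gcc_assert (5600 > best_prob + diff_prob);   /* Clearly better: wins.  */
  gcc_assert (4300 < best_prob - diff_prob);   /* Clearly worse: loses.  */
  /* A probability of 5200 lies inside the tolerance band, so the decision
     falls through to the frequency comparison, where the LOWER successor
     frequency wins.  */
  gcc_assert (5200 <= best_prob + diff_prob && 5200 >= best_prob - diff_prob);
}
#endif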
979
980 /* Return true when the edge E is better than the temporary best edge
981 CUR_BEST_EDGE. If SRC_INDEX_P is true, the function compares the src bb of
982 E and CUR_BEST_EDGE; otherwise it will compare the dest bb.
983 BEST_LEN is the trace length of src (or dest) bb in CUR_BEST_EDGE.
984 TRACES record the information about traces.
985 When optimizing for size, the edge with smaller index is better.
986 When optimizing for speed, the edge with bigger probability or longer trace
987 is better. */
988
989 static bool
990 connect_better_edge_p (const_edge e, bool src_index_p, int best_len,
991 const_edge cur_best_edge, struct trace *traces)
992 {
993 int e_index;
994 int b_index;
995 bool is_better_edge;
996
997 if (!cur_best_edge)
998 return true;
999
1000 if (optimize_function_for_size_p (cfun))
1001 {
1002 e_index = src_index_p ? e->src->index : e->dest->index;
1003 b_index = src_index_p ? cur_best_edge->src->index
1004 : cur_best_edge->dest->index;
1005 /* The smaller one is better to keep the original order. */
1006 return b_index > e_index;
1007 }
1008
1009 if (src_index_p)
1010 {
1011 e_index = e->src->index;
1012
1013 if (e->probability > cur_best_edge->probability)
1014 /* The edge has higher probability than the temporary best edge. */
1015 is_better_edge = true;
1016 else if (e->probability < cur_best_edge->probability)
1017 /* The edge has lower probability than the temporary best edge. */
1018 is_better_edge = false;
1019 else if (traces[bbd[e_index].end_of_trace].length > best_len)
1020 /* The edge and the temporary best edge have equivalent probabilities.
1021 The edge with longer trace is better. */
1022 is_better_edge = true;
1023 else
1024 is_better_edge = false;
1025 }
1026 else
1027 {
1028 e_index = e->dest->index;
1029
1030 if (e->probability > cur_best_edge->probability)
1031 /* The edge has higher probability than the temporary best edge. */
1032 is_better_edge = true;
1033 else if (e->probability < cur_best_edge->probability)
1034 /* The edge has lower probability than the temporary best edge. */
1035 is_better_edge = false;
1036 else if (traces[bbd[e_index].start_of_trace].length > best_len)
1037 /* The edge and the temporary best edge have equivalent probabilities.
1038 The edge with longer trace is better. */
1039 is_better_edge = true;
1040 else
1041 is_better_edge = false;
1042 }
1043
1044 return is_better_edge;
1045 }
1046
1047 /* Connect traces in array TRACES, N_TRACES is the count of traces. */
1048
1049 static void
1050 connect_traces (int n_traces, struct trace *traces)
1051 {
1052 int i;
1053 bool *connected;
1054 bool two_passes;
1055 int last_trace;
1056 int current_pass;
1057 int current_partition;
1058 int freq_threshold;
1059 gcov_type count_threshold;
1060 bool for_size = optimize_function_for_size_p (cfun);
1061
1062 freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
1063 if (max_entry_count < INT_MAX / 1000)
1064 count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
1065 else
1066 count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
1067
1068 connected = XCNEWVEC (bool, n_traces);
1069 last_trace = -1;
1070 current_pass = 1;
1071 current_partition = BB_PARTITION (traces[0].first);
1072 two_passes = false;
1073
1074 if (crtl->has_bb_partition)
1075 for (i = 0; i < n_traces && !two_passes; i++)
1076 if (BB_PARTITION (traces[0].first)
1077 != BB_PARTITION (traces[i].first))
1078 two_passes = true;
1079
1080 for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++)
1081 {
1082 int t = i;
1083 int t2;
1084 edge e, best;
1085 int best_len;
1086
1087 if (i >= n_traces)
1088 {
1089 gcc_assert (two_passes && current_pass == 1);
1090 i = 0;
1091 t = i;
1092 current_pass = 2;
1093 if (current_partition == BB_HOT_PARTITION)
1094 current_partition = BB_COLD_PARTITION;
1095 else
1096 current_partition = BB_HOT_PARTITION;
1097 }
1098
1099 if (connected[t])
1100 continue;
1101
1102 if (two_passes
1103 && BB_PARTITION (traces[t].first) != current_partition)
1104 continue;
1105
1106 connected[t] = true;
1107
1108 /* Find the predecessor traces. */
1109 for (t2 = t; t2 > 0;)
1110 {
1111 edge_iterator ei;
1112 best = NULL;
1113 best_len = 0;
1114 FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
1115 {
1116 int si = e->src->index;
1117
1118 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1119 && (e->flags & EDGE_CAN_FALLTHRU)
1120 && !(e->flags & EDGE_COMPLEX)
1121 && bbd[si].end_of_trace >= 0
1122 && !connected[bbd[si].end_of_trace]
1123 && (BB_PARTITION (e->src) == current_partition)
1124 && connect_better_edge_p (e, true, best_len, best, traces))
1125 {
1126 best = e;
1127 best_len = traces[bbd[si].end_of_trace].length;
1128 }
1129 }
1130 if (best)
1131 {
1132 best->src->aux = best->dest;
1133 t2 = bbd[best->src->index].end_of_trace;
1134 connected[t2] = true;
1135
1136 if (dump_file)
1137 {
1138 fprintf (dump_file, "Connection: %d %d\n",
1139 best->src->index, best->dest->index);
1140 }
1141 }
1142 else
1143 break;
1144 }
1145
1146 if (last_trace >= 0)
1147 traces[last_trace].last->aux = traces[t2].first;
1148 last_trace = t;
1149
1150 /* Find the successor traces. */
1151 while (1)
1152 {
1153 /* Find the continuation of the chain. */
1154 edge_iterator ei;
1155 best = NULL;
1156 best_len = 0;
1157 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1158 {
1159 int di = e->dest->index;
1160
1161 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1162 && (e->flags & EDGE_CAN_FALLTHRU)
1163 && !(e->flags & EDGE_COMPLEX)
1164 && bbd[di].start_of_trace >= 0
1165 && !connected[bbd[di].start_of_trace]
1166 && (BB_PARTITION (e->dest) == current_partition)
1167 && connect_better_edge_p (e, false, best_len, best, traces))
1168 {
1169 best = e;
1170 best_len = traces[bbd[di].start_of_trace].length;
1171 }
1172 }
1173
1174 if (for_size)
1175 {
1176 if (!best)
1177 /* Stop finding the successor traces. */
1178 break;
1179
1180 /* It is OK to connect block n with block n + 1 or a block
1181 before n. For others, only connect to the loop header. */
1182 if (best->dest->index > (traces[t].last->index + 1))
1183 {
1184 int count = EDGE_COUNT (best->dest->preds);
1185
1186 FOR_EACH_EDGE (e, ei, best->dest->preds)
1187 if (e->flags & EDGE_DFS_BACK)
1188 count--;
1189
1190 /* If dest has multiple predecessors, skip it. We expect
1191 that a predecessor with a smaller index will connect with it
1192 later. */
1193 if (count != 1)
1194 break;
1195 }
1196
1197 /* Only connect Trace n with Trace n + 1. It is conservative
1198 to keep the order as close as possible to the original order.
1199 It also helps to reduce long jumps. */
1200 if (last_trace != bbd[best->dest->index].start_of_trace - 1)
1201 break;
1202
1203 if (dump_file)
1204 fprintf (dump_file, "Connection: %d %d\n",
1205 best->src->index, best->dest->index);
1206
1207 t = bbd[best->dest->index].start_of_trace;
1208 traces[last_trace].last->aux = traces[t].first;
1209 connected[t] = true;
1210 last_trace = t;
1211 }
1212 else if (best)
1213 {
1214 if (dump_file)
1215 {
1216 fprintf (dump_file, "Connection: %d %d\n",
1217 best->src->index, best->dest->index);
1218 }
1219 t = bbd[best->dest->index].start_of_trace;
1220 traces[last_trace].last->aux = traces[t].first;
1221 connected[t] = true;
1222 last_trace = t;
1223 }
1224 else
1225 {
1226 /* Try to connect the traces by duplication of 1 block. */
1227 edge e2;
1228 basic_block next_bb = NULL;
1229 bool try_copy = false;
1230
1231 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1232 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1233 && (e->flags & EDGE_CAN_FALLTHRU)
1234 && !(e->flags & EDGE_COMPLEX)
1235 && (!best || e->probability > best->probability))
1236 {
1237 edge_iterator ei;
1238 edge best2 = NULL;
1239 int best2_len = 0;
1240
1241 /* If the destination is a start of a trace which is only
1242 one block long, then no need to search the successor
1243 blocks of the trace. Accept it. */
1244 if (bbd[e->dest->index].start_of_trace >= 0
1245 && traces[bbd[e->dest->index].start_of_trace].length
1246 == 1)
1247 {
1248 best = e;
1249 try_copy = true;
1250 continue;
1251 }
1252
1253 FOR_EACH_EDGE (e2, ei, e->dest->succs)
1254 {
1255 int di = e2->dest->index;
1256
1257 if (e2->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
1258 || ((e2->flags & EDGE_CAN_FALLTHRU)
1259 && !(e2->flags & EDGE_COMPLEX)
1260 && bbd[di].start_of_trace >= 0
1261 && !connected[bbd[di].start_of_trace]
1262 && BB_PARTITION (e2->dest) == current_partition
1263 && EDGE_FREQUENCY (e2) >= freq_threshold
1264 && e2->count >= count_threshold
1265 && (!best2
1266 || e2->probability > best2->probability
1267 || (e2->probability == best2->probability
1268 && traces[bbd[di].start_of_trace].length
1269 > best2_len))))
1270 {
1271 best = e;
1272 best2 = e2;
1273 if (e2->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1274 best2_len = traces[bbd[di].start_of_trace].length;
1275 else
1276 best2_len = INT_MAX;
1277 next_bb = e2->dest;
1278 try_copy = true;
1279 }
1280 }
1281 }
1282
1283 if (crtl->has_bb_partition)
1284 try_copy = false;
1285
1286 /* Copy tiny blocks always; copy larger blocks only when the
1287 edge is traversed frequently enough. */
1288 if (try_copy
1289 && copy_bb_p (best->dest,
1290 optimize_edge_for_speed_p (best)
1291 && EDGE_FREQUENCY (best) >= freq_threshold
1292 && best->count >= count_threshold))
1293 {
1294 basic_block new_bb;
1295
1296 if (dump_file)
1297 {
1298 fprintf (dump_file, "Connection: %d %d ",
1299 traces[t].last->index, best->dest->index);
1300 if (!next_bb)
1301 fputc ('\n', dump_file);
1302 else if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1303 fprintf (dump_file, "exit\n");
1304 else
1305 fprintf (dump_file, "%d\n", next_bb->index);
1306 }
1307
1308 new_bb = copy_bb (best->dest, best, traces[t].last, t);
1309 traces[t].last = new_bb;
1310 if (next_bb && next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
1311 {
1312 t = bbd[next_bb->index].start_of_trace;
1313 traces[last_trace].last->aux = traces[t].first;
1314 connected[t] = true;
1315 last_trace = t;
1316 }
1317 else
1318 break; /* Stop finding the successor traces. */
1319 }
1320 else
1321 break; /* Stop finding the successor traces. */
1322 }
1323 }
1324 }
1325
1326 if (dump_file)
1327 {
1328 basic_block bb;
1329
1330 fprintf (dump_file, "Final order:\n");
1331 for (bb = traces[0].first; bb; bb = (basic_block) bb->aux)
1332 fprintf (dump_file, "%d ", bb->index);
1333 fprintf (dump_file, "\n");
1334 fflush (dump_file);
1335 }
1336
1337 FREE (connected);
1338 }
1339
1340 /* Return true when BB can and should be copied. CODE_MAY_GROW is true
1341 when code size is allowed to grow by duplication. */
1342
1343 static bool
1344 copy_bb_p (const_basic_block bb, int code_may_grow)
1345 {
1346 int size = 0;
1347 int max_size = uncond_jump_length;
1348 rtx_insn *insn;
1349
1350 if (!bb->frequency)
1351 return false;
1352 if (EDGE_COUNT (bb->preds) < 2)
1353 return false;
1354 if (!can_duplicate_block_p (bb))
1355 return false;
1356
1357 /* Avoid duplicating blocks which have many successors (PR/13430). */
1358 if (EDGE_COUNT (bb->succs) > 8)
1359 return false;
1360
1361 if (code_may_grow && optimize_bb_for_speed_p (bb))
1362 max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
1363
1364 FOR_BB_INSNS (bb, insn)
1365 {
1366 if (INSN_P (insn))
1367 size += get_attr_min_length (insn);
1368 }
1369
1370 if (size <= max_size)
1371 return true;
1372
1373 if (dump_file)
1374 {
1375 fprintf (dump_file,
1376 "Block %d can't be copied because its size = %d.\n",
1377 bb->index, size);
1378 }
1379
1380 return false;
1381 }
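/* Illustrative limit (not part of the original source): if the target's
   unconditional jump has minimum length 4 and PARAM_MAX_GROW_COPY_BB_INSNS
   is 8, a block whose insns sum to at most 32 units of minimum length may be
   copied when CODE_MAY_GROW is set and the block is optimized for speed;
   otherwise the limit stays at 4.  */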
1382
1383 /* Return the length of an unconditional jump instruction. */
1384
1385 int
1386 get_uncond_jump_length (void)
1387 {
1388 rtx_insn *label, *jump;
1389 int length;
1390
1391 label = emit_label_before (gen_label_rtx (), get_insns ());
1392 jump = emit_jump_insn (gen_jump (label));
1393
1394 length = get_attr_min_length (jump);
1395
1396 delete_insn (jump);
1397 delete_insn (label);
1398 return length;
1399 }
1400
1401 /* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
1402 Duplicate the landing pad and split the edges so that no EH edge
1403 crosses partitions. */
1404
1405 static void
1406 fix_up_crossing_landing_pad (eh_landing_pad old_lp, basic_block old_bb)
1407 {
1408 eh_landing_pad new_lp;
1409 basic_block new_bb, last_bb, post_bb;
1410 rtx_insn *new_label, *jump;
1411 rtx post_label;
1412 unsigned new_partition;
1413 edge_iterator ei;
1414 edge e;
1415
1416 /* Generate the new landing-pad structure. */
1417 new_lp = gen_eh_landing_pad (old_lp->region);
1418 new_lp->post_landing_pad = old_lp->post_landing_pad;
1419 new_lp->landing_pad = gen_label_rtx ();
1420 LABEL_PRESERVE_P (new_lp->landing_pad) = 1;
1421
1422 /* Put appropriate instructions in new bb. */
1423 new_label = emit_label (new_lp->landing_pad);
1424
1425 expand_dw2_landing_pad_for_region (old_lp->region);
1426
1427 post_bb = BLOCK_FOR_INSN (old_lp->landing_pad);
1428 post_bb = single_succ (post_bb);
1429 post_label = block_label (post_bb);
1430 jump = emit_jump_insn (gen_jump (post_label));
1431 JUMP_LABEL (jump) = post_label;
1432
1433 /* Create new basic block to be dest for lp. */
1434 last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
1435 new_bb = create_basic_block (new_label, jump, last_bb);
1436 new_bb->aux = last_bb->aux;
1437 last_bb->aux = new_bb;
1438
1439 emit_barrier_after_bb (new_bb);
1440
1441 make_edge (new_bb, post_bb, 0);
1442
1443 /* Make sure new bb is in the other partition. */
1444 new_partition = BB_PARTITION (old_bb);
1445 new_partition ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
1446 BB_SET_PARTITION (new_bb, new_partition);
1447
1448 /* Fix up the edges. */
1449 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)) != NULL; )
1450 if (BB_PARTITION (e->src) == new_partition)
1451 {
1452 rtx_insn *insn = BB_END (e->src);
1453 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
1454
1455 gcc_assert (note != NULL);
1456 gcc_checking_assert (INTVAL (XEXP (note, 0)) == old_lp->index);
1457 XEXP (note, 0) = GEN_INT (new_lp->index);
1458
1459 /* Adjust the edge to the new destination. */
1460 redirect_edge_succ (e, new_bb);
1461 }
1462 else
1463 ei_next (&ei);
1464 }
1465
1466
1467 /* Ensure that all hot bbs are included in a hot path through the
1468 procedure. This is done by calling this function twice, once
1469 with WALK_UP true (to look for paths from the entry to hot bbs) and
1470 once with WALK_UP false (to look for paths from hot bbs to the exit).
1471 Returns the updated value of COLD_BB_COUNT and adds newly-hot bbs
1472 to BBS_IN_HOT_PARTITION. */
1473
1474 static unsigned int
1475 sanitize_hot_paths (bool walk_up, unsigned int cold_bb_count,
1476 vec<basic_block> *bbs_in_hot_partition)
1477 {
1478 /* Callers check this. */
1479 gcc_checking_assert (cold_bb_count);
1480
1481 /* Keep examining hot bbs while we still have some left to check
1482 and there are remaining cold bbs. */
1483 vec<basic_block> hot_bbs_to_check = bbs_in_hot_partition->copy ();
1484 while (! hot_bbs_to_check.is_empty ()
1485 && cold_bb_count)
1486 {
1487 basic_block bb = hot_bbs_to_check.pop ();
1488 vec<edge, va_gc> *edges = walk_up ? bb->preds : bb->succs;
1489 edge e;
1490 edge_iterator ei;
1491 int highest_probability = 0;
1492 int highest_freq = 0;
1493 gcov_type highest_count = 0;
1494 bool found = false;
1495
1496 /* Walk the preds/succs and check if there is at least one already
1497 marked hot. Keep track of the most frequent pred/succ so that we
1498 can mark it hot if we don't find one. */
1499 FOR_EACH_EDGE (e, ei, edges)
1500 {
1501 basic_block reach_bb = walk_up ? e->src : e->dest;
1502
1503 if (e->flags & EDGE_DFS_BACK)
1504 continue;
1505
1506 if (BB_PARTITION (reach_bb) != BB_COLD_PARTITION)
1507 {
1508 found = true;
1509 break;
1510 }
1511 /* The following loop will look for the hottest edge via the edge
1512 count; if that is zero, it falls back to the edge frequency and
1513 finally to the edge probability. */
1514 if (e->count > highest_count)
1515 highest_count = e->count;
1516 int edge_freq = EDGE_FREQUENCY (e);
1517 if (edge_freq > highest_freq)
1518 highest_freq = edge_freq;
1519 if (e->probability > highest_probability)
1520 highest_probability = e->probability;
1521 }
1522
1523 /* If bb is reached by (or reaches, in the case of !WALK_UP) another hot
1524 block (or unpartitioned, e.g. the entry block) then it is ok. If not,
1525 then the most frequent pred (or succ) needs to be adjusted. In the
1526 case where multiple preds/succs have the same frequency (e.g. a
1527 50-50 branch), then both will be adjusted. */
1528 if (found)
1529 continue;
1530
1531 FOR_EACH_EDGE (e, ei, edges)
1532 {
1533 if (e->flags & EDGE_DFS_BACK)
1534 continue;
1535 /* Select the hottest edge using the edge count; if that is zero,
1536 fall back to the edge frequency and finally to the edge
1537 probability. */
1538 if (highest_count)
1539 {
1540 if (e->count < highest_count)
1541 continue;
1542 }
1543 else if (highest_freq)
1544 {
1545 if (EDGE_FREQUENCY (e) < highest_freq)
1546 continue;
1547 }
1548 else if (e->probability < highest_probability)
1549 continue;
1550
1551 basic_block reach_bb = walk_up ? e->src : e->dest;
1552
1553 /* We have a hot bb whose selected predecessor (or successor) is
1554 cold. It needs to be re-marked hot. */
1555 BB_SET_PARTITION (reach_bb, BB_HOT_PARTITION);
1556 cold_bb_count--;
1557
1558 /* Now we need to examine the newly-hot reach_bb to see if it too
1559 lacks a hot predecessor (or successor, when walking down). */
1560 bbs_in_hot_partition->safe_push (reach_bb);
1561 hot_bbs_to_check.safe_push (reach_bb);
1562 }
1563 }
1564
1565 return cold_bb_count;
1566 }
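/* Illustration (not from the original source): if hot block B's only
   non-back-edge predecessor P was marked cold, the walk above re-marks P as
   hot (decrementing COLD_BB_COUNT) and then queues P itself, since P's own
   predecessors may need the same fix; the WALK_UP == false pass repairs
   paths from hot blocks down to the exit in the same way.  */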
1567
1568
1569 /* Find the basic blocks that are rarely executed and need to be moved to
1570 a separate section of the .o file (to cut down on paging and improve
1571 cache locality). Return a vector of all edges that cross. */
1572
1573 static vec<edge>
1574 find_rarely_executed_basic_blocks_and_crossing_edges (void)
1575 {
1576 vec<edge> crossing_edges = vNULL;
1577 basic_block bb;
1578 edge e;
1579 edge_iterator ei;
1580 unsigned int cold_bb_count = 0;
1581 vec<basic_block> bbs_in_hot_partition = vNULL;
1582
1583 /* Mark which partition (hot/cold) each basic block belongs in. */
1584 FOR_EACH_BB_FN (bb, cfun)
1585 {
1586 bool cold_bb = false;
1587
1588 if (probably_never_executed_bb_p (cfun, bb))
1589 {
1590 /* Handle profile insanities created by upstream optimizations
1591 by also checking the incoming edge weights. If there is a non-cold
1592 incoming edge, conservatively prevent this block from being split
1593 into the cold section. */
1594 cold_bb = true;
1595 FOR_EACH_EDGE (e, ei, bb->preds)
1596 if (!probably_never_executed_edge_p (cfun, e))
1597 {
1598 cold_bb = false;
1599 break;
1600 }
1601 }
1602 if (cold_bb)
1603 {
1604 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
1605 cold_bb_count++;
1606 }
1607 else
1608 {
1609 BB_SET_PARTITION (bb, BB_HOT_PARTITION);
1610 bbs_in_hot_partition.safe_push (bb);
1611 }
1612 }
1613
1614 /* Ensure that hot bbs are included along a hot path from the entry to exit.
1615 There are several ways that cold bbs can end up along all paths
1616 to/from a hot bb. One is that there are edge weight insanities
1617 due to optimization phases that do not properly update basic block profile
1618 counts. The second is that the entry of the function may not be hot, because
1619 it is entered fewer times than the number of profile training runs, but there
1620 is a loop inside the function that causes blocks within the function to be
1621 above the threshold for hotness. This is fixed by walking up from hot bbs
1622 to the entry block, and then down from hot bbs to the exit, performing
1623 partitioning fixups as necessary. */
1624 if (cold_bb_count)
1625 {
1626 mark_dfs_back_edges ();
1627 cold_bb_count = sanitize_hot_paths (true, cold_bb_count,
1628 &bbs_in_hot_partition);
1629 if (cold_bb_count)
1630 sanitize_hot_paths (false, cold_bb_count, &bbs_in_hot_partition);
1631 }
1632
1633 /* The format of .gcc_except_table does not allow landing pads to
1634 be in a different partition from the throw. Fix this by either
1635 moving or duplicating the landing pads. */
1636 if (cfun->eh->lp_array)
1637 {
1638 unsigned i;
1639 eh_landing_pad lp;
1640
1641 FOR_EACH_VEC_ELT (*cfun->eh->lp_array, i, lp)
1642 {
1643 bool all_same, all_diff;
1644
1645 if (lp == NULL
1646 || lp->landing_pad == NULL_RTX
1647 || !LABEL_P (lp->landing_pad))
1648 continue;
1649
1650 all_same = all_diff = true;
1651 bb = BLOCK_FOR_INSN (lp->landing_pad);
1652 FOR_EACH_EDGE (e, ei, bb->preds)
1653 {
1654 gcc_assert (e->flags & EDGE_EH);
1655 if (BB_PARTITION (bb) == BB_PARTITION (e->src))
1656 all_diff = false;
1657 else
1658 all_same = false;
1659 }
1660
1661 if (all_same)
1662 ;
1663 else if (all_diff)
1664 {
1665 int which = BB_PARTITION (bb);
1666 which ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
1667 BB_SET_PARTITION (bb, which);
1668 }
1669 else
1670 fix_up_crossing_landing_pad (lp, bb);
1671 }
1672 }
1673
1674 /* Mark every edge that crosses between sections. */
1675
1676 FOR_EACH_BB_FN (bb, cfun)
1677 FOR_EACH_EDGE (e, ei, bb->succs)
1678 {
1679 unsigned int flags = e->flags;
1680
1681 /* We should never have EDGE_CROSSING set yet. */
1682 gcc_checking_assert ((flags & EDGE_CROSSING) == 0);
1683
1684 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1685 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1686 && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1687 {
1688 crossing_edges.safe_push (e);
1689 flags |= EDGE_CROSSING;
1690 }
1691
1692 /* Now that we've split eh edges as appropriate, allow landing pads
1693 to be merged with the post-landing pads. */
1694 flags &= ~EDGE_PRESERVE;
1695
1696 e->flags = flags;
1697 }
1698
1699 return crossing_edges;
1700 }
1701
1702 /* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru. */
1703
1704 static void
1705 set_edge_can_fallthru_flag (void)
1706 {
1707 basic_block bb;
1708
1709 FOR_EACH_BB_FN (bb, cfun)
1710 {
1711 edge e;
1712 edge_iterator ei;
1713
1714 FOR_EACH_EDGE (e, ei, bb->succs)
1715 {
1716 e->flags &= ~EDGE_CAN_FALLTHRU;
1717
1718 /* The FALLTHRU edge is also a CAN_FALLTHRU edge. */
1719 if (e->flags & EDGE_FALLTHRU)
1720 e->flags |= EDGE_CAN_FALLTHRU;
1721 }
1722
1723 /* If the BB ends with an invertible condjump, all (2) edges are
1724 CAN_FALLTHRU edges. */
1725 if (EDGE_COUNT (bb->succs) != 2)
1726 continue;
1727 if (!any_condjump_p (BB_END (bb)))
1728 continue;
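/* Probe invertibility: the first invert_jump flips the condition (and we
   skip the block if that is impossible); the second immediately flips it
   back, leaving the jump unchanged but proving both directions are
   representable.  */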
1729 if (!invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0))
1730 continue;
1731 invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0);
1732 EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU;
1733 EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU;
1734 }
1735 }
1736
1737 /* If any destination of a crossing edge does not have a label, add a label;
1738 convert any easy fall-through crossing edges to unconditional jumps. */
1739
1740 static void
1741 add_labels_and_missing_jumps (vec<edge> crossing_edges)
1742 {
1743 size_t i;
1744 edge e;
1745
1746 FOR_EACH_VEC_ELT (crossing_edges, i, e)
1747 {
1748 basic_block src = e->src;
1749 basic_block dest = e->dest;
1750 rtx label;
1751 rtx_insn *new_jump;
1752
1753 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1754 continue;
1755
1756 /* Make sure dest has a label. */
1757 label = block_label (dest);
1758
1759 /* Nothing to do for non-fallthru edges. */
1760 if (src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1761 continue;
1762 if ((e->flags & EDGE_FALLTHRU) == 0)
1763 continue;
1764
1765 /* If the block does not end with a control flow insn, then we
1766 can trivially add a jump to the end to fix up the crossing.
1767 Otherwise the jump will have to go in a new bb, which will
1768 be handled by the fix_up_fall_thru_edges function. */
1769 if (control_flow_insn_p (BB_END (src)))
1770 continue;
1771
1772 /* Make sure there's only one successor. */
1773 gcc_assert (single_succ_p (src));
1774
1775 new_jump = emit_jump_insn_after (gen_jump (label), BB_END (src));
1776 BB_END (src) = new_jump;
1777 JUMP_LABEL (new_jump) = label;
1778 LABEL_NUSES (label) += 1;
1779
1780 emit_barrier_after_bb (src);
1781
1782 /* Mark edge as non-fallthru. */
1783 e->flags &= ~EDGE_FALLTHRU;
1784 }
1785 }
1786
1787 /* Find any bb's where the fall-through edge is a crossing edge (note that
1788 these bb's must also contain a conditional jump or end with a call
1789 instruction; we've already dealt with fall-through edges for blocks
1790 that didn't have a conditional jump or didn't end with a call instruction
1791 in the call to add_labels_and_missing_jumps). Convert the fall-through
1792 edge to a non-crossing edge by inserting a new bb to fall through into.
1793 The new bb will contain an unconditional jump (crossing edge) to the
1794 original fall through destination. */
1795
1796 static void
1797 fix_up_fall_thru_edges (void)
1798 {
1799 basic_block cur_bb;
1800 basic_block new_bb;
1801 edge succ1;
1802 edge succ2;
1803 edge fall_thru;
1804 edge cond_jump = NULL;
1805 edge e;
1806 bool cond_jump_crosses;
1807 int invert_worked;
1808 rtx_insn *old_jump;
1809 rtx fall_thru_label;
1810
1811 FOR_EACH_BB_FN (cur_bb, cfun)
1812 {
1813 fall_thru = NULL;
1814 if (EDGE_COUNT (cur_bb->succs) > 0)
1815 succ1 = EDGE_SUCC (cur_bb, 0);
1816 else
1817 succ1 = NULL;
1818
1819 if (EDGE_COUNT (cur_bb->succs) > 1)
1820 succ2 = EDGE_SUCC (cur_bb, 1);
1821 else
1822 succ2 = NULL;
1823
1824 /* Find the fall-through edge. */
1825
1826 if (succ1
1827 && (succ1->flags & EDGE_FALLTHRU))
1828 {
1829 fall_thru = succ1;
1830 cond_jump = succ2;
1831 }
1832 else if (succ2
1833 && (succ2->flags & EDGE_FALLTHRU))
1834 {
1835 fall_thru = succ2;
1836 cond_jump = succ1;
1837 }
1838 else if (succ1
1839 && (block_ends_with_call_p (cur_bb)
1840 || can_throw_internal (BB_END (cur_bb))))
1841 {
1842 edge e;
1843 edge_iterator ei;
1844
1845 FOR_EACH_EDGE (e, ei, cur_bb->succs)
1846 if (e->flags & EDGE_FALLTHRU)
1847 {
1848 fall_thru = e;
1849 break;
1850 }
1851 }
1852
1853 if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
1854 {
1855 /* Check to see if the fall-thru edge is a crossing edge. */
1856
1857 if (fall_thru->flags & EDGE_CROSSING)
1858 {
1859 /* The fall_thru edge crosses; now check the cond jump edge, if
1860 it exists. */
1861
1862 cond_jump_crosses = true;
1863 invert_worked = 0;
1864 old_jump = BB_END (cur_bb);
1865
1866 /* Find the jump instruction, if there is one. */
1867
1868 if (cond_jump)
1869 {
1870 if (!(cond_jump->flags & EDGE_CROSSING))
1871 cond_jump_crosses = false;
1872
1873 /* We know the fall-thru edge crosses; if the cond
1874 jump edge does NOT cross, and its destination is the
1875 next block in the bb order, invert the jump
1876 (i.e. fix it so the fall through does not cross and
1877 the cond jump does). */
1878
1879 if (!cond_jump_crosses)
1880 {
1881 /* Find label in fall_thru block. We've already added
1882 any missing labels, so there must be one. */
1883
1884 fall_thru_label = block_label (fall_thru->dest);
1885
1886 if (old_jump && JUMP_P (old_jump) && fall_thru_label)
1887 invert_worked = invert_jump (old_jump,
1888 fall_thru_label,0);
1889 if (invert_worked)
1890 {
1891 fall_thru->flags &= ~EDGE_FALLTHRU;
1892 cond_jump->flags |= EDGE_FALLTHRU;
1893 update_br_prob_note (cur_bb);
1894 e = fall_thru;
1895 fall_thru = cond_jump;
1896 cond_jump = e;
1897 cond_jump->flags |= EDGE_CROSSING;
1898 fall_thru->flags &= ~EDGE_CROSSING;
1899 }
1900 }
1901 }
1902
1903 if (cond_jump_crosses || !invert_worked)
1904 {
1905 /* This is the case where both edges out of the basic
1906 block are crossing edges. Here we will fix up the
1907 fall-through edge. The jump edge will be taken care
1908 of later. The EDGE_CROSSING flag of the fall_thru edge
1909 is unset before the call to the force_nonfallthru
1910 function because, if a new basic block is created,
1911 this edge stays within the current section while the
1912 edge between new_bb and fall_thru->dest becomes the
1913 EDGE_CROSSING one. */
1914
1915 fall_thru->flags &= ~EDGE_CROSSING;
1916 new_bb = force_nonfallthru (fall_thru);
1917
1918 if (new_bb)
1919 {
1920 new_bb->aux = cur_bb->aux;
1921 cur_bb->aux = new_bb;
1922
1923 /* This is done by force_nonfallthru_and_redirect. */
1924 gcc_assert (BB_PARTITION (new_bb)
1925 == BB_PARTITION (cur_bb));
1926
1927 single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
1928 }
1929 else
1930 {
1931 /* If a new basic block was not created, restore
1932 the EDGE_CROSSING flag. */
1933 fall_thru->flags |= EDGE_CROSSING;
1934 }
1935
1936 /* Add a barrier after the new jump. */
1937 emit_barrier_after_bb (new_bb ? new_bb : cur_bb);
1938 }
1939 }
1940 }
1941 }
1942 }
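
/* A rough source-level picture of what the fixup above buys us.  This is an
   illustrative sketch only: the function and the hint below are invented and
   nothing here is used by the pass.  Once the unlikely path of a function
   lives in the cold section, hot code can no longer simply fall off the end
   of its section into it, so a crossing fall-through edge is rerouted
   through a small block in the hot partition whose only instruction is an
   explicit (crossing) unconditional jump.  */

static int
bbpart_fall_thru_sketch (int err)
{
  if (__builtin_expect (err != 0, 0))
    {
      /* Unlikely: with hot/cold partitioning this block would typically be
         placed in the cold section, and the hot code above reaches it only
         through an explicit jump inserted here, never by falling through.  */
      return -1;
    }
  /* Hot fall-through path.  */
  return 0;
}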
1943
1944 /* This function checks the destination block of a "crossing jump" to
1945 see if it has any crossing predecessors that begin with a code label
1946 and end with an unconditional jump. If so, it returns that predecessor
1947 block. (This is to avoid creating lots of new basic blocks that all
1948 contain unconditional jumps to the same destination). */
1949
1950 static basic_block
1951 find_jump_block (basic_block jump_dest)
1952 {
1953 basic_block source_bb = NULL;
1954 edge e;
1955 rtx_insn *insn;
1956 edge_iterator ei;
1957
1958 FOR_EACH_EDGE (e, ei, jump_dest->preds)
1959 if (e->flags & EDGE_CROSSING)
1960 {
1961 basic_block src = e->src;
1962
1963 /* Check each predecessor to see if it has a label, and contains
1964 only one executable instruction, which is an unconditional jump.
1965 If so, we can use it. */
1966
1967 if (LABEL_P (BB_HEAD (src)))
1968 for (insn = BB_HEAD (src);
1969 !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
1970 insn = NEXT_INSN (insn))
1971 {
1972 if (INSN_P (insn)
1973 && insn == BB_END (src)
1974 && JUMP_P (insn)
1975 && !any_condjump_p (insn))
1976 {
1977 source_bb = src;
1978 break;
1979 }
1980 }
1981
1982 if (source_bb)
1983 break;
1984 }
1985
1986 return source_bb;
1987 }
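
/* Illustrative example (invented; not used by the pass) of the reuse this
   function enables: both unlikely tests below branch to the same cold
   destination, so instead of creating one stub block per crossing
   conditional branch, fix_crossing_conditional_branches can retarget the
   second branch at the stub already created for the first one.  */

static int
shared_stub_sketch (int a, int b)
{
  if (__builtin_expect (a < 0, 0))
    goto fail;         /* first crossing branch: a stub block is created */
  if (__builtin_expect (b < 0, 0))
    goto fail;         /* second branch can share that stub */
  return a + b;

 fail:                 /* common cold destination */
  return -1;
}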
1988
1989 /* Find all BB's with conditional jumps that are crossing edges;
1990 insert a new bb and make the conditional jump branch to the new
1991 bb instead (make the new bb the same color so the conditional branch
1992 won't be a 'crossing' edge). Insert an unconditional jump from the
1993 new bb to the original destination of the conditional jump. */
1994
1995 static void
1996 fix_crossing_conditional_branches (void)
1997 {
1998 basic_block cur_bb;
1999 basic_block new_bb;
2000 basic_block dest;
2001 edge succ1;
2002 edge succ2;
2003 edge crossing_edge;
2004 edge new_edge;
2005 rtx_insn *old_jump;
2006 rtx set_src;
2007 rtx old_label = NULL_RTX;
2008 rtx new_label;
2009
2010 FOR_EACH_BB_FN (cur_bb, cfun)
2011 {
2012 crossing_edge = NULL;
2013 if (EDGE_COUNT (cur_bb->succs) > 0)
2014 succ1 = EDGE_SUCC (cur_bb, 0);
2015 else
2016 succ1 = NULL;
2017
2018 if (EDGE_COUNT (cur_bb->succs) > 1)
2019 succ2 = EDGE_SUCC (cur_bb, 1);
2020 else
2021 succ2 = NULL;
2022
2023 /* We already took care of fall-through edges, so only one successor
2024 can be a crossing edge. */
2025
2026 if (succ1 && (succ1->flags & EDGE_CROSSING))
2027 crossing_edge = succ1;
2028 else if (succ2 && (succ2->flags & EDGE_CROSSING))
2029 crossing_edge = succ2;
2030
2031 if (crossing_edge)
2032 {
2033 old_jump = BB_END (cur_bb);
2034
2035 /* Check to make sure the jump instruction is a
2036 conditional jump. */
2037
2038 set_src = NULL_RTX;
2039
2040 if (any_condjump_p (old_jump))
2041 {
2042 if (GET_CODE (PATTERN (old_jump)) == SET)
2043 set_src = SET_SRC (PATTERN (old_jump));
2044 else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
2045 {
2046 set_src = XVECEXP (PATTERN (old_jump), 0,0);
2047 if (GET_CODE (set_src) == SET)
2048 set_src = SET_SRC (set_src);
2049 else
2050 set_src = NULL_RTX;
2051 }
2052 }
2053
2054 if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
2055 {
2056 if (GET_CODE (XEXP (set_src, 1)) == PC)
2057 old_label = XEXP (set_src, 2);
2058 else if (GET_CODE (XEXP (set_src, 2)) == PC)
2059 old_label = XEXP (set_src, 1);
2060
2061 /* Check to see if a new bb for jumping to that dest has
2062 already been created; if so, use it; if not, create
2063 a new one. */
2064
2065 new_bb = find_jump_block (crossing_edge->dest);
2066
2067 if (new_bb)
2068 new_label = block_label (new_bb);
2069 else
2070 {
2071 basic_block last_bb;
2072 rtx_insn *new_jump;
2073
2074 /* Create new basic block to be dest for
2075 conditional jump. */
2076
2077 /* Put appropriate instructions in new bb. */
2078
2079 new_label = gen_label_rtx ();
2080 emit_label (new_label);
2081
2082 gcc_assert (GET_CODE (old_label) == LABEL_REF);
2083 old_label = JUMP_LABEL (old_jump);
2084 new_jump = emit_jump_insn (gen_jump (old_label));
2085 JUMP_LABEL (new_jump) = old_label;
2086
2087 last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2088 new_bb = create_basic_block (new_label, new_jump, last_bb);
2089 new_bb->aux = last_bb->aux;
2090 last_bb->aux = new_bb;
2091
2092 emit_barrier_after_bb (new_bb);
2093
2094 /* Make sure new bb is in same partition as source
2095 of conditional branch. */
2096 BB_COPY_PARTITION (new_bb, cur_bb);
2097 }
2098
2099 /* Make old jump branch to new bb. */
2100
2101 redirect_jump (old_jump, new_label, 0);
2102
2103 /* Remove crossing_edge as predecessor of 'dest'. */
2104
2105 dest = crossing_edge->dest;
2106
2107 redirect_edge_succ (crossing_edge, new_bb);
2108
2109 /* Make a new edge from new_bb to old dest; new edge
2110 will be a successor for new_bb and a predecessor
2111 for 'dest'. */
2112
2113 if (EDGE_COUNT (new_bb->succs) == 0)
2114 new_edge = make_edge (new_bb, dest, 0);
2115 else
2116 new_edge = EDGE_SUCC (new_bb, 0);
2117
2118 crossing_edge->flags &= ~EDGE_CROSSING;
2119 new_edge->flags |= EDGE_CROSSING;
2120 }
2121 }
2122 }
2123 }
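
/* Source-level analog of the indirection added above (an illustrative
   sketch only; the names are invented).  A conditional branch that would
   otherwise have to reach the cold section directly is split into a short
   conditional branch to a stub in the same (hot) partition plus an
   unconditional jump that performs the actual crossing -- which is what
   makes this safe on targets whose conditional branches have limited
   range.  */

static int
split_cond_branch_sketch (int x)
{
  if (__builtin_expect (x < 0, 0))
    goto cold_via_stub;    /* short branch, stays within the hot section */
  return 2 * x;

 cold_via_stub:            /* the new block created by the pass */
  goto cold_handler;       /* the unconditional jump does the crossing */

 cold_handler:             /* conceptually in the cold section */
  return -1;
}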
2124
2125 /* Find any unconditional branches that cross between hot and cold
2126 sections. Convert them into indirect jumps instead. */
2127
2128 static void
2129 fix_crossing_unconditional_branches (void)
2130 {
2131 basic_block cur_bb;
2132 rtx_insn *last_insn;
2133 rtx label;
2134 rtx label_addr;
2135 rtx_insn *indirect_jump_sequence;
2136 rtx_insn *jump_insn = NULL;
2137 rtx new_reg;
2138 rtx_insn *cur_insn;
2139 edge succ;
2140
2141 FOR_EACH_BB_FN (cur_bb, cfun)
2142 {
2143 last_insn = BB_END (cur_bb);
2144
2145 if (EDGE_COUNT (cur_bb->succs) < 1)
2146 continue;
2147
2148 succ = EDGE_SUCC (cur_bb, 0);
2149
2150 /* Check to see if bb ends in a crossing (unconditional) jump. At
2151 this point, no crossing jumps should be conditional. */
2152
2153 if (JUMP_P (last_insn)
2154 && (succ->flags & EDGE_CROSSING))
2155 {
2156 gcc_assert (!any_condjump_p (last_insn));
2157
2158 /* Make sure the jump is not already an indirect or table jump. */
2159
2160 if (!computed_jump_p (last_insn)
2161 && !tablejump_p (last_insn, NULL, NULL))
2162 {
2163 /* We have found a "crossing" unconditional branch. Now
2164 we must convert it to an indirect jump. First create a
2165 reference to the label, as the target for the jump. */
2166
2167 label = JUMP_LABEL (last_insn);
2168 label_addr = gen_rtx_LABEL_REF (Pmode, label);
2169 LABEL_NUSES (label) += 1;
2170
2171 /* Get a register to use for the indirect jump. */
2172
2173 new_reg = gen_reg_rtx (Pmode);
2174
2175 /* Generate the indirect jump sequence. */
2176
2177 start_sequence ();
2178 emit_move_insn (new_reg, label_addr);
2179 emit_indirect_jump (new_reg);
2180 indirect_jump_sequence = get_insns ();
2181 end_sequence ();
2182
2183 /* Make sure every instruction in the new jump sequence has
2184 its basic block set to be cur_bb. */
2185
2186 for (cur_insn = indirect_jump_sequence; cur_insn;
2187 cur_insn = NEXT_INSN (cur_insn))
2188 {
2189 if (!BARRIER_P (cur_insn))
2190 BLOCK_FOR_INSN (cur_insn) = cur_bb;
2191 if (JUMP_P (cur_insn))
2192 jump_insn = cur_insn;
2193 }
2194
2195 /* Insert the new (indirect) jump sequence immediately before
2196 the unconditional jump, then delete the unconditional jump. */
2197
2198 emit_insn_before (indirect_jump_sequence, last_insn);
2199 delete_insn (last_insn);
2200
2201 JUMP_LABEL (jump_insn) = label;
2202 LABEL_NUSES (label)++;
2203
2204 /* Make BB_END for cur_bb be the jump instruction (NOT the
2205 barrier instruction at the end of the sequence...). */
2206
2207 BB_END (cur_bb) = jump_insn;
2208 }
2209 }
2210 }
2211 }
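
/* The GNU C "labels as values" extension gives a convenient source-level
   picture of the rewrite above (an illustrative sketch only; nothing here
   is used by the pass).  The label address is first loaded into an object
   -- at the RTL level, a fresh pseudo register -- and the branch then jumps
   through it, so the reachable distance is limited by the pointer rather
   than by the encoding of a direct unconditional jump.  */

static int
indirect_jump_sketch (int x)
{
  void *target = &&far_away;   /* corresponds to the emit_move_insn above */
  if (x)
    goto *target;              /* corresponds to emit_indirect_jump above */
  return 0;

 far_away:
  return 1;
}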
2212
2213 /* Update CROSSING_JUMP_P flags on all jump insns. */
2214
2215 static void
2216 update_crossing_jump_flags (void)
2217 {
2218 basic_block bb;
2219 edge e;
2220 edge_iterator ei;
2221
2222 FOR_EACH_BB_FN (bb, cfun)
2223 FOR_EACH_EDGE (e, ei, bb->succs)
2224 if (e->flags & EDGE_CROSSING)
2225 {
2226 if (JUMP_P (BB_END (bb))
2227 /* Some flags were added during fix_up_fall_thru_edges, via
2228 force_nonfallthru_and_redirect. */
2229 && !CROSSING_JUMP_P (BB_END (bb)))
2230 CROSSING_JUMP_P (BB_END (bb)) = 1;
2231 break;
2232 }
2233 }
2234
2235 /* Reorder basic blocks. The main entry point to this file. The function
2236 must already be in cfglayout mode when this is called. */
2237
2238 static void
2239 reorder_basic_blocks (void)
2240 {
2241 int n_traces;
2242 int i;
2243 struct trace *traces;
2244
2245 gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
2246
2247 if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
2248 return;
2249
2250 set_edge_can_fallthru_flag ();
2251 mark_dfs_back_edges ();
2252
2253 /* We estimate the length of the uncond jump insn only once, since the code
2254 for getting the insn length always returns the minimal length now. */
2255 if (uncond_jump_length == 0)
2256 uncond_jump_length = get_uncond_jump_length ();
2257
2258 /* We need to know some information for each basic block. */
2259 array_size = GET_ARRAY_SIZE (last_basic_block_for_fn (cfun));
2260 bbd = XNEWVEC (bbro_basic_block_data, array_size);
2261 for (i = 0; i < array_size; i++)
2262 {
2263 bbd[i].start_of_trace = -1;
2264 bbd[i].end_of_trace = -1;
2265 bbd[i].in_trace = -1;
2266 bbd[i].visited = 0;
2267 bbd[i].heap = NULL;
2268 bbd[i].node = NULL;
2269 }
2270
2271 traces = XNEWVEC (struct trace, n_basic_blocks_for_fn (cfun));
2272 n_traces = 0;
2273 find_traces (&n_traces, traces);
2274 connect_traces (n_traces, traces);
2275 FREE (traces);
2276 FREE (bbd);
2277
2278 relink_block_chain (/*stay_in_cfglayout_mode=*/true);
2279
2280 if (dump_file)
2281 {
2282 if (dump_flags & TDF_DETAILS)
2283 dump_reg_info (dump_file);
2284 dump_flow_info (dump_file, dump_flags);
2285 }
2286
2287 /* Signal that rtl_verify_flow_info_1 can now verify that there
2288 is at most one switch between hot/cold sections. */
2289 crtl->bb_reorder_complete = true;
2290 }
2291
2292 /* Determine which partition the first basic block in the function
2293 belongs to, then find the first basic block in the current function
2294 that belongs to a different section, and insert a
2295 NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the
2296 instruction stream. When writing out the assembly code,
2297 encountering this note will make the compiler switch between the
2298 hot and cold text sections. */
2299
2300 void
2301 insert_section_boundary_note (void)
2302 {
2303 basic_block bb;
2304 bool switched_sections = false;
2305 int current_partition = 0;
2306
2307 if (!crtl->has_bb_partition)
2308 return;
2309
2310 FOR_EACH_BB_FN (bb, cfun)
2311 {
2312 if (!current_partition)
2313 current_partition = BB_PARTITION (bb);
2314 if (BB_PARTITION (bb) != current_partition)
2315 {
2316 gcc_assert (!switched_sections);
2317 switched_sections = true;
2318 emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS, BB_HEAD (bb));
2319 current_partition = BB_PARTITION (bb);
2320 }
2321 }
2322 }
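
/* By the time this runs, block reordering has grouped each partition's
   blocks together, so a single switch note per function suffices (the
   gcc_assert above relies on exactly that).  Roughly, the emitted layout
   then looks like this (illustrative; the actual cold-section name depends
   on the target and on the function-section options in use):

	.text
      func:
	... hot blocks ...
	[NOTE_INSN_SWITCH_TEXT_SECTIONS]	<- emitted by this function
	.section .text.unlikely
	... cold blocks ...  */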
2323
2324 namespace {
2325
2326 const pass_data pass_data_reorder_blocks =
2327 {
2328 RTL_PASS, /* type */
2329 "bbro", /* name */
2330 OPTGROUP_NONE, /* optinfo_flags */
2331 TV_REORDER_BLOCKS, /* tv_id */
2332 0, /* properties_required */
2333 0, /* properties_provided */
2334 0, /* properties_destroyed */
2335 0, /* todo_flags_start */
2336 0, /* todo_flags_finish */
2337 };
2338
2339 class pass_reorder_blocks : public rtl_opt_pass
2340 {
2341 public:
2342 pass_reorder_blocks (gcc::context *ctxt)
2343 : rtl_opt_pass (pass_data_reorder_blocks, ctxt)
2344 {}
2345
2346 /* opt_pass methods: */
2347 virtual bool gate (function *)
2348 {
2349 if (targetm.cannot_modify_jumps_p ())
2350 return false;
2351 return (optimize > 0
2352 && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
2353 }
2354
2355 virtual unsigned int execute (function *);
2356
2357 }; // class pass_reorder_blocks
2358
2359 unsigned int
2360 pass_reorder_blocks::execute (function *fun)
2361 {
2362 basic_block bb;
2363
2364 /* Last attempt to optimize the CFG, since scheduling, peepholing and insn
2365 splitting may have introduced more crossjumping opportunities. */
2366 cfg_layout_initialize (CLEANUP_EXPENSIVE);
2367
2368 reorder_basic_blocks ();
2369 cleanup_cfg (CLEANUP_EXPENSIVE);
2370
2371 FOR_EACH_BB_FN (bb, fun)
2372 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
2373 bb->aux = bb->next_bb;
2374 cfg_layout_finalize ();
2375
2376 return 0;
2377 }
2378
2379 } // anon namespace
2380
2381 rtl_opt_pass *
2382 make_pass_reorder_blocks (gcc::context *ctxt)
2383 {
2384 return new pass_reorder_blocks (ctxt);
2385 }
2386
2387 /* Duplicate the blocks containing computed gotos. This basically unfactors
2388 computed gotos that were factored early on in the compilation process to
2389 speed up edge-based data flow. Formerly we did not unfactor them again,
2390 which could seriously pessimize code with many computed jumps in the source
2391 code, such as interpreters. See e.g. PR15242. */
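
/* A minimal sketch of the kind of code this pass targets (illustrative
   only; the interpreter below is invented and relies on the GNU C
   computed-goto extension).  As written, every handler ends with its own
   "goto *" dispatch; early in compilation those dispatches are factored
   into one shared block, and this pass duplicates that block back into the
   handlers -- subject to the size cap derived from
   PARAM_MAX_GOTO_DUPLICATION_INSNS below -- so that each handler again gets
   its own, better-predicted indirect jump.  */

static int
threaded_interp_sketch (const unsigned char *pc)
{
  static void *dispatch[] = { &&op_halt, &&op_inc, &&op_dec };
  int acc = 0;

  goto *dispatch[*pc++];        /* initial dispatch */

 op_inc:
  acc++;
  goto *dispatch[*pc++];        /* per-handler dispatch this pass restores */
 op_dec:
  acc--;
  goto *dispatch[*pc++];
 op_halt:
  return acc;
}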
2392
2393 namespace {
2394
2395 const pass_data pass_data_duplicate_computed_gotos =
2396 {
2397 RTL_PASS, /* type */
2398 "compgotos", /* name */
2399 OPTGROUP_NONE, /* optinfo_flags */
2400 TV_REORDER_BLOCKS, /* tv_id */
2401 0, /* properties_required */
2402 0, /* properties_provided */
2403 0, /* properties_destroyed */
2404 0, /* todo_flags_start */
2405 0, /* todo_flags_finish */
2406 };
2407
2408 class pass_duplicate_computed_gotos : public rtl_opt_pass
2409 {
2410 public:
2411 pass_duplicate_computed_gotos (gcc::context *ctxt)
2412 : rtl_opt_pass (pass_data_duplicate_computed_gotos, ctxt)
2413 {}
2414
2415 /* opt_pass methods: */
2416 virtual bool gate (function *);
2417 virtual unsigned int execute (function *);
2418
2419 }; // class pass_duplicate_computed_gotos
2420
2421 bool
2422 pass_duplicate_computed_gotos::gate (function *fun)
2423 {
2424 if (targetm.cannot_modify_jumps_p ())
2425 return false;
2426 return (optimize > 0
2427 && flag_expensive_optimizations
2428 && ! optimize_function_for_size_p (fun));
2429 }
2430
2431 unsigned int
2432 pass_duplicate_computed_gotos::execute (function *fun)
2433 {
2434 basic_block bb, new_bb;
2435 bitmap candidates;
2436 int max_size;
2437 bool changed = false;
2438
2439 if (n_basic_blocks_for_fn (fun) <= NUM_FIXED_BLOCKS + 1)
2440 return 0;
2441
2442 clear_bb_flags ();
2443 cfg_layout_initialize (0);
2444
2445 /* We estimate the length of the uncond jump insn only once
2446 since the code for getting the insn length always returns
2447 the minimal length now. */
2448 if (uncond_jump_length == 0)
2449 uncond_jump_length = get_uncond_jump_length ();
2450
2451 max_size
2452 = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
2453 candidates = BITMAP_ALLOC (NULL);
2454
2455 /* Look for blocks that end in a computed jump, and see if such blocks
2456 are suitable for unfactoring. If a block is a candidate for unfactoring,
2457 mark it in the candidates bitmap. */
2458 FOR_EACH_BB_FN (bb, fun)
2459 {
2460 rtx_insn *insn;
2461 edge e;
2462 edge_iterator ei;
2463 int size, all_flags;
2464
2465 /* Build the reorder chain for the original order of blocks. */
2466 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
2467 bb->aux = bb->next_bb;
2468
2469 /* Obviously the block has to end in a computed jump. */
2470 if (!computed_jump_p (BB_END (bb)))
2471 continue;
2472
2473 /* Only consider blocks that can be duplicated. */
2474 if (CROSSING_JUMP_P (BB_END (bb))
2475 || !can_duplicate_block_p (bb))
2476 continue;
2477
2478 /* Make sure that the block is small enough. */
2479 size = 0;
2480 FOR_BB_INSNS (bb, insn)
2481 if (INSN_P (insn))
2482 {
2483 size += get_attr_min_length (insn);
2484 if (size > max_size)
2485 break;
2486 }
2487 if (size > max_size)
2488 continue;
2489
2490 /* Final check: there must not be any incoming abnormal edges. */
2491 all_flags = 0;
2492 FOR_EACH_EDGE (e, ei, bb->preds)
2493 all_flags |= e->flags;
2494 if (all_flags & EDGE_COMPLEX)
2495 continue;
2496
2497 bitmap_set_bit (candidates, bb->index);
2498 }
2499
2500 /* Nothing to do if there is no computed jump here. */
2501 if (bitmap_empty_p (candidates))
2502 goto done;
2503
2504 /* Duplicate computed gotos. */
2505 FOR_EACH_BB_FN (bb, fun)
2506 {
2507 if (bb->flags & BB_VISITED)
2508 continue;
2509
2510 bb->flags |= BB_VISITED;
2511
2512 /* BB must have one outgoing edge. That edge must not lead to
2513 the exit block or the next block.
2514 The destination must have more than one predecessor. */
2515 if (!single_succ_p (bb)
2516 || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (fun)
2517 || single_succ (bb) == bb->next_bb
2518 || single_pred_p (single_succ (bb)))
2519 continue;
2520
2521 /* The successor block has to be a duplication candidate. */
2522 if (!bitmap_bit_p (candidates, single_succ (bb)->index))
2523 continue;
2524
2525 /* Don't duplicate a partition crossing edge, which requires difficult
2526 fixup. */
2527 if (JUMP_P (BB_END (bb)) && CROSSING_JUMP_P (BB_END (bb)))
2528 continue;
2529
2530 new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
2531 new_bb->aux = bb->aux;
2532 bb->aux = new_bb;
2533 new_bb->flags |= BB_VISITED;
2534 changed = true;
2535 }
2536
2537 done:
2538 if (changed)
2539 {
2540 /* Duplicating blocks above will redirect edges and may cause hot
2541 blocks previously reached by both hot and cold blocks to become
2542 dominated only by cold blocks. */
2543 fixup_partitions ();
2544
2545 /* Merge the duplicated blocks into predecessors, when possible. */
2546 cfg_layout_finalize ();
2547 cleanup_cfg (0);
2548 }
2549 else
2550 cfg_layout_finalize ();
2551
2552 BITMAP_FREE (candidates);
2553 return 0;
2554 }
2555
2556 } // anon namespace
2557
2558 rtl_opt_pass *
2559 make_pass_duplicate_computed_gotos (gcc::context *ctxt)
2560 {
2561 return new pass_duplicate_computed_gotos (ctxt);
2562 }
2563
2564 /* This function is the main entry point for the optimization that
2565 partitions hot and cold basic blocks into separate sections of the
2566 .o file (to improve performance and cache locality). Ideally it
2567 would be called after all optimizations that rearrange the CFG have
2568 been called. However part of this optimization may introduce new
2569 register usage, so it must be called before register allocation has
2570 occurred. This means that this optimization is actually called
2571 well before the optimization that reorders basic blocks (see
2572 function above).
2573
2574 This optimization checks the feedback information to determine
2575 which basic blocks are hot/cold, and updates flags on the basic blocks
2576 to indicate which section they belong in. This information is
2577 later used for writing out sections in the .o file. Because hot
2578 and cold sections can be arbitrarily large (within the bounds of
2579 memory), far beyond the size of a single function, it is necessary
2580 to fix up all edges that cross section boundaries, to make sure the
2581 instructions used can actually span the required distance. The
2582 fixes are described below.
2583
2584 Fall-through edges must be changed into jumps; it is not safe or
2585 legal to fall through across a section boundary. Whenever a
2586 fall-through edge crossing a section boundary is encountered, a new
2587 basic block is inserted (in the same section as the fall-through
2588 source), and the fall through edge is redirected to the new basic
2589 block. The new basic block contains an unconditional jump to the
2590 original fall-through target. (If the unconditional jump is
2591 insufficient to cross section boundaries, that is dealt with a
2592 little later, see below).
2593
2594 In order to deal with architectures that have short conditional
2595 branches (which cannot span all of memory) we take any conditional
2596 jump that attempts to cross a section boundary and add a level of
2597 indirection: it becomes a conditional jump to a new basic block, in
2598 the same section. The new basic block contains an unconditional
2599 jump to the original target, in the other section.
2600
2601 For those architectures whose unconditional branch is also
2602 incapable of reaching all of memory, those unconditional jumps are
2603 converted into indirect jumps, through a register.
2604
2605 IMPORTANT NOTE: This optimization causes some messy interactions
2606 with the cfg cleanup optimizations; those optimizations want to
2607 merge blocks wherever possible, and to collapse indirect jump
2608 sequences (change "A jumps to B jumps to C" directly into "A jumps
2609 to C"). Those optimizations can undo the jump fixes that
2610 partitioning is required to make (see above), in order to ensure
2611 that jumps attempting to cross section boundaries are really able
2612 to cover whatever distance the jump requires (on many architectures
2613 conditional or unconditional jumps are not able to reach all of
2614 memory). Therefore tests have to be inserted into each such
2615 optimization to make sure that it does not undo the fixes necessary
2616 for crossing partition boundaries. This would be much less of a problem
2617 if we could perform this optimization later in the compilation, but
2618 unfortunately the fact that we may need to create indirect jumps
2619 (through registers) requires that this optimization be performed
2620 before register allocation.
2621
2622 Hot and cold basic blocks are partitioned and put in separate
2623 sections of the .o file, to reduce paging and improve cache
2624 performance (hopefully). This can result in bits of code from the
2625 same function being widely separated in the .o file. However, the
2626 current bb structure does not reflect this separation. Therefore we
2627 must take care to ensure that: (1) there are no fall_thru edges that
2628 cross between sections; (2) for those architectures which have "short"
2629 conditional branches, all conditional branches that attempt to
2630 cross between sections are converted to unconditional branches;
2631 and (3) for those architectures which have "short" unconditional
2632 branches, all unconditional branches that attempt to cross between
2633 sections are converted to indirect jumps.
2634
2635 The code for fixing up fall_thru edges that cross between hot and
2636 cold basic blocks does so by creating new basic blocks containing
2637 unconditional branches to the appropriate label in the "other"
2638 section. The new basic block is then put in the same (hot or cold)
2639 section as the original conditional branch, and the fall_thru edge
2640 is modified to fall into the new basic block instead. By adding
2641 this level of indirection we end up with only unconditional branches
2642 crossing between hot and cold sections.
2643
2644 Conditional branches are dealt with by adding a level of indirection.
2645 A new basic block is added in the same (hot/cold) section as the
2646 conditional branch, and the conditional branch is retargeted to the
2647 new basic block. The new basic block contains an unconditional branch
2648 to the original target of the conditional branch (in the other section).
2649
2650 Unconditional branches are dealt with by converting them into
2651 indirect jumps. */
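
/* For reference, a minimal user-level example of code this machinery acts
   on (illustrative only; nothing below is used by the pass).  With profile
   feedback -- or, in some cases, with explicit hints such as
   __builtin_expect -- and -freorder-blocks-and-partition, the unlikely path
   can be placed in the cold section, and every edge between the two paths
   then needs one of the fixups described above.  */

static long
partitioned_code_sketch (long n)
{
  long sum = 0;
  long i;

  for (i = 0; i < n; i++)
    {
      if (__builtin_expect (i > n, 0))  /* never true: candidate cold path */
	return -1;
      sum += i;                         /* hot loop body */
    }
  return sum;
}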
2652
2653 namespace {
2654
2655 const pass_data pass_data_partition_blocks =
2656 {
2657 RTL_PASS, /* type */
2658 "bbpart", /* name */
2659 OPTGROUP_NONE, /* optinfo_flags */
2660 TV_REORDER_BLOCKS, /* tv_id */
2661 PROP_cfglayout, /* properties_required */
2662 0, /* properties_provided */
2663 0, /* properties_destroyed */
2664 0, /* todo_flags_start */
2665 0, /* todo_flags_finish */
2666 };
2667
2668 class pass_partition_blocks : public rtl_opt_pass
2669 {
2670 public:
2671 pass_partition_blocks (gcc::context *ctxt)
2672 : rtl_opt_pass (pass_data_partition_blocks, ctxt)
2673 {}
2674
2675 /* opt_pass methods: */
2676 virtual bool gate (function *);
2677 virtual unsigned int execute (function *);
2678
2679 }; // class pass_partition_blocks
2680
2681 bool
2682 pass_partition_blocks::gate (function *fun)
2683 {
2684 /* The optimization to partition hot/cold basic blocks into separate
2685 sections of the .o file does not work well with linkonce or with
2686 user-defined section attributes. Don't call it if either case
2687 arises. */
2688 return (flag_reorder_blocks_and_partition
2689 && optimize
2690 /* See pass_reorder_blocks::gate. We should not partition if
2691 we are going to omit the reordering. */
2692 && optimize_function_for_speed_p (fun)
2693 && !DECL_COMDAT_GROUP (current_function_decl)
2694 && !user_defined_section_attribute);
2695 }
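
/* Illustrative example (invented) of source the gate above rejects: a
   user-specified section attribute pins the whole function into a single
   section, so splitting it into hot and cold parts is not attempted.  */

static int __attribute__ ((section (".mytext")))
pinned_section_sketch (int x)
{
  return x > 0 ? x : -x;
}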
2696
2697 unsigned
2698 pass_partition_blocks::execute (function *fun)
2699 {
2700 vec<edge> crossing_edges;
2701
2702 if (n_basic_blocks_for_fn (fun) <= NUM_FIXED_BLOCKS + 1)
2703 return 0;
2704
2705 df_set_flags (DF_DEFER_INSN_RESCAN);
2706
2707 crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
2708 if (!crossing_edges.exists ())
2709 return 0;
2710
2711 crtl->has_bb_partition = true;
2712
2713 /* Make sure the source of any crossing edge ends in a jump and the
2714 destination of any crossing edge has a label. */
2715 add_labels_and_missing_jumps (crossing_edges);
2716
2717 /* Convert all crossing fall_thru edges into non-crossing fall-thrus
2718 to new basic blocks that end in unconditional jumps (which jump to
2719 the original fall-through dest). */
2720 fix_up_fall_thru_edges ();
2721
2722 /* If the architecture does not have conditional branches that can
2723 span all of memory, convert crossing conditional branches into
2724 crossing unconditional branches. */
2725 if (!HAS_LONG_COND_BRANCH)
2726 fix_crossing_conditional_branches ();
2727
2728 /* If the architecture does not have unconditional branches that
2729 can span all of memory, convert crossing unconditional branches
2730 into indirect jumps. Since adding an indirect jump also adds
2731 a new register usage, update the register usage information as
2732 well. */
2733 if (!HAS_LONG_UNCOND_BRANCH)
2734 fix_crossing_unconditional_branches ();
2735
2736 update_crossing_jump_flags ();
2737
2738 /* Clear bb->aux fields that the above routines were using. */
2739 clear_aux_for_blocks ();
2740
2741 crossing_edges.release ();
2742
2743 /* ??? FIXME: DF generates the bb info for a block immediately.
2744 And by immediately, I mean *during* creation of the block.
2745
2746 #0 df_bb_refs_collect
2747 #1 in df_bb_refs_record
2748 #2 in create_basic_block_structure
2749
2750 Which means that the bb_has_eh_pred test in df_bb_refs_collect
2751 will *always* fail, because no edges can have been added to the
2752 block yet. Which of course means we don't add the right
2753 artificial refs, which means we fail df_verify (much) later.
2754
2755 Cleanest solution would seem to make DF_DEFER_INSN_RESCAN imply
2756 that we also shouldn't grab data from the new blocks those new
2757 insns are in either. In this way one can create the block, link
2758 it up properly, and have everything Just Work later, when deferred
2759 insns are processed.
2760
2761 In the meantime, we have no other option but to throw away all
2762 of the DF data and recompute it all. */
2763 if (fun->eh->lp_array)
2764 {
2765 df_finish_pass (true);
2766 df_scan_alloc (NULL);
2767 df_scan_blocks ();
2768 /* Not all post-landing pads use all of the EH_RETURN_DATA_REGNO
2769 data. We blindly generated all of them when creating the new
2770 landing pad. Delete those assignments we don't use. */
2771 df_set_flags (DF_LR_RUN_DCE);
2772 df_analyze ();
2773 }
2774
2775 return 0;
2776 }
2777
2778 } // anon namespace
2779
2780 rtl_opt_pass *
2781 make_pass_partition_blocks (gcc::context *ctxt)
2782 {
2783 return new pass_partition_blocks (ctxt);
2784 }