1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2013 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* This pass implements list scheduling within basic blocks. It is
23 run twice: (1) after flow analysis, but before register allocation,
24 and (2) after register allocation.
25
26 The first run performs interblock scheduling, moving insns between
27 different blocks in the same "region", and the second runs only
28 basic block scheduling.
29
30 Interblock motions performed are useful motions and speculative
31 motions, including speculative loads. Motions requiring code
32 duplication are not supported. The identification of motion type
33 and the check for validity of speculative motions requires
34 construction and analysis of the function's control flow graph.
35
36 The main entry point for this pass is schedule_insns(), called for
37 each function. The work of the scheduler is organized in three
38 levels: (1) function level: insns are subject to splitting,
39 control-flow-graph is constructed, regions are computed (after
40 reload, each region is of one block), (2) region level: control
41 flow graph attributes required for interblock scheduling are
42 computed (dominators, reachability, etc.), data dependences and
43 priorities are computed, and (3) block level: insns in the block
44 are actually scheduled. */
45 \f
46 #include "config.h"
47 #include "system.h"
48 #include "coretypes.h"
49 #include "tm.h"
50 #include "diagnostic-core.h"
51 #include "rtl.h"
52 #include "tm_p.h"
53 #include "hard-reg-set.h"
54 #include "regs.h"
55 #include "function.h"
56 #include "flags.h"
57 #include "insn-config.h"
58 #include "insn-attr.h"
59 #include "except.h"
60 #include "recog.h"
61 #include "params.h"
62 #include "sched-int.h"
63 #include "sel-sched.h"
64 #include "target.h"
65 #include "tree-pass.h"
66 #include "dbgcnt.h"
67
68 #ifdef INSN_SCHEDULING
69
70 /* Some accessor macros for h_i_d members only used within this file. */
71 #define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
 72 #define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)
73
74 /* nr_inter/spec counts interblock/speculative motion for the function. */
75 static int nr_inter, nr_spec;
76
77 static int is_cfg_nonregular (void);
78
79 /* Number of regions in the procedure. */
80 int nr_regions = 0;
81
82 /* Table of region descriptions. */
83 region *rgn_table = NULL;
84
85 /* Array of lists of regions' blocks. */
86 int *rgn_bb_table = NULL;
87
88 /* Topological order of blocks in the region (if b2 is reachable from
89 b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
90 always referred to by either block or b, while its topological
91 order name (in the region) is referred to by bb. */
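 /* For example, if a region's slice of rgn_bb_table is {4, 7, 5}, then
    block_to_bb[4] == 0, block_to_bb[7] == 1 and block_to_bb[5] == 2. */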
92 int *block_to_bb = NULL;
93
94 /* The number of the region containing a block. */
95 int *containing_rgn = NULL;
96
 97 /* ebb_head [i] is the index in rgn_bb_table of the head basic block of the i'th ebb.
 98 Currently we can get an ebb only through splitting of the currently
 99 scheduled block, therefore we don't need an ebb_head array for every region;
 100 hence, it is sufficient to hold it for the current one only. */
101 int *ebb_head = NULL;
102
103 /* The minimum probability of reaching a source block so that it will be
104 considered for speculative scheduling. */
105 static int min_spec_prob;
106
107 static void find_single_block_region (bool);
108 static void find_rgns (void);
109 static bool too_large (int, int *, int *);
110
111 /* Blocks of the current region being scheduled. */
112 int current_nr_blocks;
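 /* Index in rgn_bb_table of the first block of the current region; the
    region's blocks are rgn_bb_table[current_blocks] through
    rgn_bb_table[current_blocks + current_nr_blocks - 1] (cf. debug_region). */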
113 int current_blocks;
114
115 /* A speculative motion requires checking live information on the path
116 from 'source' to 'target'. The split blocks are those to be checked.
117 After a speculative motion, live information should be modified in
118 the 'update' blocks.
119
120 Lists of split and update blocks for each candidate of the current
121 target are in array bblst_table. */
122 static basic_block *bblst_table;
123 static int bblst_size, bblst_last;
124
125 /* Arrays that hold the DFA state at the end of a basic block, to re-use
126 as the initial state at the start of successor blocks. The BB_STATE
127 array holds the actual DFA state, and BB_STATE_ARRAY[I] is a pointer
128 into BB_STATE for basic block I. FIXME: This should be a vec. */
129 static char *bb_state_array = NULL;
130 static state_t *bb_state = NULL;
131
132 /* Target info declarations.
133
134 The block currently being scheduled is referred to as the "target" block,
135 while other blocks in the region from which insns can be moved to the
136 target are called "source" blocks. The candidate structure holds info
137 about such sources: are they valid? Speculative? Etc. */
138 typedef struct
139 {
140 basic_block *first_member;
141 int nr_members;
142 }
143 bblst;
144
145 typedef struct
146 {
147 char is_valid;
148 char is_speculative;
149 int src_prob;
150 bblst split_bbs;
151 bblst update_bbs;
152 }
153 candidate;
154
155 static candidate *candidate_table;
156 #define IS_VALID(src) (candidate_table[src].is_valid)
157 #define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
158 #define IS_SPECULATIVE_INSN(INSN) \
159 (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
160 #define SRC_PROB(src) ( candidate_table[src].src_prob )
161
162 /* The bb being currently scheduled. */
163 int target_bb;
164
165 /* List of edges. */
166 typedef struct
167 {
168 edge *first_member;
169 int nr_members;
170 }
171 edgelst;
172
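 /* Table holding the edges of the split-edge lists for the current target;
    its space is reused on each call to extract_edgelst. */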
173 static edge *edgelst_table;
174 static int edgelst_last;
175
176 static void extract_edgelst (sbitmap, edgelst *);
177
178 /* Target info functions. */
179 static void split_edges (int, int, edgelst *);
180 static void compute_trg_info (int);
181 void debug_candidate (int);
182 void debug_candidates (int);
183
184 /* Dominators array: dom[i] contains the sbitmap of dominators of
185 bb i in the region. */
186 static sbitmap *dom;
187
188 /* bb 0 is the only region entry. */
189 #define IS_RGN_ENTRY(bb) (!bb)
190
 191 /* Is bb_src dominated by bb_trg? */
192 #define IS_DOMINATED(bb_src, bb_trg) \
193 ( bitmap_bit_p (dom[bb_src], bb_trg) )
194
195 /* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
196 the probability of bb i relative to the region entry. */
197 static int *prob;
198
199 /* Bit-set of edges, where bit i stands for edge i. */
200 typedef sbitmap edgeset;
201
202 /* Number of edges in the region. */
203 static int rgn_nr_edges;
204
205 /* Array of size rgn_nr_edges. */
206 static edge *rgn_edges;
207
208 /* Mapping from each edge in the graph to its number in the rgn. */
209 #define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
210 #define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
211
 212 /* The split edges of a source bb are different for each target
 213 bb. In order to compute this efficiently, the 'potential-split edges'
 214 are computed for each bb prior to scheduling a region. These are actually
215 the split edges of each bb relative to the region entry.
216
217 pot_split[bb] is the set of potential split edges of bb. */
218 static edgeset *pot_split;
219
220 /* For every bb, a set of its ancestor edges. */
221 static edgeset *ancestor_edges;
222
223 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
224
225 /* Speculative scheduling functions. */
226 static int check_live_1 (int, rtx);
227 static void update_live_1 (int, rtx);
228 static int is_pfree (rtx, int, int);
229 static int find_conditional_protection (rtx, int);
230 static int is_conditionally_protected (rtx, int, int);
231 static int is_prisky (rtx, int, int);
232 static int is_exception_free (rtx, int, int);
233
234 static bool sets_likely_spilled (rtx);
235 static void sets_likely_spilled_1 (rtx, const_rtx, void *);
236 static void add_branch_dependences (rtx, rtx);
237 static void compute_block_dependences (int);
238
239 static void schedule_region (int);
240 static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
241 static void propagate_deps (int, struct deps_desc *);
242 static void free_pending_lists (void);
243
244 /* Functions for construction of the control flow graph. */
245
246 /* Return 1 if control flow graph should not be constructed, 0 otherwise.
247
248 We decide not to build the control flow graph if there is possibly more
249 than one entry to the function, if computed branches exist, if we
250 have nonlocal gotos, or if we have an unreachable loop. */
251
252 static int
253 is_cfg_nonregular (void)
254 {
255 basic_block b;
256 rtx insn;
257
258 /* If we have a label that could be the target of a nonlocal goto, then
259 the cfg is not well structured. */
260 if (nonlocal_goto_handler_labels)
261 return 1;
262
263 /* If we have any forced labels, then the cfg is not well structured. */
264 if (forced_labels)
265 return 1;
266
267 /* If we have exception handlers, then we consider the cfg not well
268 structured. ?!? We should be able to handle this now that we
269 compute an accurate cfg for EH. */
270 if (current_function_has_exception_handlers ())
271 return 1;
272
273 /* If we have insns which refer to labels as non-jumped-to operands,
274 then we consider the cfg not well structured. */
275 FOR_EACH_BB (b)
276 FOR_BB_INSNS (b, insn)
277 {
278 rtx note, next, set, dest;
279
280 /* If this function has a computed jump, then we consider the cfg
281 not well structured. */
282 if (JUMP_P (insn) && computed_jump_p (insn))
283 return 1;
284
285 if (!INSN_P (insn))
286 continue;
287
288 note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
289 if (note == NULL_RTX)
290 continue;
291
292 /* For that label not to be seen as a referred-to label, this
293 must be a single-set which is feeding a jump *only*. This
294 could be a conditional jump with the label split off for
295 machine-specific reasons or a casesi/tablejump. */
296 next = next_nonnote_insn (insn);
297 if (next == NULL_RTX
298 || !JUMP_P (next)
299 || (JUMP_LABEL (next) != XEXP (note, 0)
300 && find_reg_note (next, REG_LABEL_TARGET,
301 XEXP (note, 0)) == NULL_RTX)
302 || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
303 return 1;
304
305 set = single_set (insn);
306 if (set == NULL_RTX)
307 return 1;
308
309 dest = SET_DEST (set);
310 if (!REG_P (dest) || !dead_or_set_p (next, dest))
311 return 1;
312 }
313
314 /* Unreachable loops with more than one basic block are detected
315 during the DFS traversal in find_rgns.
316
317 Unreachable loops with a single block are detected here. This
318 test is redundant with the one in find_rgns, but it's much
319 cheaper to go ahead and catch the trivial case here. */
320 FOR_EACH_BB (b)
321 {
322 if (EDGE_COUNT (b->preds) == 0
323 || (single_pred_p (b)
324 && single_pred (b) == b))
325 return 1;
326 }
327
328 /* All the tests passed. Consider the cfg well structured. */
329 return 0;
330 }
331
332 /* Extract list of edges from a bitmap containing EDGE_TO_BIT bits. */
333
334 static void
335 extract_edgelst (sbitmap set, edgelst *el)
336 {
337 unsigned int i = 0;
338 sbitmap_iterator sbi;
339
340 /* edgelst table space is reused in each call to extract_edgelst. */
341 edgelst_last = 0;
342
343 el->first_member = &edgelst_table[edgelst_last];
344 el->nr_members = 0;
345
 346 /* Iterate over each set bit in the bitmap. */
347 EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi)
348 {
349 edgelst_table[edgelst_last++] = rgn_edges[i];
350 el->nr_members++;
351 }
352 }
353
354 /* Functions for the construction of regions. */
355
356 /* Print the regions, for debugging purposes. Callable from debugger. */
357
358 DEBUG_FUNCTION void
359 debug_regions (void)
360 {
361 int rgn, bb;
362
363 fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
364 for (rgn = 0; rgn < nr_regions; rgn++)
365 {
366 fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
367 rgn_table[rgn].rgn_nr_blocks);
368 fprintf (sched_dump, ";;\tbb/block: ");
369
370 /* We don't have ebb_head initialized yet, so we can't use
371 BB_TO_BLOCK (). */
372 current_blocks = RGN_BLOCKS (rgn);
373
374 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
375 fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
376
377 fprintf (sched_dump, "\n\n");
378 }
379 }
380
381 /* Print the region's basic blocks. */
382
383 DEBUG_FUNCTION void
384 debug_region (int rgn)
385 {
386 int bb;
387
388 fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn);
389 fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
390 rgn_table[rgn].rgn_nr_blocks);
391 fprintf (stderr, ";;\tbb/block: ");
392
393 /* We don't have ebb_head initialized yet, so we can't use
394 BB_TO_BLOCK (). */
395 current_blocks = RGN_BLOCKS (rgn);
396
397 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
398 fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
399
400 fprintf (stderr, "\n\n");
401
402 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
403 {
404 dump_bb (stderr, BASIC_BLOCK (rgn_bb_table[current_blocks + bb]),
405 0, TDF_SLIM | TDF_BLOCKS);
406 fprintf (stderr, "\n");
407 }
408
409 fprintf (stderr, "\n");
410
411 }
412
 413 /* True when the bb with index BB_INDEX is contained in region RGN. */
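 /* Assumes current_blocks has already been set to RGN_BLOCKS (RGN) by the
    caller, as dump_region_dot does. */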
414 static bool
415 bb_in_region_p (int bb_index, int rgn)
416 {
417 int i;
418
419 for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
420 if (rgn_bb_table[current_blocks + i] == bb_index)
421 return true;
422
423 return false;
424 }
425
426 /* Dump region RGN to file F using dot syntax. */
427 void
428 dump_region_dot (FILE *f, int rgn)
429 {
430 int i;
431
432 fprintf (f, "digraph Region_%d {\n", rgn);
433
434 /* We don't have ebb_head initialized yet, so we can't use
435 BB_TO_BLOCK (). */
436 current_blocks = RGN_BLOCKS (rgn);
437
438 for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
439 {
440 edge e;
441 edge_iterator ei;
442 int src_bb_num = rgn_bb_table[current_blocks + i];
443 basic_block bb = BASIC_BLOCK (src_bb_num);
444
445 FOR_EACH_EDGE (e, ei, bb->succs)
446 if (bb_in_region_p (e->dest->index, rgn))
447 fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
448 }
449 fprintf (f, "}\n");
450 }
451
452 /* The same, but first open a file specified by FNAME. */
453 void
454 dump_region_dot_file (const char *fname, int rgn)
455 {
456 FILE *f = fopen (fname, "wt");
457 dump_region_dot (f, rgn);
458 fclose (f);
459 }
460
461 /* Build a single block region for each basic block in the function.
462 This allows for using the same code for interblock and basic block
463 scheduling. */
464
465 static void
466 find_single_block_region (bool ebbs_p)
467 {
468 basic_block bb, ebb_start;
469 int i = 0;
470
471 nr_regions = 0;
472
473 if (ebbs_p) {
474 int probability_cutoff;
475 if (profile_info && flag_branch_probabilities)
476 probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
477 else
478 probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
479 probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
480
481 FOR_EACH_BB (ebb_start)
482 {
483 RGN_NR_BLOCKS (nr_regions) = 0;
484 RGN_BLOCKS (nr_regions) = i;
485 RGN_DONT_CALC_DEPS (nr_regions) = 0;
486 RGN_HAS_REAL_EBB (nr_regions) = 0;
487
488 for (bb = ebb_start; ; bb = bb->next_bb)
489 {
490 edge e;
491
492 rgn_bb_table[i] = bb->index;
493 RGN_NR_BLOCKS (nr_regions)++;
494 CONTAINING_RGN (bb->index) = nr_regions;
495 BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
496 i++;
497
498 if (bb->next_bb == EXIT_BLOCK_PTR
499 || LABEL_P (BB_HEAD (bb->next_bb)))
500 break;
501
502 e = find_fallthru_edge (bb->succs);
503 if (! e)
504 break;
505 if (e->probability <= probability_cutoff)
506 break;
507 }
508
509 ebb_start = bb;
510 nr_regions++;
511 }
512 }
513 else
514 FOR_EACH_BB (bb)
515 {
516 rgn_bb_table[nr_regions] = bb->index;
517 RGN_NR_BLOCKS (nr_regions) = 1;
518 RGN_BLOCKS (nr_regions) = nr_regions;
519 RGN_DONT_CALC_DEPS (nr_regions) = 0;
520 RGN_HAS_REAL_EBB (nr_regions) = 0;
521
522 CONTAINING_RGN (bb->index) = nr_regions;
523 BLOCK_TO_BB (bb->index) = 0;
524 nr_regions++;
525 }
526 }
527
 528 /* Estimate the number of insns in BB. */
529 static int
530 rgn_estimate_number_of_insns (basic_block bb)
531 {
532 int count;
533
534 count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));
535
536 if (MAY_HAVE_DEBUG_INSNS)
537 {
538 rtx insn;
539
540 FOR_BB_INSNS (bb, insn)
541 if (DEBUG_INSN_P (insn))
542 count--;
543 }
544
545 return count;
546 }
547
548 /* Update number of blocks and the estimate for number of insns
549 in the region. Return true if the region is "too large" for interblock
550 scheduling (compile time considerations). */
551
552 static bool
553 too_large (int block, int *num_bbs, int *num_insns)
554 {
555 (*num_bbs)++;
556 (*num_insns) += (common_sched_info->estimate_number_of_insns
557 (BASIC_BLOCK (block)));
558
559 return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
560 || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
561 }
562
 563 /* UPDATE_LOOP_RELATIONS (blk, hdr): Check if the loop headed by max_hdr[blk]
 564 is still an inner loop. Put in max_hdr[blk] the header of the innermost
 565 loop containing blk. */
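 /* Of two candidate headers, the one with the larger DFS number is nested
    more deeply; the shallower one cannot be an innermost-loop header, so
    its bit in INNER is cleared. */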
566 #define UPDATE_LOOP_RELATIONS(blk, hdr) \
567 { \
568 if (max_hdr[blk] == -1) \
569 max_hdr[blk] = hdr; \
570 else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
571 bitmap_clear_bit (inner, hdr); \
572 else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
573 { \
574 bitmap_clear_bit (inner,max_hdr[blk]); \
575 max_hdr[blk] = hdr; \
576 } \
577 }
578
579 /* Find regions for interblock scheduling.
580
581 A region for scheduling can be:
582
583 * A loop-free procedure, or
584
585 * A reducible inner loop, or
586
587 * A basic block not contained in any other region.
588
589 ?!? In theory we could build other regions based on extended basic
590 blocks or reverse extended basic blocks. Is it worth the trouble?
591
592 Loop blocks that form a region are put into the region's block list
593 in topological order.
594
595 This procedure stores its results into the following global (ick) variables
596
597 * rgn_nr
598 * rgn_table
599 * rgn_bb_table
600 * block_to_bb
601 * containing region
602
603 We use dominator relationships to avoid making regions out of non-reducible
604 loops.
605
606 This procedure needs to be converted to work on pred/succ lists instead
607 of edge tables. That would simplify it somewhat. */
608
609 static void
610 haifa_find_rgns (void)
611 {
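 /* COUNT is the DFS numbering counter (and is later reused to assign
    topological positions within a region), SP is the DFS stack pointer,
    and IDX is the next free slot in rgn_bb_table. */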
612 int *max_hdr, *dfs_nr, *degree;
613 char no_loops = 1;
614 int node, child, loop_head, i, head, tail;
615 int count = 0, sp, idx = 0;
616 edge_iterator current_edge;
617 edge_iterator *stack;
618 int num_bbs, num_insns, unreachable;
619 int too_large_failure;
620 basic_block bb;
621
622 /* Note if a block is a natural loop header. */
623 sbitmap header;
624
625 /* Note if a block is a natural inner loop header. */
626 sbitmap inner;
627
628 /* Note if a block is in the block queue. */
629 sbitmap in_queue;
630
 631 /* Note if a block is on the DFS stack. */
632 sbitmap in_stack;
633
634 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
635 and a mapping from block to its loop header (if the block is contained
636 in a loop, else -1).
637
638 Store results in HEADER, INNER, and MAX_HDR respectively, these will
639 be used as inputs to the second traversal.
640
641 STACK, SP and DFS_NR are only used during the first traversal. */
642
643 /* Allocate and initialize variables for the first traversal. */
644 max_hdr = XNEWVEC (int, last_basic_block);
645 dfs_nr = XCNEWVEC (int, last_basic_block);
646 stack = XNEWVEC (edge_iterator, n_edges);
647
648 inner = sbitmap_alloc (last_basic_block);
649 bitmap_ones (inner);
650
651 header = sbitmap_alloc (last_basic_block);
652 bitmap_clear (header);
653
654 in_queue = sbitmap_alloc (last_basic_block);
655 bitmap_clear (in_queue);
656
657 in_stack = sbitmap_alloc (last_basic_block);
658 bitmap_clear (in_stack);
659
660 for (i = 0; i < last_basic_block; i++)
661 max_hdr[i] = -1;
662
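 /* An edge iterator counts as 'passed' when it has reached the end of its
    edge vector or when the edge it points to has been marked via its aux
    field by SET_EDGE_PASSED (the aux fields are cleared again after the
    DFS traversal below). */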
663 #define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
664 #define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))
665
666 /* DFS traversal to find inner loops in the cfg. */
667
668 current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
669 sp = -1;
670
671 while (1)
672 {
673 if (EDGE_PASSED (current_edge))
674 {
675 /* We have reached a leaf node or a node that was already
676 processed. Pop edges off the stack until we find
677 an edge that has not yet been processed. */
678 while (sp >= 0 && EDGE_PASSED (current_edge))
679 {
680 /* Pop entry off the stack. */
681 current_edge = stack[sp--];
682 node = ei_edge (current_edge)->src->index;
683 gcc_assert (node != ENTRY_BLOCK);
684 child = ei_edge (current_edge)->dest->index;
685 gcc_assert (child != EXIT_BLOCK);
686 bitmap_clear_bit (in_stack, child);
687 if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
688 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
689 ei_next (&current_edge);
690 }
691
 692 /* See if we have finished the DFS tree traversal. */
693 if (sp < 0 && EDGE_PASSED (current_edge))
694 break;
695
696 /* Nope, continue the traversal with the popped node. */
697 continue;
698 }
699
700 /* Process a node. */
701 node = ei_edge (current_edge)->src->index;
702 gcc_assert (node != ENTRY_BLOCK);
703 bitmap_set_bit (in_stack, node);
704 dfs_nr[node] = ++count;
705
706 /* We don't traverse to the exit block. */
707 child = ei_edge (current_edge)->dest->index;
708 if (child == EXIT_BLOCK)
709 {
710 SET_EDGE_PASSED (current_edge);
711 ei_next (&current_edge);
712 continue;
713 }
714
715 /* If the successor is in the stack, then we've found a loop.
 716 Mark the loop; if it is not a natural loop, then it will
717 be rejected during the second traversal. */
718 if (bitmap_bit_p (in_stack, child))
719 {
720 no_loops = 0;
721 bitmap_set_bit (header, child);
722 UPDATE_LOOP_RELATIONS (node, child);
723 SET_EDGE_PASSED (current_edge);
724 ei_next (&current_edge);
725 continue;
726 }
727
728 /* If the child was already visited, then there is no need to visit
729 it again. Just update the loop relationships and restart
730 with a new edge. */
731 if (dfs_nr[child])
732 {
733 if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
734 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
735 SET_EDGE_PASSED (current_edge);
736 ei_next (&current_edge);
737 continue;
738 }
739
740 /* Push an entry on the stack and continue DFS traversal. */
741 stack[++sp] = current_edge;
742 SET_EDGE_PASSED (current_edge);
743 current_edge = ei_start (ei_edge (current_edge)->dest->succs);
744 }
745
746 /* Reset ->aux field used by EDGE_PASSED. */
747 FOR_ALL_BB (bb)
748 {
749 edge_iterator ei;
750 edge e;
751 FOR_EACH_EDGE (e, ei, bb->succs)
752 e->aux = NULL;
753 }
754
755
756 /* Another check for unreachable blocks. The earlier test in
757 is_cfg_nonregular only finds unreachable blocks that do not
758 form a loop.
759
760 The DFS traversal will mark every block that is reachable from
761 the entry node by placing a nonzero value in dfs_nr. Thus if
762 dfs_nr is zero for any block, then it must be unreachable. */
763 unreachable = 0;
764 FOR_EACH_BB (bb)
765 if (dfs_nr[bb->index] == 0)
766 {
767 unreachable = 1;
768 break;
769 }
770
771 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
772 to hold degree counts. */
773 degree = dfs_nr;
774
775 FOR_EACH_BB (bb)
776 degree[bb->index] = EDGE_COUNT (bb->preds);
777
778 /* Do not perform region scheduling if there are any unreachable
779 blocks. */
780 if (!unreachable)
781 {
782 int *queue, *degree1 = NULL;
 783 /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
 784 there the basic blocks that are forced to be region heads.
 785 This is done to try to assemble a few smaller regions
 786 from a too_large region. */
787 sbitmap extended_rgn_header = NULL;
788 bool extend_regions_p;
789
790 if (no_loops)
791 bitmap_set_bit (header, 0);
792
 793 /* Second traversal: find reducible inner loops and topologically sort
 794 the blocks of each region. */
795
796 queue = XNEWVEC (int, n_basic_blocks);
797
798 extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
799 if (extend_regions_p)
800 {
801 degree1 = XNEWVEC (int, last_basic_block);
802 extended_rgn_header = sbitmap_alloc (last_basic_block);
803 bitmap_clear (extended_rgn_header);
804 }
805
806 /* Find blocks which are inner loop headers. We still have non-reducible
807 loops to consider at this point. */
808 FOR_EACH_BB (bb)
809 {
810 if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
811 {
812 edge e;
813 edge_iterator ei;
814 basic_block jbb;
815
816 /* Now check that the loop is reducible. We do this separate
817 from finding inner loops so that we do not find a reducible
818 loop which contains an inner non-reducible loop.
819
820 A simple way to find reducible/natural loops is to verify
821 that each block in the loop is dominated by the loop
822 header.
823
824 If there exists a block that is not dominated by the loop
825 header, then the block is reachable from outside the loop
826 and thus the loop is not a natural loop. */
827 FOR_EACH_BB (jbb)
828 {
829 /* First identify blocks in the loop, except for the loop
830 entry block. */
831 if (bb->index == max_hdr[jbb->index] && bb != jbb)
832 {
833 /* Now verify that the block is dominated by the loop
834 header. */
835 if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
836 break;
837 }
838 }
839
 840 /* If we exited the loop early, then BB is the header of
841 a non-reducible loop and we should quit processing it
842 now. */
843 if (jbb != EXIT_BLOCK_PTR)
844 continue;
845
 846 /* BB is a header of an inner loop, or block 0 in a subroutine
847 with no loops at all. */
848 head = tail = -1;
849 too_large_failure = 0;
850 loop_head = max_hdr[bb->index];
851
852 if (extend_regions_p)
853 /* We save degree in case when we meet a too_large region
854 and cancel it. We need a correct degree later when
855 calling extend_rgns. */
856 memcpy (degree1, degree, last_basic_block * sizeof (int));
857
 858 /* Decrease degree of all BB's successors for topological
859 ordering. */
860 FOR_EACH_EDGE (e, ei, bb->succs)
861 if (e->dest != EXIT_BLOCK_PTR)
862 --degree[e->dest->index];
863
864 /* Estimate # insns, and count # blocks in the region. */
865 num_bbs = 1;
866 num_insns = common_sched_info->estimate_number_of_insns (bb);
867
868 /* Find all loop latches (blocks with back edges to the loop
 869 header) or all the leaf blocks if the cfg has no loops.
870
871 Place those blocks into the queue. */
872 if (no_loops)
873 {
874 FOR_EACH_BB (jbb)
875 /* Leaf nodes have only a single successor which must
876 be EXIT_BLOCK. */
877 if (single_succ_p (jbb)
878 && single_succ (jbb) == EXIT_BLOCK_PTR)
879 {
880 queue[++tail] = jbb->index;
881 bitmap_set_bit (in_queue, jbb->index);
882
883 if (too_large (jbb->index, &num_bbs, &num_insns))
884 {
885 too_large_failure = 1;
886 break;
887 }
888 }
889 }
890 else
891 {
892 edge e;
893
894 FOR_EACH_EDGE (e, ei, bb->preds)
895 {
896 if (e->src == ENTRY_BLOCK_PTR)
897 continue;
898
899 node = e->src->index;
900
901 if (max_hdr[node] == loop_head && node != bb->index)
902 {
903 /* This is a loop latch. */
904 queue[++tail] = node;
905 bitmap_set_bit (in_queue, node);
906
907 if (too_large (node, &num_bbs, &num_insns))
908 {
909 too_large_failure = 1;
910 break;
911 }
912 }
913 }
914 }
915
916 /* Now add all the blocks in the loop to the queue.
917
918 We know the loop is a natural loop; however the algorithm
919 above will not always mark certain blocks as being in the
920 loop. Consider:
921 node children
922 a b,c
923 b c
924 c a,d
925 d b
926
927 The algorithm in the DFS traversal may not mark B & D as part
928 of the loop (i.e. they will not have max_hdr set to A).
929
930 We know they can not be loop latches (else they would have
931 had max_hdr set since they'd have a backedge to a dominator
932 block). So we don't need them on the initial queue.
933
934 We know they are part of the loop because they are dominated
935 by the loop header and can be reached by a backwards walk of
936 the edges starting with nodes on the initial queue.
937
938 It is safe and desirable to include those nodes in the
939 loop/scheduling region. To do so we would need to decrease
940 the degree of a node if it is the target of a backedge
941 within the loop itself as the node is placed in the queue.
942
943 We do not do this because I'm not sure that the actual
944 scheduling code will properly handle this case. ?!? */
945
946 while (head < tail && !too_large_failure)
947 {
948 edge e;
949 child = queue[++head];
950
951 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds)
952 {
953 node = e->src->index;
954
955 /* See discussion above about nodes not marked as in
956 this loop during the initial DFS traversal. */
957 if (e->src == ENTRY_BLOCK_PTR
958 || max_hdr[node] != loop_head)
959 {
960 tail = -1;
961 break;
962 }
963 else if (!bitmap_bit_p (in_queue, node) && node != bb->index)
964 {
965 queue[++tail] = node;
966 bitmap_set_bit (in_queue, node);
967
968 if (too_large (node, &num_bbs, &num_insns))
969 {
970 too_large_failure = 1;
971 break;
972 }
973 }
974 }
975 }
976
977 if (tail >= 0 && !too_large_failure)
978 {
979 /* Place the loop header into list of region blocks. */
980 degree[bb->index] = -1;
981 rgn_bb_table[idx] = bb->index;
982 RGN_NR_BLOCKS (nr_regions) = num_bbs;
983 RGN_BLOCKS (nr_regions) = idx++;
984 RGN_DONT_CALC_DEPS (nr_regions) = 0;
985 RGN_HAS_REAL_EBB (nr_regions) = 0;
986 CONTAINING_RGN (bb->index) = nr_regions;
987 BLOCK_TO_BB (bb->index) = count = 0;
988
989 /* Remove blocks from queue[] when their in degree
990 becomes zero. Repeat until no blocks are left on the
991 list. This produces a topological list of blocks in
992 the region. */
993 while (tail >= 0)
994 {
995 if (head < 0)
996 head = tail;
997 child = queue[head];
998 if (degree[child] == 0)
999 {
1000 edge e;
1001
1002 degree[child] = -1;
1003 rgn_bb_table[idx++] = child;
1004 BLOCK_TO_BB (child) = ++count;
1005 CONTAINING_RGN (child) = nr_regions;
1006 queue[head] = queue[tail--];
1007
1008 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
1009 if (e->dest != EXIT_BLOCK_PTR)
1010 --degree[e->dest->index];
1011 }
1012 else
1013 --head;
1014 }
1015 ++nr_regions;
1016 }
1017 else if (extend_regions_p)
1018 {
1019 /* Restore DEGREE. */
1020 int *t = degree;
1021
1022 degree = degree1;
1023 degree1 = t;
1024
1025 /* And force successors of BB to be region heads.
1026 This may provide several smaller regions instead
1027 of one too_large region. */
1028 FOR_EACH_EDGE (e, ei, bb->succs)
1029 if (e->dest != EXIT_BLOCK_PTR)
1030 bitmap_set_bit (extended_rgn_header, e->dest->index);
1031 }
1032 }
1033 }
1034 free (queue);
1035
1036 if (extend_regions_p)
1037 {
1038 free (degree1);
1039
1040 bitmap_ior (header, header, extended_rgn_header);
1041 sbitmap_free (extended_rgn_header);
1042
1043 extend_rgns (degree, &idx, header, max_hdr);
1044 }
1045 }
1046
1047 /* Any block that did not end up in a region is placed into a region
1048 by itself. */
1049 FOR_EACH_BB (bb)
1050 if (degree[bb->index] >= 0)
1051 {
1052 rgn_bb_table[idx] = bb->index;
1053 RGN_NR_BLOCKS (nr_regions) = 1;
1054 RGN_BLOCKS (nr_regions) = idx++;
1055 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1056 RGN_HAS_REAL_EBB (nr_regions) = 0;
1057 CONTAINING_RGN (bb->index) = nr_regions++;
1058 BLOCK_TO_BB (bb->index) = 0;
1059 }
1060
1061 free (max_hdr);
1062 free (degree);
1063 free (stack);
1064 sbitmap_free (header);
1065 sbitmap_free (inner);
1066 sbitmap_free (in_queue);
1067 sbitmap_free (in_stack);
1068 }
1069
1070
1071 /* Wrapper function.
1072 If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form
 1073 regions. Otherwise just call haifa_find_rgns. */
1074 static void
1075 find_rgns (void)
1076 {
1077 if (sel_sched_p () && flag_sel_sched_pipelining)
1078 sel_find_rgns ();
1079 else
1080 haifa_find_rgns ();
1081 }
1082
1083 static int gather_region_statistics (int **);
1084 static void print_region_statistics (int *, int, int *, int);
1085
1086 /* Calculate the histogram that shows the number of regions having the
1087 given number of basic blocks, and store it in the RSP array. Return
1088 the size of this array. */
1089 static int
1090 gather_region_statistics (int **rsp)
1091 {
1092 int i, *a = 0, a_sz = 0;
1093
1094 /* a[i] is the number of regions that have (i + 1) basic blocks. */
1095 for (i = 0; i < nr_regions; i++)
1096 {
1097 int nr_blocks = RGN_NR_BLOCKS (i);
1098
1099 gcc_assert (nr_blocks >= 1);
1100
1101 if (nr_blocks > a_sz)
1102 {
1103 a = XRESIZEVEC (int, a, nr_blocks);
1104 do
1105 a[a_sz++] = 0;
1106 while (a_sz != nr_blocks);
1107 }
1108
1109 a[nr_blocks - 1]++;
1110 }
1111
1112 *rsp = a;
1113 return a_sz;
1114 }
1115
1116 /* Print regions statistics. S1 and S2 denote the data before and after
1117 calling extend_rgns, respectively. */
1118 static void
1119 print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
1120 {
1121 int i;
1122
1123 /* We iterate until s2_sz because extend_rgns does not decrease
1124 the maximal region size. */
1125 for (i = 1; i < s2_sz; i++)
1126 {
1127 int n1, n2;
1128
1129 n2 = s2[i];
1130
1131 if (n2 == 0)
1132 continue;
1133
1134 if (i >= s1_sz)
1135 n1 = 0;
1136 else
1137 n1 = s1[i];
1138
1139 fprintf (sched_dump, ";; Region extension statistics: size %d: " \
1140 "was %d + %d more\n", i + 1, n1, n2 - n1);
1141 }
1142 }
1143
1144 /* Extend regions.
 1145 DEGREE - Array of incoming edge counts, considering only
 1146 the edges that don't have their sources in formed regions yet.
1147 IDXP - pointer to the next available index in rgn_bb_table.
1148 HEADER - set of all region heads.
1149 LOOP_HDR - mapping from block to the containing loop
1150 (two blocks can reside within one region if they have
1151 the same loop header). */
1152 void
1153 extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
1154 {
1155 int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
1156 int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;
1157
1158 max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
1159
1160 max_hdr = XNEWVEC (int, last_basic_block);
1161
1162 order = XNEWVEC (int, last_basic_block);
1163 post_order_compute (order, false, false);
1164
1165 for (i = nblocks - 1; i >= 0; i--)
1166 {
1167 int bbn = order[i];
1168 if (degree[bbn] >= 0)
1169 {
1170 max_hdr[bbn] = bbn;
1171 rescan = 1;
1172 }
1173 else
1174 /* This block already was processed in find_rgns. */
1175 max_hdr[bbn] = -1;
1176 }
1177
1178 /* The idea is to topologically walk through CFG in top-down order.
1179 During the traversal, if all the predecessors of a node are
1180 marked to be in the same region (they all have the same max_hdr),
 1181 then the current node is also marked as part of that region.
1182 Otherwise the node starts its own region.
1183 CFG should be traversed until no further changes are made. On each
1184 iteration the set of the region heads is extended (the set of those
1185 blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the
1186 set of all basic blocks, thus the algorithm is guaranteed to
1187 terminate. */
1188
1189 while (rescan && iter < max_iter)
1190 {
1191 rescan = 0;
1192
1193 for (i = nblocks - 1; i >= 0; i--)
1194 {
1195 edge e;
1196 edge_iterator ei;
1197 int bbn = order[i];
1198
1199 if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
1200 {
1201 int hdr = -1;
1202
1203 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->preds)
1204 {
1205 int predn = e->src->index;
1206
1207 if (predn != ENTRY_BLOCK
1208 /* If pred wasn't processed in find_rgns. */
1209 && max_hdr[predn] != -1
1210 /* And pred and bb reside in the same loop.
1211 (Or out of any loop). */
1212 && loop_hdr[bbn] == loop_hdr[predn])
1213 {
1214 if (hdr == -1)
1215 /* Then bb extends the containing region of pred. */
1216 hdr = max_hdr[predn];
1217 else if (hdr != max_hdr[predn])
1218 /* Too bad, there are at least two predecessors
1219 that reside in different regions. Thus, BB should
1220 begin its own region. */
1221 {
1222 hdr = bbn;
1223 break;
1224 }
1225 }
1226 else
1227 /* BB starts its own region. */
1228 {
1229 hdr = bbn;
1230 break;
1231 }
1232 }
1233
1234 if (hdr == bbn)
1235 {
 1236 /* If BB starts its own region,
 1237 update the set of headers with BB. */
1238 bitmap_set_bit (header, bbn);
1239 rescan = 1;
1240 }
1241 else
1242 gcc_assert (hdr != -1);
1243
1244 max_hdr[bbn] = hdr;
1245 }
1246 }
1247
1248 iter++;
1249 }
1250
1251 /* Statistics were gathered on the SPEC2000 package of tests with
1252 mainline weekly snapshot gcc-4.1-20051015 on ia64.
1253
1254 Statistics for SPECint:
1255 1 iteration : 1751 cases (38.7%)
1256 2 iterations: 2770 cases (61.3%)
1257 Blocks wrapped in regions by find_rgns without extension: 18295 blocks
1258 Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
1259 (We don't count single block regions here).
1260
1261 Statistics for SPECfp:
1262 1 iteration : 621 cases (35.9%)
1263 2 iterations: 1110 cases (64.1%)
1264 Blocks wrapped in regions by find_rgns without extension: 6476 blocks
1265 Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
1266 (We don't count single block regions here).
1267
1268 By default we do at most 2 iterations.
1269 This can be overridden with max-sched-extend-regions-iters parameter:
1270 0 - disable region extension,
1271 N > 0 - do at most N iterations. */
1272
1273 if (sched_verbose && iter != 0)
1274 fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
1275 rescan ? "... failed" : "");
1276
1277 if (!rescan && iter != 0)
1278 {
1279 int *s1 = NULL, s1_sz = 0;
1280
1281 /* Save the old statistics for later printout. */
1282 if (sched_verbose >= 6)
1283 s1_sz = gather_region_statistics (&s1);
1284
1285 /* We have succeeded. Now assemble the regions. */
1286 for (i = nblocks - 1; i >= 0; i--)
1287 {
1288 int bbn = order[i];
1289
1290 if (max_hdr[bbn] == bbn)
1291 /* BBN is a region head. */
1292 {
1293 edge e;
1294 edge_iterator ei;
1295 int num_bbs = 0, j, num_insns = 0, large;
1296
1297 large = too_large (bbn, &num_bbs, &num_insns);
1298
1299 degree[bbn] = -1;
1300 rgn_bb_table[idx] = bbn;
1301 RGN_BLOCKS (nr_regions) = idx++;
1302 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1303 RGN_HAS_REAL_EBB (nr_regions) = 0;
1304 CONTAINING_RGN (bbn) = nr_regions;
1305 BLOCK_TO_BB (bbn) = 0;
1306
1307 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
1308 if (e->dest != EXIT_BLOCK_PTR)
1309 degree[e->dest->index]--;
1310
1311 if (!large)
1312 /* Here we check whether the region is too_large. */
1313 for (j = i - 1; j >= 0; j--)
1314 {
1315 int succn = order[j];
1316 if (max_hdr[succn] == bbn)
1317 {
1318 if ((large = too_large (succn, &num_bbs, &num_insns)))
1319 break;
1320 }
1321 }
1322
1323 if (large)
 1324 /* If the region is too_large, then wrap every block of
 1325 the region into a single block region.
 1326 Here we wrap the region head only. Other blocks are
 1327 processed in the loop below. */
1328 {
1329 RGN_NR_BLOCKS (nr_regions) = 1;
1330 nr_regions++;
1331 }
1332
1333 num_bbs = 1;
1334
1335 for (j = i - 1; j >= 0; j--)
1336 {
1337 int succn = order[j];
1338
1339 if (max_hdr[succn] == bbn)
 1340 /* This loop iterates over all basic blocks that
 1341 are supposed to be in the region with head BBN,
 1342 and wraps them into that region (or into a single
 1343 block region). */
1344 {
1345 gcc_assert (degree[succn] == 0);
1346
1347 degree[succn] = -1;
1348 rgn_bb_table[idx] = succn;
1349 BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
1350 CONTAINING_RGN (succn) = nr_regions;
1351
1352 if (large)
 1353 /* Wrap SUCCN into a single block region. */
1354 {
1355 RGN_BLOCKS (nr_regions) = idx;
1356 RGN_NR_BLOCKS (nr_regions) = 1;
1357 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1358 RGN_HAS_REAL_EBB (nr_regions) = 0;
1359 nr_regions++;
1360 }
1361
1362 idx++;
1363
1364 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
1365 if (e->dest != EXIT_BLOCK_PTR)
1366 degree[e->dest->index]--;
1367 }
1368 }
1369
1370 if (!large)
1371 {
1372 RGN_NR_BLOCKS (nr_regions) = num_bbs;
1373 nr_regions++;
1374 }
1375 }
1376 }
1377
1378 if (sched_verbose >= 6)
1379 {
1380 int *s2, s2_sz;
1381
1382 /* Get the new statistics and print the comparison with the
1383 one before calling this function. */
1384 s2_sz = gather_region_statistics (&s2);
1385 print_region_statistics (s1, s1_sz, s2, s2_sz);
1386 free (s1);
1387 free (s2);
1388 }
1389 }
1390
1391 free (order);
1392 free (max_hdr);
1393
1394 *idxp = idx;
1395 }
1396
1397 /* Functions for regions scheduling information. */
1398
1399 /* Compute dominators, probability, and potential-split-edges of bb.
1400 Assume that these values were already computed for bb's predecessors. */
1401
1402 static void
1403 compute_dom_prob_ps (int bb)
1404 {
1405 edge_iterator in_ei;
1406 edge in_edge;
1407
1408 /* We shouldn't have any real ebbs yet. */
1409 gcc_assert (ebb_head [bb] == bb + current_blocks);
1410
1411 if (IS_RGN_ENTRY (bb))
1412 {
1413 bitmap_set_bit (dom[bb], 0);
1414 prob[bb] = REG_BR_PROB_BASE;
1415 return;
1416 }
1417
1418 prob[bb] = 0;
1419
1420 /* Initialize dom[bb] to '111..1'. */
1421 bitmap_ones (dom[bb]);
1422
1423 FOR_EACH_EDGE (in_edge, in_ei, BASIC_BLOCK (BB_TO_BLOCK (bb))->preds)
1424 {
1425 int pred_bb;
1426 edge out_edge;
1427 edge_iterator out_ei;
1428
1429 if (in_edge->src == ENTRY_BLOCK_PTR)
1430 continue;
1431
1432 pred_bb = BLOCK_TO_BB (in_edge->src->index);
1433 bitmap_and (dom[bb], dom[bb], dom[pred_bb]);
1434 bitmap_ior (ancestor_edges[bb],
1435 ancestor_edges[bb], ancestor_edges[pred_bb]);
1436
1437 bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge));
1438
1439 bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]);
1440
1441 FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
1442 bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));
1443
1444 prob[bb] += combine_probabilities (prob[pred_bb], in_edge->probability);
 1445 /* The rounding divide in combine_probabilities can result in an extra
 1446 probability increment propagating along 50-50 edges. Eventually when
 1447 the edges re-merge, the accumulated probability can go slightly above
 1448 REG_BR_PROB_BASE. */
1449 if (prob[bb] > REG_BR_PROB_BASE)
1450 prob[bb] = REG_BR_PROB_BASE;
1451 }
1452
1453 bitmap_set_bit (dom[bb], bb);
1454 bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
1455
1456 if (sched_verbose >= 2)
1457 fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
1458 (100 * prob[bb]) / REG_BR_PROB_BASE);
1459 }
1460
1461 /* Functions for target info. */
1462
 1463 /* Compute in BL the list of split-edges of bb_src relative to bb_trg.
1464 Note that bb_trg dominates bb_src. */
1465
1466 static void
1467 split_edges (int bb_src, int bb_trg, edgelst *bl)
1468 {
1469 sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src]));
1470 bitmap_copy (src, pot_split[bb_src]);
1471
1472 bitmap_and_compl (src, src, pot_split[bb_trg]);
1473 extract_edgelst (src, bl);
1474 sbitmap_free (src);
1475 }
1476
1477 /* Find the valid candidate-source-blocks for the target block TRG, compute
1478 their probability, and check if they are speculative or not.
1479 For speculative sources, compute their update-blocks and split-blocks. */
1480
1481 static void
1482 compute_trg_info (int trg)
1483 {
1484 candidate *sp;
1485 edgelst el = { NULL, 0 };
1486 int i, j, k, update_idx;
1487 basic_block block;
1488 sbitmap visited;
1489 edge_iterator ei;
1490 edge e;
1491
1492 candidate_table = XNEWVEC (candidate, current_nr_blocks);
1493
1494 bblst_last = 0;
1495 /* bblst_table holds split blocks and update blocks for each block after
1496 the current one in the region. split blocks and update blocks are
1497 the TO blocks of region edges, so there can be at most rgn_nr_edges
1498 of them. */
1499 bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
1500 bblst_table = XNEWVEC (basic_block, bblst_size);
1501
1502 edgelst_last = 0;
1503 edgelst_table = XNEWVEC (edge, rgn_nr_edges);
1504
1505 /* Define some of the fields for the target bb as well. */
1506 sp = candidate_table + trg;
1507 sp->is_valid = 1;
1508 sp->is_speculative = 0;
1509 sp->src_prob = REG_BR_PROB_BASE;
1510
1511 visited = sbitmap_alloc (last_basic_block);
1512
1513 for (i = trg + 1; i < current_nr_blocks; i++)
1514 {
1515 sp = candidate_table + i;
1516
1517 sp->is_valid = IS_DOMINATED (i, trg);
1518 if (sp->is_valid)
1519 {
1520 int tf = prob[trg], cf = prob[i];
1521
1522 /* In CFGs with low probability edges TF can possibly be zero. */
1523 sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0);
1524 sp->is_valid = (sp->src_prob >= min_spec_prob);
1525 }
1526
1527 if (sp->is_valid)
1528 {
1529 split_edges (i, trg, &el);
1530 sp->is_speculative = (el.nr_members) ? 1 : 0;
1531 if (sp->is_speculative && !flag_schedule_speculative)
1532 sp->is_valid = 0;
1533 }
1534
1535 if (sp->is_valid)
1536 {
1537 /* Compute split blocks and store them in bblst_table.
1538 The TO block of every split edge is a split block. */
1539 sp->split_bbs.first_member = &bblst_table[bblst_last];
1540 sp->split_bbs.nr_members = el.nr_members;
1541 for (j = 0; j < el.nr_members; bblst_last++, j++)
1542 bblst_table[bblst_last] = el.first_member[j]->dest;
1543 sp->update_bbs.first_member = &bblst_table[bblst_last];
1544
1545 /* Compute update blocks and store them in bblst_table.
1546 For every split edge, look at the FROM block, and check
1547 all out edges. For each out edge that is not a split edge,
1548 add the TO block to the update block list. This list can end
1549 up with a lot of duplicates. We need to weed them out to avoid
1550 overrunning the end of the bblst_table. */
1551
1552 update_idx = 0;
1553 bitmap_clear (visited);
1554 for (j = 0; j < el.nr_members; j++)
1555 {
1556 block = el.first_member[j]->src;
1557 FOR_EACH_EDGE (e, ei, block->succs)
1558 {
1559 if (!bitmap_bit_p (visited, e->dest->index))
1560 {
1561 for (k = 0; k < el.nr_members; k++)
1562 if (e == el.first_member[k])
1563 break;
1564
1565 if (k >= el.nr_members)
1566 {
1567 bblst_table[bblst_last++] = e->dest;
1568 bitmap_set_bit (visited, e->dest->index);
1569 update_idx++;
1570 }
1571 }
1572 }
1573 }
1574 sp->update_bbs.nr_members = update_idx;
1575
1576 /* Make sure we didn't overrun the end of bblst_table. */
1577 gcc_assert (bblst_last <= bblst_size);
1578 }
1579 else
1580 {
1581 sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
1582
1583 sp->is_speculative = 0;
1584 sp->src_prob = 0;
1585 }
1586 }
1587
1588 sbitmap_free (visited);
1589 }
1590
1591 /* Free the computed target info. */
1592 static void
1593 free_trg_info (void)
1594 {
1595 free (candidate_table);
1596 free (bblst_table);
1597 free (edgelst_table);
1598 }
1599
1600 /* Print candidates info, for debugging purposes. Callable from debugger. */
1601
1602 DEBUG_FUNCTION void
1603 debug_candidate (int i)
1604 {
1605 if (!candidate_table[i].is_valid)
1606 return;
1607
1608 if (candidate_table[i].is_speculative)
1609 {
1610 int j;
1611 fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
1612
1613 fprintf (sched_dump, "split path: ");
1614 for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
1615 {
1616 int b = candidate_table[i].split_bbs.first_member[j]->index;
1617
1618 fprintf (sched_dump, " %d ", b);
1619 }
1620 fprintf (sched_dump, "\n");
1621
1622 fprintf (sched_dump, "update path: ");
1623 for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
1624 {
1625 int b = candidate_table[i].update_bbs.first_member[j]->index;
1626
1627 fprintf (sched_dump, " %d ", b);
1628 }
1629 fprintf (sched_dump, "\n");
1630 }
1631 else
1632 {
1633 fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
1634 }
1635 }
1636
1637 /* Print candidates info, for debugging purposes. Callable from debugger. */
1638
1639 DEBUG_FUNCTION void
1640 debug_candidates (int trg)
1641 {
1642 int i;
1643
1644 fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
1645 BB_TO_BLOCK (trg), trg);
1646 for (i = trg + 1; i < current_nr_blocks; i++)
1647 debug_candidate (i);
1648 }
1649
1650 /* Functions for speculative scheduling. */
1651
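 /* Basic blocks for which no dataflow (liveness) information is available;
    check_live_1 conservatively treats registers as live in such blocks. */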
1652 static bitmap_head not_in_df;
1653
1654 /* Return 0 if x is a set of a register alive in the beginning of one
1655 of the split-blocks of src, otherwise return 1. */
1656
1657 static int
1658 check_live_1 (int src, rtx x)
1659 {
1660 int i;
1661 int regno;
1662 rtx reg = SET_DEST (x);
1663
1664 if (reg == 0)
1665 return 1;
1666
1667 while (GET_CODE (reg) == SUBREG
1668 || GET_CODE (reg) == ZERO_EXTRACT
1669 || GET_CODE (reg) == STRICT_LOW_PART)
1670 reg = XEXP (reg, 0);
1671
1672 if (GET_CODE (reg) == PARALLEL)
1673 {
1674 int i;
1675
1676 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1677 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1678 if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
1679 return 1;
1680
1681 return 0;
1682 }
1683
1684 if (!REG_P (reg))
1685 return 1;
1686
1687 regno = REGNO (reg);
1688
1689 if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
1690 {
1691 /* Global registers are assumed live. */
1692 return 0;
1693 }
1694 else
1695 {
1696 if (regno < FIRST_PSEUDO_REGISTER)
1697 {
1698 /* Check for hard registers. */
1699 int j = hard_regno_nregs[regno][GET_MODE (reg)];
1700 while (--j >= 0)
1701 {
1702 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1703 {
1704 basic_block b = candidate_table[src].split_bbs.first_member[i];
1705 int t = bitmap_bit_p (&not_in_df, b->index);
1706
 1707 /* We can have split blocks that were recently generated.
 1708 Such blocks are always outside the current region. */
1709 gcc_assert (!t || (CONTAINING_RGN (b->index)
1710 != CONTAINING_RGN (BB_TO_BLOCK (src))));
1711
1712 if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
1713 return 0;
1714 }
1715 }
1716 }
1717 else
1718 {
1719 /* Check for pseudo registers. */
1720 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1721 {
1722 basic_block b = candidate_table[src].split_bbs.first_member[i];
1723 int t = bitmap_bit_p (&not_in_df, b->index);
1724
1725 gcc_assert (!t || (CONTAINING_RGN (b->index)
1726 != CONTAINING_RGN (BB_TO_BLOCK (src))));
1727
1728 if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
1729 return 0;
1730 }
1731 }
1732 }
1733
1734 return 1;
1735 }
1736
1737 /* If x is a set of a register R, mark that R is alive in the beginning
1738 of every update-block of src. */
1739
1740 static void
1741 update_live_1 (int src, rtx x)
1742 {
1743 int i;
1744 int regno;
1745 rtx reg = SET_DEST (x);
1746
1747 if (reg == 0)
1748 return;
1749
1750 while (GET_CODE (reg) == SUBREG
1751 || GET_CODE (reg) == ZERO_EXTRACT
1752 || GET_CODE (reg) == STRICT_LOW_PART)
1753 reg = XEXP (reg, 0);
1754
1755 if (GET_CODE (reg) == PARALLEL)
1756 {
1757 int i;
1758
1759 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1760 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1761 update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
1762
1763 return;
1764 }
1765
1766 if (!REG_P (reg))
1767 return;
1768
1769 /* Global registers are always live, so the code below does not apply
1770 to them. */
1771
1772 regno = REGNO (reg);
1773
1774 if (! HARD_REGISTER_NUM_P (regno)
1775 || !global_regs[regno])
1776 {
1777 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1778 {
1779 basic_block b = candidate_table[src].update_bbs.first_member[i];
1780
1781 if (HARD_REGISTER_NUM_P (regno))
1782 bitmap_set_range (df_get_live_in (b), regno,
1783 hard_regno_nregs[regno][GET_MODE (reg)]);
1784 else
1785 bitmap_set_bit (df_get_live_in (b), regno);
1786 }
1787 }
1788 }
1789
1790 /* Return 1 if insn can be speculatively moved from block src to trg,
1791 otherwise return 0. Called before first insertion of insn to
1792 ready-list or before the scheduling. */
1793
1794 static int
1795 check_live (rtx insn, int src)
1796 {
1797 /* Find the registers set by instruction. */
1798 if (GET_CODE (PATTERN (insn)) == SET
1799 || GET_CODE (PATTERN (insn)) == CLOBBER)
1800 return check_live_1 (src, PATTERN (insn));
1801 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1802 {
1803 int j;
1804 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1805 if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1806 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1807 && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
1808 return 0;
1809
1810 return 1;
1811 }
1812
1813 return 1;
1814 }
1815
1816 /* Update the live registers info after insn was moved speculatively from
1817 block src to trg. */
1818
1819 static void
1820 update_live (rtx insn, int src)
1821 {
1822 /* Find the registers set by instruction. */
1823 if (GET_CODE (PATTERN (insn)) == SET
1824 || GET_CODE (PATTERN (insn)) == CLOBBER)
1825 update_live_1 (src, PATTERN (insn));
1826 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1827 {
1828 int j;
1829 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1830 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1831 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1832 update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
1833 }
1834 }
1835
1836 /* Nonzero if block bb_to is equal to, or reachable from block bb_from. */
1837 #define IS_REACHABLE(bb_from, bb_to) \
1838 (bb_from == bb_to \
1839 || IS_RGN_ENTRY (bb_from) \
1840 || (bitmap_bit_p (ancestor_edges[bb_to], \
1841 EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK (BB_TO_BLOCK (bb_from)))))))
1842
1843 /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
1844
1845 static void
1846 set_spec_fed (rtx load_insn)
1847 {
1848 sd_iterator_def sd_it;
1849 dep_t dep;
1850
1851 FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
1852 if (DEP_TYPE (dep) == REG_DEP_TRUE)
1853 FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
1854 }
1855
1856 /* On the path from the insn to load_insn_bb, find a conditional
 1857 branch depending on insn that guards the speculative load. */
1858
1859 static int
1860 find_conditional_protection (rtx insn, int load_insn_bb)
1861 {
1862 sd_iterator_def sd_it;
1863 dep_t dep;
1864
1865 /* Iterate through DEF-USE forward dependences. */
1866 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
1867 {
1868 rtx next = DEP_CON (dep);
1869
1870 if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
1871 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
1872 && IS_REACHABLE (INSN_BB (next), load_insn_bb)
1873 && load_insn_bb != INSN_BB (next)
1874 && DEP_TYPE (dep) == REG_DEP_TRUE
1875 && (JUMP_P (next)
1876 || find_conditional_protection (next, load_insn_bb)))
1877 return 1;
1878 }
1879 return 0;
1880 } /* find_conditional_protection */
1881
1882 /* Returns 1 if the same insn1 that participates in the computation
1883 of load_insn's address is feeding a conditional branch that is
1884 guarding on load_insn. This is true if we find two DEF-USE
1885 chains:
1886 insn1 -> ... -> conditional-branch
1887 insn1 -> ... -> load_insn,
1888 and if a flow path exists:
1889 insn1 -> ... -> conditional-branch -> ... -> load_insn,
1890 and if insn1 is on the path
1891 region-entry -> ... -> bb_trg -> ... load_insn.
1892
1893 Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
1894 Locate the branch by following INSN_FORW_DEPS from insn1. */
1895
1896 static int
1897 is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
1898 {
1899 sd_iterator_def sd_it;
1900 dep_t dep;
1901
1902 FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
1903 {
1904 rtx insn1 = DEP_PRO (dep);
1905
1906 /* Must be a DEF-USE dependence upon non-branch. */
1907 if (DEP_TYPE (dep) != REG_DEP_TRUE
1908 || JUMP_P (insn1))
1909 continue;
1910
1911 /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
1912 if (INSN_BB (insn1) == bb_src
1913 || (CONTAINING_RGN (BLOCK_NUM (insn1))
1914 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
1915 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
1916 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
1917 continue;
1918
1919 /* Now search for the conditional-branch. */
1920 if (find_conditional_protection (insn1, bb_src))
1921 return 1;
1922
1923 /* Recursive step: search another insn1, "above" current insn1. */
1924 return is_conditionally_protected (insn1, bb_src, bb_trg);
1925 }
1926
1927 /* The chain does not exist. */
1928 return 0;
1929 } /* is_conditionally_protected */
1930
1931 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1932 load_insn can move speculatively from bb_src to bb_trg. All the
1933 following must hold:
1934
1935 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1936 (2) load_insn and insn2 have a def-use dependence upon
1937 the same insn 'insn1'.
1938 (3) either insn2 is in bb_trg, or:
1939 - there's only one split-block, and
1940 - insn2 is on the escape path.
1941
1942 From all these we can conclude that the two loads access memory
1943 addresses that differ at most by a constant, and hence if moving
1944 load_insn would cause an exception, it would have been caused by
1945 insn2 anyhow. */
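/* For example (an illustrative sketch, not taken from the sources): if
   insn1 sets base register r1, insn2 = "r3 = [r1]" already executes in
   bb_trg, and load_insn = "r2 = [r1+4]" is the candidate in bb_src, the two
   addresses differ only by the constant 4, so moving load_insn up cannot
   introduce a trap that insn2 would not already have caused.  */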
1946
1947 static int
1948 is_pfree (rtx load_insn, int bb_src, int bb_trg)
1949 {
1950 sd_iterator_def back_sd_it;
1951 dep_t back_dep;
1952 candidate *candp = candidate_table + bb_src;
1953
1954 if (candp->split_bbs.nr_members != 1)
1955 /* Must have exactly one escape block. */
1956 return 0;
1957
1958 FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
1959 {
1960 rtx insn1 = DEP_PRO (back_dep);
1961
1962 if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
1963 /* Found a DEF-USE dependence (insn1, load_insn). */
1964 {
1965 sd_iterator_def fore_sd_it;
1966 dep_t fore_dep;
1967
1968 FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
1969 {
1970 rtx insn2 = DEP_CON (fore_dep);
1971
1972 if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
1973 {
1974 /* Found a DEF-USE dependence (insn1, insn2). */
1975 if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
1976 /* insn2 not guaranteed to be a 1 base reg load. */
1977 continue;
1978
1979 if (INSN_BB (insn2) == bb_trg)
1980 /* insn2 is the similar load, in the target block. */
1981 return 1;
1982
1983 if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
1984 /* insn2 is a similar load, in a split-block. */
1985 return 1;
1986 }
1987 }
1988 }
1989 }
1990
1991 /* Couldn't find a similar load. */
1992 return 0;
1993 } /* is_pfree */
1994
1995 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
1996 a load moved speculatively, or if load_insn is protected by
1997 a compare on load_insn's address). */
1998
1999 static int
2000 is_prisky (rtx load_insn, int bb_src, int bb_trg)
2001 {
2002 if (FED_BY_SPEC_LOAD (load_insn))
2003 return 1;
2004
2005 if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
2006 /* Dependence may 'hide' out of the region. */
2007 return 1;
2008
2009 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
2010 return 1;
2011
2012 return 0;
2013 }
2014
2015 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
2016 Return 1 if insn is exception-free (and the motion is valid)
2017 and 0 otherwise. */
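/* A summary of the classification below, derived from the code: TRAP_FREE
   insns are always exception-free and TRAP_RISKY ones never are.  Loads are
   only considered with -fsched-spec-load; IFREE loads pass, IRISKY loads
   fail, PFREE candidates pass if a similar load is found (is_pfree) and
   otherwise fall through to the PRISKY test, which additionally requires
   -fsched-spec-load-dangerous and !is_prisky.  */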
2018
2019 static int
2020 is_exception_free (rtx insn, int bb_src, int bb_trg)
2021 {
2022 int insn_class = haifa_classify_insn (insn);
2023
2024 /* Handle non-load insns. */
2025 switch (insn_class)
2026 {
2027 case TRAP_FREE:
2028 return 1;
2029 case TRAP_RISKY:
2030 return 0;
2031 default:;
2032 }
2033
2034 /* Handle loads. */
2035 if (!flag_schedule_speculative_load)
2036 return 0;
2037 IS_LOAD_INSN (insn) = 1;
2038 switch (insn_class)
2039 {
2040 case IFREE:
2041 return (1);
2042 case IRISKY:
2043 return 0;
2044 case PFREE_CANDIDATE:
2045 if (is_pfree (insn, bb_src, bb_trg))
2046 return 1;
2047 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
2048 case PRISKY_CANDIDATE:
2049 if (!flag_schedule_speculative_load_dangerous
2050 || is_prisky (insn, bb_src, bb_trg))
2051 return 0;
2052 break;
2053 default:;
2054 }
2055
2056 return flag_schedule_speculative_load_dangerous;
2057 }
2058 \f
2059 /* The number of insns from the current block scheduled so far. */
2060 static int sched_target_n_insns;
2061 /* The number of insns from the current block to be scheduled in total. */
2062 static int target_n_insns;
2063 /* The number of insns from the entire region scheduled so far. */
2064 static int sched_n_insns;
2065
2066 /* Implementations of the sched_info functions for region scheduling. */
2067 static void init_ready_list (void);
2068 static int can_schedule_ready_p (rtx);
2069 static void begin_schedule_ready (rtx);
2070 static ds_t new_ready (rtx, ds_t);
2071 static int schedule_more_p (void);
2072 static const char *rgn_print_insn (const_rtx, int);
2073 static int rgn_rank (rtx, rtx);
2074 static void compute_jump_reg_dependencies (rtx, regset);
2075
2076 /* Functions for speculative scheduling. */
2077 static void rgn_add_remove_insn (rtx, int);
2078 static void rgn_add_block (basic_block, basic_block);
2079 static void rgn_fix_recovery_cfg (int, int, int);
2080 static basic_block advance_target_bb (basic_block, rtx);
2081
2082 /* Return nonzero if there are more insns that should be scheduled. */
2083
2084 static int
2085 schedule_more_p (void)
2086 {
2087 return sched_target_n_insns < target_n_insns;
2088 }
2089
2090 /* Add all insns that are initially ready to the ready list. Called
2091 once before scheduling a set of insns. */
2092
2093 static void
2094 init_ready_list (void)
2095 {
2096 rtx prev_head = current_sched_info->prev_head;
2097 rtx next_tail = current_sched_info->next_tail;
2098 int bb_src;
2099 rtx insn;
2100
2101 target_n_insns = 0;
2102 sched_target_n_insns = 0;
2103 sched_n_insns = 0;
2104
2105 /* Print debugging information. */
2106 if (sched_verbose >= 5)
2107 debug_rgn_dependencies (target_bb);
2108
2109 /* Prepare current target block info. */
2110 if (current_nr_blocks > 1)
2111 compute_trg_info (target_bb);
2112
2113 /* Initialize ready list with all 'ready' insns in target block.
2114 Count number of insns in the target block being scheduled. */
2115 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2116 {
2117 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2118 TODO_SPEC (insn) = HARD_DEP;
2119 try_ready (insn);
2120 target_n_insns++;
2121
2122 gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
2123 }
2124
2125 /* Add to ready list all 'ready' insns in valid source blocks.
2126 For speculative insns, check-live, exception-free, and
2127 issue-delay. */
2128 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2129 if (IS_VALID (bb_src))
2130 {
2131 rtx src_head;
2132 rtx src_next_tail;
2133 rtx tail, head;
2134
2135 get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
2136 &head, &tail);
2137 src_next_tail = NEXT_INSN (tail);
2138 src_head = head;
2139
2140 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2141 if (INSN_P (insn))
2142 {
2143 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2144 TODO_SPEC (insn) = HARD_DEP;
2145 try_ready (insn);
2146 }
2147 }
2148 }
2149
2150 /* Called after taking INSN from the ready list. Returns nonzero if this
2151 insn can be scheduled, zero if we should silently discard it. */
2152
2153 static int
2154 can_schedule_ready_p (rtx insn)
2155 {
2156 /* An interblock motion? */
2157 if (INSN_BB (insn) != target_bb
2158 && IS_SPECULATIVE_INSN (insn)
2159 && !check_live (insn, INSN_BB (insn)))
2160 return 0;
2161 else
2162 return 1;
2163 }
2164
2165 /* Updates counter and other information. Split from can_schedule_ready_p ()
2166 because when we schedule an insn speculatively, the insn passed to
2167 can_schedule_ready_p () differs from the one passed to
2168 begin_schedule_ready (). */
2169 static void
2170 begin_schedule_ready (rtx insn)
2171 {
2172 /* An interblock motion? */
2173 if (INSN_BB (insn) != target_bb)
2174 {
2175 if (IS_SPECULATIVE_INSN (insn))
2176 {
2177 gcc_assert (check_live (insn, INSN_BB (insn)));
2178
2179 update_live (insn, INSN_BB (insn));
2180
2181 /* For speculative load, mark insns fed by it. */
2182 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2183 set_spec_fed (insn);
2184
2185 nr_spec++;
2186 }
2187 nr_inter++;
2188 }
2189 else
2190 {
2191 /* An in-block motion. */
2192 sched_target_n_insns++;
2193 }
2194 sched_n_insns++;
2195 }
2196
2197 /* Called after NEXT has all its hard dependencies resolved and the speculation
2198 of type TS is enough to overcome them all.
2199 Return the speculation status with which NEXT should be moved to the ready list
2200 or the queue, or DEP_POSTPONED if it is not ready yet. */
2201 static ds_t
2202 new_ready (rtx next, ds_t ts)
2203 {
2204 if (INSN_BB (next) != target_bb)
2205 {
2206 int not_ex_free = 0;
2207
2208 /* For speculative insns, before inserting to ready/queue,
2209 check live, exception-free, and issue-delay. */
2210 if (!IS_VALID (INSN_BB (next))
2211 || CANT_MOVE (next)
2212 || (IS_SPECULATIVE_INSN (next)
2213 && ((recog_memoized (next) >= 0
2214 && min_insn_conflict_delay (curr_state, next, next)
2215 > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
2216 || IS_SPECULATION_CHECK_P (next)
2217 || !check_live (next, INSN_BB (next))
2218 || (not_ex_free = !is_exception_free (next, INSN_BB (next),
2219 target_bb)))))
2220 {
2221 if (not_ex_free
2222 /* We are here because is_exception_free () == false.
2223 But we possibly can handle that with control speculation. */
2224 && sched_deps_info->generate_spec_deps
2225 && spec_info->mask & BEGIN_CONTROL)
2226 {
2227 ds_t new_ds;
2228
2229 /* Add control speculation to NEXT's dependency type. */
2230 new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
2231
2232 /* Check if NEXT can be speculated with new dependency type. */
2233 if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
2234 /* Here we get a new control-speculative instruction. */
2235 ts = new_ds;
2236 else
2237 /* NEXT isn't ready yet. */
2238 ts = DEP_POSTPONED;
2239 }
2240 else
2241 /* NEXT isn't ready yet. */
2242 ts = DEP_POSTPONED;
2243 }
2244 }
2245
2246 return ts;
2247 }
2248
2249 /* Return a string that contains the insn uid and optionally anything else
2250 necessary to identify this insn in an output. It's valid to use a
2251 static buffer for this. The ALIGNED parameter should cause the string
2252 to be formatted so that multiple output lines will line up nicely. */
2253
2254 static const char *
2255 rgn_print_insn (const_rtx insn, int aligned)
2256 {
2257 static char tmp[80];
2258
2259 if (aligned)
2260 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2261 else
2262 {
2263 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2264 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2265 else
2266 sprintf (tmp, "%d", INSN_UID (insn));
2267 }
2268 return tmp;
2269 }
2270
2271 /* Compare priority of two insns. Return a positive number if the second
2272 insn is to be preferred for scheduling, and a negative one if the first
2273 is to be preferred. Zero if they are equally good. */
2274
2275 static int
2276 rgn_rank (rtx insn1, rtx insn2)
2277 {
2278 /* Some comparisons make sense in interblock scheduling only. */
2279 if (INSN_BB (insn1) != INSN_BB (insn2))
2280 {
2281 int spec_val, prob_val;
2282
2283 /* Prefer an inblock motion over an interblock motion. */
2284 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2285 return 1;
2286 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2287 return -1;
2288
2289 /* Prefer a useful motion over a speculative one. */
2290 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2291 if (spec_val)
2292 return spec_val;
2293
2294 /* Prefer a more probable (speculative) insn. */
2295 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2296 if (prob_val)
2297 return prob_val;
2298 }
2299 return 0;
2300 }
2301
2302 /* NEXT is an instruction that depends on INSN (a backward dependence);
2303 return nonzero if we should include this dependence in priority
2304 calculations. */
2305
2306 int
2307 contributes_to_priority (rtx next, rtx insn)
2308 {
2309 /* NEXT and INSN reside in one ebb. */
2310 return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
2311 }
2312
2313 /* INSN is a JUMP_INSN. Store the set of registers that must be
2314 considered as used by this jump in USED. */
2315
2316 static void
2317 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
2318 regset used ATTRIBUTE_UNUSED)
2319 {
2320 /* Nothing to do here, since we postprocess jumps in
2321 add_branch_dependences. */
2322 }
2323
2324 /* This variable holds common_sched_info hooks and data relevant to
2325 the interblock scheduler. */
2326 static struct common_sched_info_def rgn_common_sched_info;
2327
2328
2329 /* This holds data for the dependence analysis relevant to
2330 the interblock scheduler. */
2331 static struct sched_deps_info_def rgn_sched_deps_info;
2332
2333 /* This holds constant data used for initializing the above structure
2334 for the Haifa scheduler. */
2335 static const struct sched_deps_info_def rgn_const_sched_deps_info =
2336 {
2337 compute_jump_reg_dependencies,
2338 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2339 0, 0, 0
2340 };
2341
2342 /* Same as above, but for the selective scheduler. */
2343 static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
2344 {
2345 compute_jump_reg_dependencies,
2346 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2347 0, 0, 0
2348 };
2349
2350 /* Return true if scheduling INSN will finish the scheduling of the
2351 current block. */
2352 static bool
2353 rgn_insn_finishes_block_p (rtx insn)
2354 {
2355 if (INSN_BB (insn) == target_bb
2356 && sched_target_n_insns + 1 == target_n_insns)
2357 /* INSN is the last not-scheduled instruction in the current block. */
2358 return true;
2359
2360 return false;
2361 }
2362
2363 /* Used in schedule_insns to initialize current_sched_info for scheduling
2364 regions (or single basic blocks). */
2365
2366 static const struct haifa_sched_info rgn_const_sched_info =
2367 {
2368 init_ready_list,
2369 can_schedule_ready_p,
2370 schedule_more_p,
2371 new_ready,
2372 rgn_rank,
2373 rgn_print_insn,
2374 contributes_to_priority,
2375 rgn_insn_finishes_block_p,
2376
2377 NULL, NULL,
2378 NULL, NULL,
2379 0, 0,
2380
2381 rgn_add_remove_insn,
2382 begin_schedule_ready,
2383 NULL,
2384 advance_target_bb,
2385 NULL, NULL,
2386 SCHED_RGN
2387 };
2388
2389 /* This variable holds the data and hooks needed by the Haifa scheduler backend
2390 for the interblock scheduler frontend. */
2391 static struct haifa_sched_info rgn_sched_info;
2392
2393 /* Returns maximum priority that an insn was assigned to. */
2394
2395 int
2396 get_rgn_sched_max_insns_priority (void)
2397 {
2398 return rgn_sched_info.sched_max_insns_priority;
2399 }
2400
2401 /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */
2402
2403 static bool
2404 sets_likely_spilled (rtx pat)
2405 {
2406 bool ret = false;
2407 note_stores (pat, sets_likely_spilled_1, &ret);
2408 return ret;
2409 }
2410
2411 static void
2412 sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
2413 {
2414 bool *ret = (bool *) data;
2415
2416 if (GET_CODE (pat) == SET
2417 && REG_P (x)
2418 && HARD_REGISTER_P (x)
2419 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
2420 *ret = true;
2421 }
2422
2423 /* A bitmap to note insns that participate in any dependency. Used in
2424 add_branch_dependences. */
2425 static sbitmap insn_referenced;
2426
2427 /* Add dependences so that branches are scheduled to run last in their
2428 block. */
2429 static void
2430 add_branch_dependences (rtx head, rtx tail)
2431 {
2432 rtx insn, last;
2433
2434 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2435 that can throw exceptions, force them to remain in order at the end of
2436 the block by adding dependencies and giving the last a high priority.
2437 There may be notes present, and prev_head may also be a note.
2438
2439 Branches must obviously remain at the end. Calls should remain at the
2440 end since moving them results in worse register allocation. Uses remain
2441 at the end to ensure proper register allocation.
2442
2443 cc0 setters remain at the end because they can't be moved away from
2444 their cc0 user.
2445
2446 Predecessors of SCHED_GROUP_P instructions at the end remain at the end.
2447
2448 COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
2449
2450 Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
2451 values) are not moved before reload because we can wind up with register
2452 allocation failures. */
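/* As an illustrative example (not part of the original comment), for a
   block ending in

     use r0
     call foo
     jump L1

   the loop below walks backwards from the jump, sets CANT_MOVE on each of
   these insns and adds anti-dependences so that the jump must follow the
   call and the call must follow the use, keeping them in order at the end
   of the block.  */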
2453
2454 while (tail != head && DEBUG_INSN_P (tail))
2455 tail = PREV_INSN (tail);
2456
2457 insn = tail;
2458 last = 0;
2459 while (CALL_P (insn)
2460 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
2461 || (NONJUMP_INSN_P (insn)
2462 && (GET_CODE (PATTERN (insn)) == USE
2463 || GET_CODE (PATTERN (insn)) == CLOBBER
2464 || can_throw_internal (insn)
2465 #ifdef HAVE_cc0
2466 || sets_cc0_p (PATTERN (insn))
2467 #endif
2468 || (!reload_completed
2469 && sets_likely_spilled (PATTERN (insn)))))
2470 || NOTE_P (insn)
2471 || (last != 0 && SCHED_GROUP_P (last)))
2472 {
2473 if (!NOTE_P (insn))
2474 {
2475 if (last != 0
2476 && sd_find_dep_between (insn, last, false) == NULL)
2477 {
2478 if (! sched_insns_conditions_mutex_p (last, insn))
2479 add_dependence (last, insn, REG_DEP_ANTI);
2480 bitmap_set_bit (insn_referenced, INSN_LUID (insn));
2481 }
2482
2483 CANT_MOVE (insn) = 1;
2484
2485 last = insn;
2486 }
2487
2488 /* Don't overrun the bounds of the basic block. */
2489 if (insn == head)
2490 break;
2491
2492 do
2493 insn = PREV_INSN (insn);
2494 while (insn != head && DEBUG_INSN_P (insn));
2495 }
2496
2497 /* Make sure these insns are scheduled last in their block. */
2498 insn = last;
2499 if (insn != 0)
2500 while (insn != head)
2501 {
2502 insn = prev_nonnote_insn (insn);
2503
2504 if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
2505 || DEBUG_INSN_P (insn))
2506 continue;
2507
2508 if (! sched_insns_conditions_mutex_p (last, insn))
2509 add_dependence (last, insn, REG_DEP_ANTI);
2510 }
2511
2512 if (!targetm.have_conditional_execution ())
2513 return;
2514
2515 /* Finally, if the block ends in a jump, and we are doing intra-block
2516 scheduling, make sure that the branch depends on any COND_EXEC insns
2517 inside the block to avoid moving the COND_EXECs past the branch insn.
2518
2519 We only have to do this after reload, because (1) before reload there
2520 are no COND_EXEC insns, and (2) the region scheduler is an intra-block
2521 scheduler after reload.
2522
2523 FIXME: We could in some cases move COND_EXEC insns past the branch if
2524 this scheduler would be a little smarter. Consider this code:
2525
2526 T = [addr]
2527 C ? addr += 4
2528 !C ? X += 12
2529 C ? T += 1
2530 C ? jump foo
2531
2532 On a target with a one cycle stall on a memory access the optimal
2533 sequence would be:
2534
2535 T = [addr]
2536 C ? addr += 4
2537 C ? T += 1
2538 C ? jump foo
2539 !C ? X += 12
2540
2541 We don't want to put the 'X += 12' before the branch because it just
2542 wastes a cycle of execution time when the branch is taken.
2543
2544 Note that in the example "!C" will always be true. That is another
2545 possible improvement for handling COND_EXECs in this scheduler: it
2546 could remove always-true predicates. */
2547
2548 if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail)))
2549 return;
2550
2551 insn = tail;
2552 while (insn != head)
2553 {
2554 insn = PREV_INSN (insn);
2555
2556 /* Note that we want to add this dependency even when
2557 sched_insns_conditions_mutex_p returns true. The whole point
2558 is that we _want_ this dependency, even if these insns really
2559 are independent. */
2560 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
2561 add_dependence (tail, insn, REG_DEP_ANTI);
2562 }
2563 }
2564
2565 /* Data structures for the computation of data dependences in a region. We
2566 keep one `deps' structure for every basic block. Before analyzing the
2567 data dependences for a bb, its variables are initialized as a function of
2568 the variables of its predecessors. When the analysis for a bb completes,
2569 we save the contents to the corresponding bb_deps[bb] variable. */
2570
2571 static struct deps_desc *bb_deps;
2572
2573 static void
2574 concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
2575 rtx *old_mems_p)
2576 {
2577 rtx new_insns = *old_insns_p;
2578 rtx new_mems = *old_mems_p;
2579
2580 while (copy_insns)
2581 {
2582 new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
2583 new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
2584 copy_insns = XEXP (copy_insns, 1);
2585 copy_mems = XEXP (copy_mems, 1);
2586 }
2587
2588 *old_insns_p = new_insns;
2589 *old_mems_p = new_mems;
2590 }
2591
2592 /* Join PRED_DEPS to the SUCC_DEPS. */
2593 void
2594 deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
2595 {
2596 unsigned reg;
2597 reg_set_iterator rsi;
2598
2599 /* The reg_last lists are inherited by successor. */
2600 EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
2601 {
2602 struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2603 struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2604
2605 succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2606 succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2607 succ_rl->implicit_sets
2608 = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
2609 succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2610 succ_rl->clobbers);
2611 succ_rl->uses_length += pred_rl->uses_length;
2612 succ_rl->clobbers_length += pred_rl->clobbers_length;
2613 }
2614 IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2615
2616 /* Mem read/write lists are inherited by successor. */
2617 concat_insn_mem_list (pred_deps->pending_read_insns,
2618 pred_deps->pending_read_mems,
2619 &succ_deps->pending_read_insns,
2620 &succ_deps->pending_read_mems);
2621 concat_insn_mem_list (pred_deps->pending_write_insns,
2622 pred_deps->pending_write_mems,
2623 &succ_deps->pending_write_insns,
2624 &succ_deps->pending_write_mems);
2625
2626 succ_deps->pending_jump_insns
2627 = concat_INSN_LIST (pred_deps->pending_jump_insns,
2628 succ_deps->pending_jump_insns);
2629 succ_deps->last_pending_memory_flush
2630 = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2631 succ_deps->last_pending_memory_flush);
2632
2633 succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
2634 succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
2635 succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2636
2637 /* last_function_call is inherited by successor. */
2638 succ_deps->last_function_call
2639 = concat_INSN_LIST (pred_deps->last_function_call,
2640 succ_deps->last_function_call);
2641
2642 /* last_function_call_may_noreturn is inherited by successor. */
2643 succ_deps->last_function_call_may_noreturn
2644 = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
2645 succ_deps->last_function_call_may_noreturn);
2646
2647 /* sched_before_next_call is inherited by successor. */
2648 succ_deps->sched_before_next_call
2649 = concat_INSN_LIST (pred_deps->sched_before_next_call,
2650 succ_deps->sched_before_next_call);
2651 }
2652
2653 /* After computing the dependencies for block BB, propagate the dependencies
2654 found in PRED_DEPS to the successors of the block. */
2655 static void
2656 propagate_deps (int bb, struct deps_desc *pred_deps)
2657 {
2658 basic_block block = BASIC_BLOCK (BB_TO_BLOCK (bb));
2659 edge_iterator ei;
2660 edge e;
2661
2662 /* bb's structures are inherited by its successors. */
2663 FOR_EACH_EDGE (e, ei, block->succs)
2664 {
2665 /* Only bbs "below" bb, in the same region, are interesting. */
2666 if (e->dest == EXIT_BLOCK_PTR
2667 || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
2668 || BLOCK_TO_BB (e->dest->index) <= bb)
2669 continue;
2670
2671 deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
2672 }
2673
2674 /* These lists should point to the right place, for correct
2675 freeing later. */
2676 bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2677 bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2678 bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2679 bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2680 bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns;
2681
2682 /* Can't allow these to be freed twice. */
2683 pred_deps->pending_read_insns = 0;
2684 pred_deps->pending_read_mems = 0;
2685 pred_deps->pending_write_insns = 0;
2686 pred_deps->pending_write_mems = 0;
2687 pred_deps->pending_jump_insns = 0;
2688 }
2689
2690 /* Compute dependences inside bb. In a multiple-block region:
2691 (1) a bb is analyzed after its predecessors, and (2) the lists in
2692 effect at the end of bb (after analyzing for bb) are inherited by
2693 bb's successors.
2694
2695 Specifically for reg-reg data dependences, the block insns are
2696 scanned by sched_analyze () top-to-bottom. Three lists are
2697 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2698 reg_last[].implicit_sets for implicit hard register DEFs, and
2699 reg_last[].uses for register USEs.
2700
2701 When analysis is completed for bb, we update for its successors:
2702 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2703 ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
2704 ; - USES[succ] = Union (USES [succ], USES [bb])
2705
2706 The mechanism for computing mem-mem data dependence is very
2707 similar, and the result is interblock dependences in the region. */
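/* A small worked instance (illustrative, not from the original comment):
   if bb1 contains "r4 = ..." and a later block bb2 in the same region
   contains "... = r4", then after bb1 is analyzed its reg_last entry for r4
   is joined into bb_deps[bb2] by propagate_deps, so sched_analyze of bb2
   records an interblock true dependence of the use in bb2 on the set in
   bb1.  */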
2708
2709 static void
2710 compute_block_dependences (int bb)
2711 {
2712 rtx head, tail;
2713 struct deps_desc tmp_deps;
2714
2715 tmp_deps = bb_deps[bb];
2716
2717 /* Do the analysis for this block. */
2718 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2719 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2720
2721 sched_analyze (&tmp_deps, head, tail);
2722
2723 /* Selective scheduling handles control dependencies by itself. */
2724 if (!sel_sched_p ())
2725 add_branch_dependences (head, tail);
2726
2727 if (current_nr_blocks > 1)
2728 propagate_deps (bb, &tmp_deps);
2729
2730 /* Free up the INSN_LISTs. */
2731 free_deps (&tmp_deps);
2732
2733 if (targetm.sched.dependencies_evaluation_hook)
2734 targetm.sched.dependencies_evaluation_hook (head, tail);
2735 }
2736
2737 /* Free dependencies of instructions inside BB. */
2738 static void
2739 free_block_dependencies (int bb)
2740 {
2741 rtx head;
2742 rtx tail;
2743
2744 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2745
2746 if (no_real_insns_p (head, tail))
2747 return;
2748
2749 sched_free_deps (head, tail, true);
2750 }
2751
2752 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2753 them to the unused_*_list variables, so that they can be reused. */
2754
2755 static void
2756 free_pending_lists (void)
2757 {
2758 int bb;
2759
2760 for (bb = 0; bb < current_nr_blocks; bb++)
2761 {
2762 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2763 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2764 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2765 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2766 free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns);
2767 }
2768 }
2769 \f
2770 /* Print dependences for debugging starting from FROM_BB.
2771 Callable from debugger. */
2774 DEBUG_FUNCTION void
2775 debug_rgn_dependencies (int from_bb)
2776 {
2777 int bb;
2778
2779 fprintf (sched_dump,
2780 ";; --------------- forward dependences: ------------ \n");
2781
2782 for (bb = from_bb; bb < current_nr_blocks; bb++)
2783 {
2784 rtx head, tail;
2785
2786 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2787 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2788 BB_TO_BLOCK (bb), bb);
2789
2790 debug_dependencies (head, tail);
2791 }
2792 }
2793
2794 /* Print dependency information for instructions between HEAD and TAIL.
2795 ??? This function would probably fit best in haifa-sched.c. */
2796 void debug_dependencies (rtx head, rtx tail)
2797 {
2798 rtx insn;
2799 rtx next_tail = NEXT_INSN (tail);
2800
2801 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2802 "insn", "code", "bb", "dep", "prio", "cost",
2803 "reservation");
2804 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2805 "----", "----", "--", "---", "----", "----",
2806 "-----------");
2807
2808 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2809 {
2810 if (! INSN_P (insn))
2811 {
2812 int n;
2813 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2814 if (NOTE_P (insn))
2815 {
2816 n = NOTE_KIND (insn);
2817 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2818 }
2819 else
2820 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2821 continue;
2822 }
2823
2824 fprintf (sched_dump,
2825 ";; %s%5d%6d%6d%6d%6d%6d ",
2826 (SCHED_GROUP_P (insn) ? "+" : " "),
2827 INSN_UID (insn),
2828 INSN_CODE (insn),
2829 BLOCK_NUM (insn),
2830 sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
2831 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2832 : INSN_PRIORITY (insn))
2833 : INSN_PRIORITY (insn)),
2834 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2835 : insn_cost (insn))
2836 : insn_cost (insn)));
2837
2838 if (recog_memoized (insn) < 0)
2839 fprintf (sched_dump, "nothing");
2840 else
2841 print_reservation (sched_dump, insn);
2842
2843 fprintf (sched_dump, "\t: ");
2844 {
2845 sd_iterator_def sd_it;
2846 dep_t dep;
2847
2848 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
2849 fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)),
2850 DEP_NONREG (dep) ? "n" : "",
2851 DEP_MULTIPLE (dep) ? "m" : "");
2852 }
2853 fprintf (sched_dump, "\n");
2854 }
2855
2856 fprintf (sched_dump, "\n");
2857 }
2858 \f
2859 /* Returns true if all the basic blocks of the current region have
2860 the BB_DISABLE_SCHEDULE flag set, which means not to schedule that region. */
2861 bool
2862 sched_is_disabled_for_current_region_p (void)
2863 {
2864 int bb;
2865
2866 for (bb = 0; bb < current_nr_blocks; bb++)
2867 if (!(BASIC_BLOCK (BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
2868 return false;
2869
2870 return true;
2871 }
2872
2873 /* Free all region dependencies saved in INSN_BACK_DEPS and
2874 INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly
2875 when scheduling, so this function is supposed to be called from
2876 the selective scheduler only. */
2877 void
2878 free_rgn_deps (void)
2879 {
2880 int bb;
2881
2882 for (bb = 0; bb < current_nr_blocks; bb++)
2883 {
2884 rtx head, tail;
2885
2886 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2887 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2888
2889 sched_free_deps (head, tail, false);
2890 }
2891 }
2892
2893 static int rgn_n_insns;
2894
2895 /* Compute insn priority for the current region. */
2896 void
2897 compute_priorities (void)
2898 {
2899 int bb;
2900
2901 current_sched_info->sched_max_insns_priority = 0;
2902 for (bb = 0; bb < current_nr_blocks; bb++)
2903 {
2904 rtx head, tail;
2905
2906 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2907 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2908
2909 if (no_real_insns_p (head, tail))
2910 continue;
2911
2912 rgn_n_insns += set_priorities (head, tail);
2913 }
2914 current_sched_info->sched_max_insns_priority++;
2915 }
2916
2917 /* (Re-)initialize the arrays of DFA states at the end of each basic block.
2918
2919 SAVED_LAST_BASIC_BLOCK is the previous length of the arrays. It must be
2920 zero for the first call to this function, to allocate the arrays for the
2921 first time.
2922
2923 This function is called once during initialization of the scheduler, and
2924 called again to resize the arrays if new basic blocks have been created,
2925 for example for speculation recovery code. */
2926
2927 static void
2928 realloc_bb_state_array (int saved_last_basic_block)
2929 {
2930 char *old_bb_state_array = bb_state_array;
2931 size_t lbb = (size_t) last_basic_block;
2932 size_t slbb = (size_t) saved_last_basic_block;
2933
2934 /* Nothing to do if nothing changed since the last time this was called. */
2935 if (saved_last_basic_block == last_basic_block)
2936 return;
2937
2938 /* The selective scheduler doesn't use the state arrays. */
2939 if (sel_sched_p ())
2940 {
2941 gcc_assert (bb_state_array == NULL && bb_state == NULL);
2942 return;
2943 }
2944
2945 gcc_checking_assert (saved_last_basic_block == 0
2946 || (bb_state_array != NULL && bb_state != NULL));
2947
2948 bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size);
2949 bb_state = XRESIZEVEC (state_t, bb_state, lbb);
2950
2951 /* If BB_STATE_ARRAY has moved, fixup all the state pointers array.
2952 Otherwise only fixup the newly allocated ones. For the state
2953 array itself, only initialize the new entries. */
2954 bool bb_state_array_moved = (bb_state_array != old_bb_state_array);
2955 for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++)
2956 bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size);
2957 for (size_t i = slbb; i < lbb; i++)
2958 state_reset (bb_state[i]);
2959 }
2960
2961 /* Free the arrays of DFA states at the end of each basic block. */
2962
2963 static void
2964 free_bb_state_array (void)
2965 {
2966 free (bb_state_array);
2967 free (bb_state);
2968 bb_state_array = NULL;
2969 bb_state = NULL;
2970 }
2971
2972 /* Schedule a region. A region is either an inner loop, a loop-free
2973 subroutine, or a single basic block. Each bb in the region is
2974 scheduled after its flow predecessors. */
2975
2976 static void
2977 schedule_region (int rgn)
2978 {
2979 int bb;
2980 int sched_rgn_n_insns = 0;
2981
2982 rgn_n_insns = 0;
2983
2984 rgn_setup_region (rgn);
2985
2986 /* Don't schedule a region whose blocks are all marked with
2987 BB_DISABLE_SCHEDULE. */
2988 if (sched_is_disabled_for_current_region_p ())
2989 return;
2990
2991 sched_rgn_compute_dependencies (rgn);
2992
2993 sched_rgn_local_init (rgn);
2994
2995 /* Set priorities. */
2996 compute_priorities ();
2997
2998 sched_extend_ready_list (rgn_n_insns);
2999
3000 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3001 {
3002 sched_init_region_reg_pressure_info ();
3003 for (bb = 0; bb < current_nr_blocks; bb++)
3004 {
3005 basic_block first_bb, last_bb;
3006 rtx head, tail;
3007
3008 first_bb = EBB_FIRST_BB (bb);
3009 last_bb = EBB_LAST_BB (bb);
3010
3011 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3012
3013 if (no_real_insns_p (head, tail))
3014 {
3015 gcc_assert (first_bb == last_bb);
3016 continue;
3017 }
3018 sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
3019 }
3020 }
3021
3022 /* Now we can schedule all blocks. */
3023 for (bb = 0; bb < current_nr_blocks; bb++)
3024 {
3025 basic_block first_bb, last_bb, curr_bb;
3026 rtx head, tail;
3027
3028 first_bb = EBB_FIRST_BB (bb);
3029 last_bb = EBB_LAST_BB (bb);
3030
3031 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3032
3033 if (no_real_insns_p (head, tail))
3034 {
3035 gcc_assert (first_bb == last_bb);
3036 continue;
3037 }
3038
3039 current_sched_info->prev_head = PREV_INSN (head);
3040 current_sched_info->next_tail = NEXT_INSN (tail);
3041
3042 remove_notes (head, tail);
3043
3044 unlink_bb_notes (first_bb, last_bb);
3045
3046 target_bb = bb;
3047
3048 gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
3049 current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
3050
3051 curr_bb = first_bb;
3052 if (dbg_cnt (sched_block))
3053 {
3054 edge f;
3055 int saved_last_basic_block = last_basic_block;
3056
3057 schedule_block (&curr_bb, bb_state[first_bb->index]);
3058 gcc_assert (EBB_FIRST_BB (bb) == first_bb);
3059 sched_rgn_n_insns += sched_n_insns;
3060 realloc_bb_state_array (saved_last_basic_block);
3061 f = find_fallthru_edge (last_bb->succs);
3062 if (f && f->probability * 100 / REG_BR_PROB_BASE >=
3063 PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF))
3064 {
3065 memcpy (bb_state[f->dest->index], curr_state,
3066 dfa_state_size);
3067 if (sched_verbose >= 5)
3068 fprintf (sched_dump, "saving state for edge %d->%d\n",
3069 f->src->index, f->dest->index);
3070 }
3071 }
3072 else
3073 {
3074 sched_rgn_n_insns += rgn_n_insns;
3075 }
3076
3077 /* Clean up. */
3078 if (current_nr_blocks > 1)
3079 free_trg_info ();
3080 }
3081
3082 /* Sanity check: verify that all region insns were scheduled. */
3083 gcc_assert (sched_rgn_n_insns == rgn_n_insns);
3084
3085 sched_finish_ready_list ();
3086
3087 /* Done with this region. */
3088 sched_rgn_local_finish ();
3089
3090 /* Free dependencies. */
3091 for (bb = 0; bb < current_nr_blocks; ++bb)
3092 free_block_dependencies (bb);
3093
3094 gcc_assert (haifa_recovery_bb_ever_added_p
3095 || deps_pools_are_empty_p ());
3096 }
3097
3098 /* Initialize data structures for region scheduling. */
3099
3100 void
3101 sched_rgn_init (bool single_blocks_p)
3102 {
3103 min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
3104 / 100);
3105
3106 nr_inter = 0;
3107 nr_spec = 0;
3108
3109 extend_regions ();
3110
3111 CONTAINING_RGN (ENTRY_BLOCK) = -1;
3112 CONTAINING_RGN (EXIT_BLOCK) = -1;
3113
3114 realloc_bb_state_array (0);
3115
3116 /* Compute regions for scheduling. */
3117 if (single_blocks_p
3118 || n_basic_blocks == NUM_FIXED_BLOCKS + 1
3119 || !flag_schedule_interblock
3120 || is_cfg_nonregular ())
3121 {
3122 find_single_block_region (sel_sched_p ());
3123 }
3124 else
3125 {
3126 /* Compute the dominators and post dominators. */
3127 if (!sel_sched_p ())
3128 calculate_dominance_info (CDI_DOMINATORS);
3129
3130 /* Find regions. */
3131 find_rgns ();
3132
3133 if (sched_verbose >= 3)
3134 debug_regions ();
3135
3136 /* For now. This will move as more and more of haifa is converted
3137 to using the cfg code. */
3138 if (!sel_sched_p ())
3139 free_dominance_info (CDI_DOMINATORS);
3140 }
3141
3142 gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);
3143
3144 RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
3145 RGN_NR_BLOCKS (nr_regions - 1));
3146 }
3147
3148 /* Free data structures for region scheduling. */
3149 void
3150 sched_rgn_finish (void)
3151 {
3152 free_bb_state_array ();
3153
3154 /* Reposition the prologue and epilogue notes in case we moved the
3155 prologue/epilogue insns. */
3156 if (reload_completed)
3157 reposition_prologue_and_epilogue_notes ();
3158
3159 if (sched_verbose)
3160 {
3161 if (reload_completed == 0
3162 && flag_schedule_interblock)
3163 {
3164 fprintf (sched_dump,
3165 "\n;; Procedure interblock/speculative motions == %d/%d \n",
3166 nr_inter, nr_spec);
3167 }
3168 else
3169 gcc_assert (nr_inter <= 0);
3170 fprintf (sched_dump, "\n\n");
3171 }
3172
3173 nr_regions = 0;
3174
3175 free (rgn_table);
3176 rgn_table = NULL;
3177
3178 free (rgn_bb_table);
3179 rgn_bb_table = NULL;
3180
3181 free (block_to_bb);
3182 block_to_bb = NULL;
3183
3184 free (containing_rgn);
3185 containing_rgn = NULL;
3186
3187 free (ebb_head);
3188 ebb_head = NULL;
3189 }
3190
3191 /* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
3192 point to the region RGN. */
3193 void
3194 rgn_setup_region (int rgn)
3195 {
3196 int bb;
3197
3198 /* Set variables for the current region. */
3199 current_nr_blocks = RGN_NR_BLOCKS (rgn);
3200 current_blocks = RGN_BLOCKS (rgn);
3201
3202 /* EBB_HEAD is a region-scope structure. But we realloc it for
3203 each region to save time and memory.
3204 See comments in rgn_add_block for why we allocate one extra element. */
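/* (Descriptive note: ebb_head[bb] is the index into rgn_bb_table of the
   first block of ebb BB in this region, so ebb_head[bb + 1] - 1 indexes its
   last block; the extra element keeps that expression valid for the final
   ebb.)  */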
3205 ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
3206 for (bb = 0; bb <= current_nr_blocks; bb++)
3207 ebb_head[bb] = current_blocks + bb;
3208 }
3209
3210 /* Compute instruction dependencies in region RGN. */
3211 void
3212 sched_rgn_compute_dependencies (int rgn)
3213 {
3214 if (!RGN_DONT_CALC_DEPS (rgn))
3215 {
3216 int bb;
3217
3218 if (sel_sched_p ())
3219 sched_emulate_haifa_p = 1;
3220
3221 init_deps_global ();
3222
3223 /* Initializations for region data dependence analysis. */
3224 bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
3225 for (bb = 0; bb < current_nr_blocks; bb++)
3226 init_deps (bb_deps + bb, false);
3227
3228 /* Initialize bitmap used in add_branch_dependences. */
3229 insn_referenced = sbitmap_alloc (sched_max_luid);
3230 bitmap_clear (insn_referenced);
3231
3232 /* Compute backward dependencies. */
3233 for (bb = 0; bb < current_nr_blocks; bb++)
3234 compute_block_dependences (bb);
3235
3236 sbitmap_free (insn_referenced);
3237 free_pending_lists ();
3238 finish_deps_global ();
3239 free (bb_deps);
3240
3241 /* We don't want to recalculate this twice. */
3242 RGN_DONT_CALC_DEPS (rgn) = 1;
3243
3244 if (sel_sched_p ())
3245 sched_emulate_haifa_p = 0;
3246 }
3247 else
3248 /* Either this is a recovery block, which is always a single-block
3249 region, or we are using selective scheduling. */
3250 gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
3251 }
3252
3253 /* Initialize data structures local to region RGN, used for
3254 interblock scheduling (probabilities, dominators, split edges). */
3255 void
3256 sched_rgn_local_init (int rgn)
3257 {
3258 int bb;
3259
3260 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
3261 if (current_nr_blocks > 1)
3262 {
3263 basic_block block;
3264 edge e;
3265 edge_iterator ei;
3266
3267 prob = XNEWVEC (int, current_nr_blocks);
3268
3269 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
3270 bitmap_vector_clear (dom, current_nr_blocks);
3271
3272 /* Use ->aux to implement EDGE_TO_BIT mapping. */
3273 rgn_nr_edges = 0;
3274 FOR_EACH_BB (block)
3275 {
3276 if (CONTAINING_RGN (block->index) != rgn)
3277 continue;
3278 FOR_EACH_EDGE (e, ei, block->succs)
3279 SET_EDGE_TO_BIT (e, rgn_nr_edges++);
3280 }
3281
3282 rgn_edges = XNEWVEC (edge, rgn_nr_edges);
3283 rgn_nr_edges = 0;
3284 FOR_EACH_BB (block)
3285 {
3286 if (CONTAINING_RGN (block->index) != rgn)
3287 continue;
3288 FOR_EACH_EDGE (e, ei, block->succs)
3289 rgn_edges[rgn_nr_edges++] = e;
3290 }
3291
3292 /* Split edges. */
3293 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3294 bitmap_vector_clear (pot_split, current_nr_blocks);
3295 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3296 bitmap_vector_clear (ancestor_edges, current_nr_blocks);
3297
3298 /* Compute probabilities, dominators, split_edges. */
3299 for (bb = 0; bb < current_nr_blocks; bb++)
3300 compute_dom_prob_ps (bb);
3301
3302 /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
3303 /* We don't need them anymore. But we want to avoid duplication of
3304 aux fields in the newly created edges. */
3305 FOR_EACH_BB (block)
3306 {
3307 if (CONTAINING_RGN (block->index) != rgn)
3308 continue;
3309 FOR_EACH_EDGE (e, ei, block->succs)
3310 e->aux = NULL;
3311 }
3312 }
3313 }
3314
3315 /* Free data computed for the finished region. */
3316 void
3317 sched_rgn_local_free (void)
3318 {
3319 free (prob);
3320 sbitmap_vector_free (dom);
3321 sbitmap_vector_free (pot_split);
3322 sbitmap_vector_free (ancestor_edges);
3323 free (rgn_edges);
3324 }
3325
3326 /* Free data computed for the finished region, if necessary. */
3327 void
3328 sched_rgn_local_finish (void)
3329 {
3330 if (current_nr_blocks > 1 && !sel_sched_p ())
3331 {
3332 sched_rgn_local_free ();
3333 }
3334 }
3335
3336 /* Setup scheduler infos. */
3337 void
3338 rgn_setup_common_sched_info (void)
3339 {
3340 memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
3341 sizeof (rgn_common_sched_info));
3342
3343 rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
3344 rgn_common_sched_info.add_block = rgn_add_block;
3345 rgn_common_sched_info.estimate_number_of_insns
3346 = rgn_estimate_number_of_insns;
3347 rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
3348
3349 common_sched_info = &rgn_common_sched_info;
3350 }
3351
3352 /* Setup all *_sched_info structures (for the Haifa frontend
3353 and for the dependence analysis) in the interblock scheduler. */
3354 void
3355 rgn_setup_sched_infos (void)
3356 {
3357 if (!sel_sched_p ())
3358 memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
3359 sizeof (rgn_sched_deps_info));
3360 else
3361 memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
3362 sizeof (rgn_sched_deps_info));
3363
3364 sched_deps_info = &rgn_sched_deps_info;
3365
3366 memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
3367 current_sched_info = &rgn_sched_info;
3368 }
3369
3370 /* The one entry point in this file. */
3371 void
3372 schedule_insns (void)
3373 {
3374 int rgn;
3375
3376 /* Taking care of this degenerate case makes the rest of
3377 this code simpler. */
3378 if (n_basic_blocks == NUM_FIXED_BLOCKS)
3379 return;
3380
3381 rgn_setup_common_sched_info ();
3382 rgn_setup_sched_infos ();
3383
3384 haifa_sched_init ();
3385 sched_rgn_init (reload_completed);
3386
3387 bitmap_initialize (&not_in_df, 0);
3388 bitmap_clear (&not_in_df);
3389
3390 /* Schedule every region in the subroutine. */
3391 for (rgn = 0; rgn < nr_regions; rgn++)
3392 if (dbg_cnt (sched_region))
3393 schedule_region (rgn);
3394
3395 /* Clean up. */
3396 sched_rgn_finish ();
3397 bitmap_clear (&not_in_df);
3398
3399 haifa_sched_finish ();
3400 }
3401
3402 /* INSN has been added to/removed from current region. */
3403 static void
3404 rgn_add_remove_insn (rtx insn, int remove_p)
3405 {
3406 if (!remove_p)
3407 rgn_n_insns++;
3408 else
3409 rgn_n_insns--;
3410
3411 if (INSN_BB (insn) == target_bb)
3412 {
3413 if (!remove_p)
3414 target_n_insns++;
3415 else
3416 target_n_insns--;
3417 }
3418 }
3419
3420 /* Extend internal data structures. */
3421 void
3422 extend_regions (void)
3423 {
3424 rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
3425 rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
3426 block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
3427 containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
3428 }
3429
3430 void
3431 rgn_make_new_region_out_of_new_block (basic_block bb)
3432 {
3433 int i;
3434
3435 i = RGN_BLOCKS (nr_regions);
3436 /* I - first free position in rgn_bb_table. */
3437
3438 rgn_bb_table[i] = bb->index;
3439 RGN_NR_BLOCKS (nr_regions) = 1;
3440 RGN_HAS_REAL_EBB (nr_regions) = 0;
3441 RGN_DONT_CALC_DEPS (nr_regions) = 0;
3442 CONTAINING_RGN (bb->index) = nr_regions;
3443 BLOCK_TO_BB (bb->index) = 0;
3444
3445 nr_regions++;
3446
3447 RGN_BLOCKS (nr_regions) = i + 1;
3448 }
3449
3450 /* BB was added to ebb after AFTER. */
3451 static void
3452 rgn_add_block (basic_block bb, basic_block after)
3453 {
3454 extend_regions ();
3455 bitmap_set_bit (&not_in_df, bb->index);
3456
3457 if (after == 0 || after == EXIT_BLOCK_PTR)
3458 {
3459 rgn_make_new_region_out_of_new_block (bb);
3460 RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR);
3461 }
3462 else
3463 {
3464 int i, pos;
3465
3466 /* We need to fix rgn_table, block_to_bb, containing_rgn
3467 and ebb_head. */
3468
3469 BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
3470
3471 /* We extend ebb_head to one more position to
3472 easily find the last position of the last ebb in
3473 the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
3474 is _always_ valid for access. */
3475
3476 i = BLOCK_TO_BB (after->index) + 1;
3477 pos = ebb_head[i] - 1;
3478 /* Now POS is the index of the last block in the region. */
3479
3480 /* Find index of basic block AFTER. */
3481 for (; rgn_bb_table[pos] != after->index; pos--)
3482 ;
3483
3484 pos++;
3485 gcc_assert (pos > ebb_head[i - 1]);
3486
3487 /* i - ebb right after "AFTER". */
3488 /* ebb_head[i] - VALID. */
3489
3490 /* Source position: ebb_head[i]
3491 Destination position: ebb_head[i] + 1
3492 Last position:
3493 RGN_BLOCKS (nr_regions) - 1
3494 Number of elements to copy: (last_position) - (source_position) + 1
3495 */
3496
3497 memmove (rgn_bb_table + pos + 1,
3498 rgn_bb_table + pos,
3499 ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
3500 * sizeof (*rgn_bb_table));
3501
3502 rgn_bb_table[pos] = bb->index;
3503
3504 for (; i <= current_nr_blocks; i++)
3505 ebb_head [i]++;
3506
3507 i = CONTAINING_RGN (after->index);
3508 CONTAINING_RGN (bb->index) = i;
3509
3510 RGN_HAS_REAL_EBB (i) = 1;
3511
3512 for (++i; i <= nr_regions; i++)
3513 RGN_BLOCKS (i)++;
3514 }
3515 }
3516
3517 /* Fix internal data after interblock movement of jump instruction.
3518 For parameter meaning please refer to
3519 sched-int.h: struct sched_info: fix_recovery_cfg. */
3520 static void
3521 rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
3522 {
3523 int old_pos, new_pos, i;
3524
3525 BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
3526
3527 for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
3528 rgn_bb_table[old_pos] != check_bb_nexti;
3529 old_pos--)
3530 ;
3531 gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
3532
3533 for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
3534 rgn_bb_table[new_pos] != bbi;
3535 new_pos--)
3536 ;
3537 new_pos++;
3538 gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
3539
3540 gcc_assert (new_pos < old_pos);
3541
3542 memmove (rgn_bb_table + new_pos + 1,
3543 rgn_bb_table + new_pos,
3544 (old_pos - new_pos) * sizeof (*rgn_bb_table));
3545
3546 rgn_bb_table[new_pos] = check_bb_nexti;
3547
3548 for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
3549 ebb_head[i]++;
3550 }
3551
3552 /* Return next block in ebb chain. For parameter meaning please refer to
3553 sched-int.h: struct sched_info: advance_target_bb. */
3554 static basic_block
3555 advance_target_bb (basic_block bb, rtx insn)
3556 {
3557 if (insn)
3558 return 0;
3559
3560 gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
3561 && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
3562 return bb->next_bb;
3563 }
3564
3565 #endif
3566 \f
3567 static bool
3568 gate_handle_sched (void)
3569 {
3570 #ifdef INSN_SCHEDULING
3571 return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
3572 #else
3573 return 0;
3574 #endif
3575 }
3576
3577 /* Run instruction scheduler. */
3578 static unsigned int
3579 rest_of_handle_sched (void)
3580 {
3581 #ifdef INSN_SCHEDULING
3582 if (flag_selective_scheduling
3583 && ! maybe_skip_selective_scheduling ())
3584 run_selective_scheduling ();
3585 else
3586 schedule_insns ();
3587 #endif
3588 return 0;
3589 }
3590
3591 static bool
3592 gate_handle_sched2 (void)
3593 {
3594 #ifdef INSN_SCHEDULING
3595 return optimize > 0 && flag_schedule_insns_after_reload
3596 && !targetm.delay_sched2 && dbg_cnt (sched2_func);
3597 #else
3598 return 0;
3599 #endif
3600 }
3601
3602 /* Run second scheduling pass after reload. */
3603 static unsigned int
3604 rest_of_handle_sched2 (void)
3605 {
3606 #ifdef INSN_SCHEDULING
3607 if (flag_selective_scheduling2
3608 && ! maybe_skip_selective_scheduling ())
3609 run_selective_scheduling ();
3610 else
3611 {
3612 /* Do control and data sched analysis again,
3613 and write some more of the results to dump file. */
3614 if (flag_sched2_use_superblocks)
3615 schedule_ebbs ();
3616 else
3617 schedule_insns ();
3618 }
3619 #endif
3620 return 0;
3621 }
3622
3623 namespace {
3624
3625 const pass_data pass_data_sched =
3626 {
3627 RTL_PASS, /* type */
3628 "sched1", /* name */
3629 OPTGROUP_NONE, /* optinfo_flags */
3630 true, /* has_gate */
3631 true, /* has_execute */
3632 TV_SCHED, /* tv_id */
3633 0, /* properties_required */
3634 0, /* properties_provided */
3635 0, /* properties_destroyed */
3636 0, /* todo_flags_start */
3637 ( TODO_df_finish | TODO_verify_rtl_sharing
3638 | TODO_verify_flow ), /* todo_flags_finish */
3639 };
3640
3641 class pass_sched : public rtl_opt_pass
3642 {
3643 public:
3644 pass_sched (gcc::context *ctxt)
3645 : rtl_opt_pass (pass_data_sched, ctxt)
3646 {}
3647
3648 /* opt_pass methods: */
3649 bool gate () { return gate_handle_sched (); }
3650 unsigned int execute () { return rest_of_handle_sched (); }
3651
3652 }; // class pass_sched
3653
3654 } // anon namespace
3655
3656 rtl_opt_pass *
3657 make_pass_sched (gcc::context *ctxt)
3658 {
3659 return new pass_sched (ctxt);
3660 }
3661
3662 namespace {
3663
3664 const pass_data pass_data_sched2 =
3665 {
3666 RTL_PASS, /* type */
3667 "sched2", /* name */
3668 OPTGROUP_NONE, /* optinfo_flags */
3669 true, /* has_gate */
3670 true, /* has_execute */
3671 TV_SCHED2, /* tv_id */
3672 0, /* properties_required */
3673 0, /* properties_provided */
3674 0, /* properties_destroyed */
3675 0, /* todo_flags_start */
3676 ( TODO_df_finish | TODO_verify_rtl_sharing
3677 | TODO_verify_flow ), /* todo_flags_finish */
3678 };
3679
3680 class pass_sched2 : public rtl_opt_pass
3681 {
3682 public:
3683 pass_sched2 (gcc::context *ctxt)
3684 : rtl_opt_pass (pass_data_sched2, ctxt)
3685 {}
3686
3687 /* opt_pass methods: */
3688 bool gate () { return gate_handle_sched2 (); }
3689 unsigned int execute () { return rest_of_handle_sched2 (); }
3690
3691 }; // class pass_sched2
3692
3693 } // anon namespace
3694
3695 rtl_opt_pass *
3696 make_pass_sched2 (gcc::context *ctxt)
3697 {
3698 return new pass_sched2 (ctxt);
3699 }