/* Instruction scheduling pass.
   Copyright (C) 1992-2014 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   Interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
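
/* Roughly, the call structure mirrors these levels: schedule_insns ()
   builds the CFG and computes the regions (find_rgns), schedule_region ()
   computes the per-region data needed for interblock motion, and the
   haifa scheduler then schedules the insns of each basic block.  */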
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "input.h"
#include "function.h"
#include "profile.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "predict.h"
#include "basic-block.h"
#include "sched-int.h"
#include "sel-sched.h"
#include "target.h"
#include "tree-pass.h"
#include "dbgcnt.h"

#ifdef INSN_SCHEDULING

/* Some accessor macros for h_i_d members only used within this file.  */
#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)

/* nr_inter/spec counts interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;

static int is_cfg_nonregular (void);

/* Number of regions in the procedure.  */
int nr_regions = 0;

/* Same as above before adding any new regions.  */
static int nr_regions_initial = 0;

/* Table of region descriptions.  */
region *rgn_table = NULL;

/* Array of lists of regions' blocks.  */
int *rgn_bb_table = NULL;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
int *block_to_bb = NULL;
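
/* For example, if a region's blocks in topological order are 7, 9 and 8
   (block numbers made up for illustration), then block_to_bb[7] == 0,
   block_to_bb[9] == 1 and block_to_bb[8] == 2.  */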

/* The number of the region containing a block.  */
int *containing_rgn = NULL;

/* ebb_head [i] - is index in rgn_bb_table of the head basic block of i'th ebb.
   Currently we can get an ebb only through splitting of the currently
   scheduled block, therefore, we don't need an ebb_head array for every
   region; it is sufficient to hold it for the current one only.  */
int *ebb_head = NULL;

/* The minimum probability of reaching a source block so that it will be
   considered for speculative scheduling.  */
static int min_spec_prob;
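
/* This is presumably initialized elsewhere (in sched_rgn_init) from the
   min-spec-prob --param, scaled to the REG_BR_PROB_BASE range; see
   params.def for the default.  */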

static void find_single_block_region (bool);
static void find_rgns (void);
static bool too_large (int, int *, int *);

/* Blocks of the current region being scheduled.  */
int current_nr_blocks;
int current_blocks;

/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
static basic_block *bblst_table;
static int bblst_size, bblst_last;

/* Arrays that hold the DFA state at the end of a basic block, to re-use
   as the initial state at the start of successor blocks.  BB_STATE_ARRAY
   holds the actual DFA states, and BB_STATE[I] is a pointer into
   BB_STATE_ARRAY for basic block I.  FIXME: This should be a vec.  */
static char *bb_state_array = NULL;
static state_t *bb_state = NULL;

/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
typedef struct
{
  basic_block *first_member;
  int nr_members;
}
bblst;

typedef struct
{
  char is_valid;
  char is_speculative;
  int src_prob;
  bblst split_bbs;
  bblst update_bbs;
}
candidate;

static candidate *candidate_table;
#define IS_VALID(src) (candidate_table[src].is_valid)
#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
#define IS_SPECULATIVE_INSN(INSN) \
  (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define SRC_PROB(src) ( candidate_table[src].src_prob )

/* The bb being currently scheduled.  */
int target_bb;

/* List of edges.  */
typedef struct
{
  edge *first_member;
  int nr_members;
}
edgelst;

static edge *edgelst_table;
static int edgelst_last;

static void extract_edgelst (sbitmap, edgelst *);

/* Target info functions.  */
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);

/* Dominators array: dom[i] contains the sbitmap of dominators of
   bb i in the region.  */
static sbitmap *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg?  */
#define IS_DOMINATED(bb_src, bb_trg) \
  ( bitmap_bit_p (dom[bb_src], bb_trg) )

/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
   the probability of bb i relative to the region entry.  */
static int *prob;

/* Bit-set of edges, where bit i stands for edge i.  */
typedef sbitmap edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static edge *rgn_edges;

/* Mapping from each edge in the graph to its number in the rgn.  */
#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
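
/* Note that edge->aux doubles as scratch space: the DFS in
   haifa_find_rgns below also stores into it (see EDGE_PASSED) and
   clears it again afterwards, so the edge-to-bit mapping is only
   meaningful while a region is set up for scheduling.  */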

/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;

#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))

/* Speculative scheduling functions.  */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx, int, int);

static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx_insn *, rtx_insn *);
static void compute_block_dependences (int);

static void schedule_region (int);
static void concat_insn_mem_list (rtx_insn_list *, rtx_expr_list *,
                                  rtx_insn_list **, rtx_expr_list **);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);

/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, if we
   have nonlocal gotos, or if we have an unreachable loop.  */

static int
is_cfg_nonregular (void)
{
  basic_block b;
  rtx_insn *insn;

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)
    return 1;

  /* If we have any forced labels, then the cfg is not well structured.  */
  if (forced_labels)
    return 1;

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that we
     compute an accurate cfg for EH.  */
  if (current_function_has_exception_handlers ())
    return 1;

  /* If we have insns which refer to labels as non-jumped-to operands,
     then we consider the cfg not well structured.  */
  FOR_EACH_BB_FN (b, cfun)
    FOR_BB_INSNS (b, insn)
      {
        rtx note, set, dest;
        rtx_insn *next;

        /* If this function has a computed jump, then we consider the cfg
           not well structured.  */
        if (JUMP_P (insn) && computed_jump_p (insn))
          return 1;

        if (!INSN_P (insn))
          continue;

        note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
        if (note == NULL_RTX)
          continue;

        /* For that label not to be seen as a referred-to label, this
           must be a single-set which is feeding a jump *only*.  This
           could be a conditional jump with the label split off for
           machine-specific reasons or a casesi/tablejump.  */
        next = next_nonnote_insn (insn);
        if (next == NULL_RTX
            || !JUMP_P (next)
            || (JUMP_LABEL (next) != XEXP (note, 0)
                && find_reg_note (next, REG_LABEL_TARGET,
                                  XEXP (note, 0)) == NULL_RTX)
            || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
          return 1;

        set = single_set (insn);
        if (set == NULL_RTX)
          return 1;

        dest = SET_DEST (set);
        if (!REG_P (dest) || !dead_or_set_p (next, dest))
          return 1;
      }

  /* Unreachable loops with more than one basic block are detected
     during the DFS traversal in find_rgns.

     Unreachable loops with a single block are detected here.  This
     test is redundant with the one in find_rgns, but it's much
     cheaper to go ahead and catch the trivial case here.  */
  FOR_EACH_BB_FN (b, cfun)
    {
      if (EDGE_COUNT (b->preds) == 0
          || (single_pred_p (b)
              && single_pred (b) == b))
        return 1;
    }

  /* All the tests passed.  Consider the cfg well structured.  */
  return 0;
}

/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits.  */

static void
extract_edgelst (sbitmap set, edgelst *el)
{
  unsigned int i = 0;
  sbitmap_iterator sbi;

  /* edgelst table space is reused in each call to extract_edgelst.  */
  edgelst_last = 0;

  el->first_member = &edgelst_table[edgelst_last];
  el->nr_members = 0;

  /* Iterate over each set bit in the bitmap.  */
  EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi)
    {
      edgelst_table[edgelst_last++] = rgn_edges[i];
      el->nr_members++;
    }
}

/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_regions (void)
{
  int rgn, bb;

  fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
               rgn_table[rgn].rgn_nr_blocks);
      fprintf (sched_dump, ";;\tbb/block: ");

      /* We don't have ebb_head initialized yet, so we can't use
         BB_TO_BLOCK ().  */
      current_blocks = RGN_BLOCKS (rgn);

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
        fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

      fprintf (sched_dump, "\n\n");
    }
}

/* Print the region's basic blocks.  */

DEBUG_FUNCTION void
debug_region (int rgn)
{
  int bb;

  fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn);
  fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
           rgn_table[rgn].rgn_nr_blocks);
  fprintf (stderr, ";;\tbb/block: ");

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

  fprintf (stderr, "\n\n");

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    {
      dump_bb (stderr,
               BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]),
               0, TDF_SLIM | TDF_BLOCKS);
      fprintf (stderr, "\n");
    }

  fprintf (stderr, "\n");
}
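
/* Both of the above are meant to be called by hand from a debugger,
   e.g. "call debug_regions ()" or "call debug_region (2)" under gdb
   (the region number is just an example).  */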

/* True when a bb with index BB_INDEX is contained in region RGN.  */
static bool
bb_in_region_p (int bb_index, int rgn)
{
  int i;

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    if (rgn_bb_table[current_blocks + i] == bb_index)
      return true;

  return false;
}

/* Dump region RGN to file F using dot syntax.  */
void
dump_region_dot (FILE *f, int rgn)
{
  int i;

  fprintf (f, "digraph Region_%d {\n", rgn);

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    {
      edge e;
      edge_iterator ei;
      int src_bb_num = rgn_bb_table[current_blocks + i];
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num);

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (bb_in_region_p (e->dest->index, rgn))
          fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
    }
  fprintf (f, "}\n");
}

/* The same, but first open a file specified by FNAME.  */
void
dump_region_dot_file (const char *fname, int rgn)
{
  FILE *f = fopen (fname, "wt");
  dump_region_dot (f, rgn);
  fclose (f);
}

/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

static void
find_single_block_region (bool ebbs_p)
{
  basic_block bb, ebb_start;
  int i = 0;

  nr_regions = 0;

  if (ebbs_p)
    {
      int probability_cutoff;
      if (profile_info && flag_branch_probabilities)
        probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
      else
        probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
      probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

      FOR_EACH_BB_FN (ebb_start, cfun)
        {
          RGN_NR_BLOCKS (nr_regions) = 0;
          RGN_BLOCKS (nr_regions) = i;
          RGN_DONT_CALC_DEPS (nr_regions) = 0;
          RGN_HAS_REAL_EBB (nr_regions) = 0;

          for (bb = ebb_start; ; bb = bb->next_bb)
            {
              edge e;

              rgn_bb_table[i] = bb->index;
              RGN_NR_BLOCKS (nr_regions)++;
              CONTAINING_RGN (bb->index) = nr_regions;
              BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
              i++;

              if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                  || LABEL_P (BB_HEAD (bb->next_bb)))
                break;

              e = find_fallthru_edge (bb->succs);
              if (! e)
                break;
              if (e->probability <= probability_cutoff)
                break;
            }

          ebb_start = bb;
          nr_regions++;
        }
    }
  else
    FOR_EACH_BB_FN (bb, cfun)
      {
        rgn_bb_table[nr_regions] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = nr_regions;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;

        CONTAINING_RGN (bb->index) = nr_regions;
        BLOCK_TO_BB (bb->index) = 0;
        nr_regions++;
      }
}

/* Estimate the number of insns in BB.  */
static int
rgn_estimate_number_of_insns (basic_block bb)
{
  int count;

  count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));

  if (MAY_HAVE_DEBUG_INSNS)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        if (DEBUG_INSN_P (insn))
          count--;
    }

  return count;
}

/* Update number of blocks and the estimate for number of insns
   in the region.  Return true if the region is "too large" for interblock
   scheduling (compile time considerations).  */

static bool
too_large (int block, int *num_bbs, int *num_insns)
{
  (*num_bbs)++;
  (*num_insns) += (common_sched_info->estimate_number_of_insns
                   (BASIC_BLOCK_FOR_FN (cfun, block)));

  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
          || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
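
/* The two limits correspond to the --param max-sched-region-blocks and
   --param max-sched-region-insns knobs; see params.def for their
   current default values.  */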

/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the innermost
   loop containing blk.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr) \
{ \
  if (max_hdr[blk] == -1) \
    max_hdr[blk] = hdr; \
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
    bitmap_clear_bit (inner, hdr); \
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
    { \
      bitmap_clear_bit (inner,max_hdr[blk]); \
      max_hdr[blk] = hdr; \
    } \
}
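
/* In other words, of two candidate headers for BLK the one discovered
   later in the DFS (the larger dfs_nr) is kept as the innermost one,
   and the other one loses its "inner" mark.  E.g. if BLK lies in an
   outer loop headed by H1 and an inner loop headed by H2, so that
   dfs_nr[H1] < dfs_nr[H2], then max_hdr[BLK] ends up as H2 and H1 is
   cleared in INNER.  */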

/* Find regions for interblock scheduling.

   A region for scheduling can be:

     * A loop-free procedure, or

     * A reducible inner loop, or

     * A basic block not contained in any other region.

   ?!? In theory we could build other regions based on extended basic
   blocks or reverse extended basic blocks.  Is it worth the trouble?

   Loop blocks that form a region are put into the region's block list
   in topological order.

   This procedure stores its results into the following global (ick)
   variables:

     * nr_regions
     * rgn_table
     * rgn_bb_table
     * block_to_bb
     * containing_rgn

   We use dominator relationships to avoid making regions out of
   non-reducible loops.

   This procedure needs to be converted to work on pred/succ lists instead
   of edge tables.  That would simplify it somewhat.  */

static void
haifa_find_rgns (void)
{
  int *max_hdr, *dfs_nr, *degree;
  char no_loops = 1;
  int node, child, loop_head, i, head, tail;
  int count = 0, sp, idx = 0;
  edge_iterator current_edge;
  edge_iterator *stack;
  int num_bbs, num_insns, unreachable;
  int too_large_failure;
  basic_block bb;

  /* Note if a block is a natural loop header.  */
  sbitmap header;

  /* Note if a block is a natural inner loop header.  */
  sbitmap inner;

  /* Note if a block is in the block queue.  */
  sbitmap in_queue;

  /* Note if a block is on the DFS stack.  */
  sbitmap in_stack;

  /* Perform a DFS traversal of the cfg.  Identify loop headers, inner loops
     and a mapping from block to its loop header (if the block is contained
     in a loop, else -1).

     Store results in HEADER, INNER, and MAX_HDR respectively, these will
     be used as inputs to the second traversal.

     STACK, SP and DFS_NR are only used during the first traversal.  */

  /* Allocate and initialize variables for the first traversal.  */
  max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
  dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
  stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));

  inner = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_ones (inner);

  header = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (header);

  in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (in_queue);

  in_stack = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (in_stack);

  for (i = 0; i < last_basic_block_for_fn (cfun); i++)
    max_hdr[i] = -1;

#define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
#define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))

  /* DFS traversal to find inner loops in the cfg.  */

  current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs);
  sp = -1;

  while (1)
    {
      if (EDGE_PASSED (current_edge))
        {
          /* We have reached a leaf node or a node that was already
             processed.  Pop edges off the stack until we find
             an edge that has not yet been processed.  */
          while (sp >= 0 && EDGE_PASSED (current_edge))
            {
              /* Pop entry off the stack.  */
              current_edge = stack[sp--];
              node = ei_edge (current_edge)->src->index;
              gcc_assert (node != ENTRY_BLOCK);
              child = ei_edge (current_edge)->dest->index;
              gcc_assert (child != EXIT_BLOCK);
              bitmap_clear_bit (in_stack, child);
              if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
                UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
              ei_next (&current_edge);
            }
          /* See if we have finished the DFS tree traversal.  */
          if (sp < 0 && EDGE_PASSED (current_edge))
            break;

          /* Nope, continue the traversal with the popped node.  */
          continue;
        }

      /* Process a node.  */
      node = ei_edge (current_edge)->src->index;
      gcc_assert (node != ENTRY_BLOCK);
      bitmap_set_bit (in_stack, node);
      dfs_nr[node] = ++count;

      /* We don't traverse to the exit block.  */
      child = ei_edge (current_edge)->dest->index;
      if (child == EXIT_BLOCK)
        {
          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;
        }

      /* If the successor is in the stack, then we've found a loop.
         Mark the loop, if it is not a natural loop, then it will
         be rejected during the second traversal.  */
      if (bitmap_bit_p (in_stack, child))
        {
          no_loops = 0;
          bitmap_set_bit (header, child);
          UPDATE_LOOP_RELATIONS (node, child);
          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;
        }

      /* If the child was already visited, then there is no need to visit
         it again.  Just update the loop relationships and restart
         with a new edge.  */
      if (dfs_nr[child])
        {
          if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
            UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;
        }

      /* Push an entry on the stack and continue DFS traversal.  */
      stack[++sp] = current_edge;
      SET_EDGE_PASSED (current_edge);
      current_edge = ei_start (ei_edge (current_edge)->dest->succs);
    }

  /* Reset ->aux field used by EDGE_PASSED.  */
  FOR_ALL_BB_FN (bb, cfun)
    {
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, bb->succs)
        e->aux = NULL;
    }

  /* Another check for unreachable blocks.  The earlier test in
     is_cfg_nonregular only finds unreachable blocks that do not
     form a loop.

     The DFS traversal will mark every block that is reachable from
     the entry node by placing a nonzero value in dfs_nr.  Thus if
     dfs_nr is zero for any block, then it must be unreachable.  */
  unreachable = 0;
  FOR_EACH_BB_FN (bb, cfun)
    if (dfs_nr[bb->index] == 0)
      {
        unreachable = 1;
        break;
      }

  /* Gross.  To avoid wasting memory, the second pass uses the dfs_nr array
     to hold degree counts.  */
  degree = dfs_nr;

  FOR_EACH_BB_FN (bb, cfun)
    degree[bb->index] = EDGE_COUNT (bb->preds);

  /* Do not perform region scheduling if there are any unreachable
     blocks.  */
  if (!unreachable)
    {
      int *queue, *degree1 = NULL;
      /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
         there basic blocks that are forced to be region heads.
         This is done to try to assemble a few smaller regions
         from a too_large region.  */
      sbitmap extended_rgn_header = NULL;
      bool extend_regions_p;

      if (no_loops)
        bitmap_set_bit (header, 0);

      /* Second traversal: find reducible inner loops and topologically
         sort the blocks of each region.  */

      queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));

      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
      if (extend_regions_p)
        {
          degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
          extended_rgn_header =
            sbitmap_alloc (last_basic_block_for_fn (cfun));
          bitmap_clear (extended_rgn_header);
        }

      /* Find blocks which are inner loop headers.  We still have non-reducible
         loops to consider at this point.  */
      FOR_EACH_BB_FN (bb, cfun)
        {
          if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
            {
              edge e;
              edge_iterator ei;
              basic_block jbb;

              /* Now check that the loop is reducible.  We do this separate
                 from finding inner loops so that we do not find a reducible
                 loop which contains an inner non-reducible loop.

                 A simple way to find reducible/natural loops is to verify
                 that each block in the loop is dominated by the loop
                 header.

                 If there exists a block that is not dominated by the loop
                 header, then the block is reachable from outside the loop
                 and thus the loop is not a natural loop.  */
              FOR_EACH_BB_FN (jbb, cfun)
                {
                  /* First identify blocks in the loop, except for the loop
                     entry block.  */
                  if (bb->index == max_hdr[jbb->index] && bb != jbb)
                    {
                      /* Now verify that the block is dominated by the loop
                         header.  */
                      if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
                        break;
                    }
                }

              /* If we exited the loop early, then BB is the header of
                 a non-reducible loop and we should quit processing it
                 now.  */
              if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun))
                continue;

              /* BB is the header of an inner loop, or block 0 in a
                 subroutine with no loops at all.  */
              head = tail = -1;
              too_large_failure = 0;
              loop_head = max_hdr[bb->index];

              if (extend_regions_p)
                /* We save degree in case we meet a too_large region
                   and cancel it.  We need a correct degree later when
                   calling extend_rgns.  */
                memcpy (degree1, degree,
                        last_basic_block_for_fn (cfun) * sizeof (int));

              /* Decrease degree of all BB's successors for topological
                 ordering.  */
              FOR_EACH_EDGE (e, ei, bb->succs)
                if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                  --degree[e->dest->index];

              /* Estimate # insns, and count # blocks in the region.  */
              num_bbs = 1;
              num_insns = common_sched_info->estimate_number_of_insns (bb);

              /* Find all loop latches (blocks with back edges to the loop
                 header) or all the leaf blocks if the cfg has no loops.

                 Place those blocks into the queue.  */
              if (no_loops)
                {
                  FOR_EACH_BB_FN (jbb, cfun)
                    /* Leaf nodes have only a single successor which must
                       be EXIT_BLOCK.  */
                    if (single_succ_p (jbb)
                        && single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
                      {
                        queue[++tail] = jbb->index;
                        bitmap_set_bit (in_queue, jbb->index);

                        if (too_large (jbb->index, &num_bbs, &num_insns))
                          {
                            too_large_failure = 1;
                            break;
                          }
                      }
                }
              else
                {
                  edge e;

                  FOR_EACH_EDGE (e, ei, bb->preds)
                    {
                      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
                        continue;

                      node = e->src->index;

                      if (max_hdr[node] == loop_head && node != bb->index)
                        {
                          /* This is a loop latch.  */
                          queue[++tail] = node;
                          bitmap_set_bit (in_queue, node);

                          if (too_large (node, &num_bbs, &num_insns))
                            {
                              too_large_failure = 1;
                              break;
                            }
                        }
                    }
                }

              /* Now add all the blocks in the loop to the queue.

                 We know the loop is a natural loop; however the algorithm
                 above will not always mark certain blocks as being in the
                 loop.  Consider:
                    node    children
                     a        b,c
                     b        c
                     c        a,d
                     d        b

                 The algorithm in the DFS traversal may not mark B & D as part
                 of the loop (i.e. they will not have max_hdr set to A).

                 We know they cannot be loop latches (else they would have
                 had max_hdr set since they'd have a backedge to a dominator
                 block).  So we don't need them on the initial queue.

                 We know they are part of the loop because they are dominated
                 by the loop header and can be reached by a backwards walk of
                 the edges starting with nodes on the initial queue.

                 It is safe and desirable to include those nodes in the
                 loop/scheduling region.  To do so we would need to decrease
                 the degree of a node if it is the target of a backedge
                 within the loop itself as the node is placed in the queue.

                 We do not do this because I'm not sure that the actual
                 scheduling code will properly handle this case.  ?!? */

              while (head < tail && !too_large_failure)
                {
                  edge e;
                  child = queue[++head];

                  FOR_EACH_EDGE (e, ei,
                                 BASIC_BLOCK_FOR_FN (cfun, child)->preds)
                    {
                      node = e->src->index;

                      /* See discussion above about nodes not marked as in
                         this loop during the initial DFS traversal.  */
                      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
                          || max_hdr[node] != loop_head)
                        {
                          tail = -1;
                          break;
                        }
                      else if (!bitmap_bit_p (in_queue, node) && node != bb->index)
                        {
                          queue[++tail] = node;
                          bitmap_set_bit (in_queue, node);

                          if (too_large (node, &num_bbs, &num_insns))
                            {
                              too_large_failure = 1;
                              break;
                            }
                        }
                    }
                }

              if (tail >= 0 && !too_large_failure)
                {
                  /* Place the loop header into list of region blocks.  */
                  degree[bb->index] = -1;
                  rgn_bb_table[idx] = bb->index;
                  RGN_NR_BLOCKS (nr_regions) = num_bbs;
                  RGN_BLOCKS (nr_regions) = idx++;
                  RGN_DONT_CALC_DEPS (nr_regions) = 0;
                  RGN_HAS_REAL_EBB (nr_regions) = 0;
                  CONTAINING_RGN (bb->index) = nr_regions;
                  BLOCK_TO_BB (bb->index) = count = 0;

                  /* Remove blocks from queue[] when their in degree
                     becomes zero.  Repeat until no blocks are left on the
                     list.  This produces a topological list of blocks in
                     the region.  */
                  while (tail >= 0)
                    {
                      if (head < 0)
                        head = tail;
                      child = queue[head];
                      if (degree[child] == 0)
                        {
                          edge e;

                          degree[child] = -1;
                          rgn_bb_table[idx++] = child;
                          BLOCK_TO_BB (child) = ++count;
                          CONTAINING_RGN (child) = nr_regions;
                          queue[head] = queue[tail--];

                          FOR_EACH_EDGE (e, ei,
                                         BASIC_BLOCK_FOR_FN (cfun,
                                                             child)->succs)
                            if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                              --degree[e->dest->index];
                        }
                      else
                        --head;
                    }
                  ++nr_regions;
                }
              else if (extend_regions_p)
                {
                  /* Restore DEGREE.  */
                  int *t = degree;

                  degree = degree1;
                  degree1 = t;

                  /* And force successors of BB to be region heads.
                     This may provide several smaller regions instead
                     of one too_large region.  */
                  FOR_EACH_EDGE (e, ei, bb->succs)
                    if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                      bitmap_set_bit (extended_rgn_header, e->dest->index);
                }
            }
        }
      free (queue);

      if (extend_regions_p)
        {
          free (degree1);

          bitmap_ior (header, header, extended_rgn_header);
          sbitmap_free (extended_rgn_header);

          extend_rgns (degree, &idx, header, max_hdr);
        }
    }

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB_FN (bb, cfun)
    if (degree[bb->index] >= 0)
      {
        rgn_bb_table[idx] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = idx++;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;
        CONTAINING_RGN (bb->index) = nr_regions++;
        BLOCK_TO_BB (bb->index) = 0;
      }

  free (max_hdr);
  free (degree);
  free (stack);
  sbitmap_free (header);
  sbitmap_free (inner);
  sbitmap_free (in_queue);
  sbitmap_free (in_stack);
}

/* Wrapper function.
   If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form
   regions.  Otherwise just call haifa_find_rgns.  */
static void
find_rgns (void)
{
  if (sel_sched_p () && flag_sel_sched_pipelining)
    sel_find_rgns ();
  else
    haifa_find_rgns ();
}

static int gather_region_statistics (int **);
static void print_region_statistics (int *, int, int *, int);

/* Calculate the histogram that shows the number of regions having the
   given number of basic blocks, and store it in the RSP array.  Return
   the size of this array.  */
static int
gather_region_statistics (int **rsp)
{
  int i, *a = 0, a_sz = 0;

  /* a[i] is the number of regions that have (i + 1) basic blocks.  */
  for (i = 0; i < nr_regions; i++)
    {
      int nr_blocks = RGN_NR_BLOCKS (i);

      gcc_assert (nr_blocks >= 1);

      if (nr_blocks > a_sz)
        {
          a = XRESIZEVEC (int, a, nr_blocks);
          do
            a[a_sz++] = 0;
          while (a_sz != nr_blocks);
        }

      a[nr_blocks - 1]++;
    }

  *rsp = a;
  return a_sz;
}

/* Print regions statistics.  S1 and S2 denote the data before and after
   calling extend_rgns, respectively.  */
static void
print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
{
  int i;

  /* We iterate until s2_sz because extend_rgns does not decrease
     the maximal region size.  */
  for (i = 1; i < s2_sz; i++)
    {
      int n1, n2;

      n2 = s2[i];

      if (n2 == 0)
        continue;

      if (i >= s1_sz)
        n1 = 0;
      else
        n1 = s1[i];

      fprintf (sched_dump, ";; Region extension statistics: size %d: " \
               "was %d + %d more\n", i + 1, n1, n2 - n1);
    }
}

/* Extend regions.
   DEGREE - Array of incoming edge counts, considering only
   the edges that don't have their sources in formed regions yet.
   IDXP - pointer to the next available index in rgn_bb_table.
   HEADER - set of all region heads.
   LOOP_HDR - mapping from block to the containing loop
   (two blocks can reside within one region if they have
   the same loop header).  */
void
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
  int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
  int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;

  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);

  max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));

  order = XNEWVEC (int, last_basic_block_for_fn (cfun));
  post_order_compute (order, false, false);
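
  /* Walking ORDER from its last element downwards visits the blocks in
     reverse post order, i.e. in a top-down topological order for the
     acyclic portion of the CFG, which is what the loops below rely on:
     a predecessor is normally seen before its successors.  */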

  for (i = nblocks - 1; i >= 0; i--)
    {
      int bbn = order[i];
      if (degree[bbn] >= 0)
        {
          max_hdr[bbn] = bbn;
          rescan = 1;
        }
      else
        /* This block already was processed in find_rgns.  */
        max_hdr[bbn] = -1;
    }

  /* The idea is to topologically walk through CFG in top-down order.
     During the traversal, if all the predecessors of a node are
     marked to be in the same region (they all have the same max_hdr),
     then the current node is also marked to be a part of that region.
     Otherwise the node starts its own region.
     The CFG should be traversed until no further changes are made.  On
     each iteration the set of the region heads is extended (the set of
     those blocks that have max_hdr[bbi] == bbi).  This set is upper
     bounded by the set of all basic blocks, thus the algorithm is
     guaranteed to terminate.  */
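
  /* A made-up example: in a loop-free diamond A -> B, A -> C, B -> D,
     C -> D whose blocks were all left alone by find_rgns, the first
     iteration makes A a region head (its only predecessor is the entry
     block) and then sets max_hdr[B] = max_hdr[C] = max_hdr[D] = A, so
     all four blocks form one region headed by A; the second iteration
     changes nothing and the walk stops.  */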

  while (rescan && iter < max_iter)
    {
      rescan = 0;

      for (i = nblocks - 1; i >= 0; i--)
        {
          edge e;
          edge_iterator ei;
          int bbn = order[i];

          if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
            {
              int hdr = -1;

              FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds)
                {
                  int predn = e->src->index;

                  if (predn != ENTRY_BLOCK
                      /* If pred wasn't processed in find_rgns.  */
                      && max_hdr[predn] != -1
                      /* And pred and bb reside in the same loop.
                         (Or out of any loop).  */
                      && loop_hdr[bbn] == loop_hdr[predn])
                    {
                      if (hdr == -1)
                        /* Then bb extends the containing region of pred.  */
                        hdr = max_hdr[predn];
                      else if (hdr != max_hdr[predn])
                        /* Too bad, there are at least two predecessors
                           that reside in different regions.  Thus, BB should
                           begin its own region.  */
                        {
                          hdr = bbn;
                          break;
                        }
                    }
                  else
                    /* BB starts its own region.  */
                    {
                      hdr = bbn;
                      break;
                    }
                }

              if (hdr == bbn)
                {
                  /* If BB starts its own region,
                     update the set of headers with BB.  */
                  bitmap_set_bit (header, bbn);
                  rescan = 1;
                }
              else
                gcc_assert (hdr != -1);

              max_hdr[bbn] = hdr;
            }
        }

      iter++;
    }

  /* Statistics were gathered on the SPEC2000 package of tests with
     mainline weekly snapshot gcc-4.1-20051015 on ia64.

     Statistics for SPECint:
     1 iteration : 1751 cases (38.7%)
     2 iterations: 2770 cases (61.3%)
     Blocks wrapped in regions by find_rgns without extension: 18295 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
     (We don't count single block regions here).

     Statistics for SPECfp:
     1 iteration : 621 cases (35.9%)
     2 iterations: 1110 cases (64.1%)
     Blocks wrapped in regions by find_rgns without extension: 6476 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
     (We don't count single block regions here).

     By default we do at most 2 iterations.
     This can be overridden with max-sched-extend-regions-iters parameter:
     0 - disable region extension,
     N > 0 - do at most N iterations.  */

  if (sched_verbose && iter != 0)
    fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
             rescan ? "... failed" : "");

  if (!rescan && iter != 0)
    {
      int *s1 = NULL, s1_sz = 0;

      /* Save the old statistics for later printout.  */
      if (sched_verbose >= 6)
        s1_sz = gather_region_statistics (&s1);

      /* We have succeeded.  Now assemble the regions.  */
      for (i = nblocks - 1; i >= 0; i--)
        {
          int bbn = order[i];

          if (max_hdr[bbn] == bbn)
            /* BBN is a region head.  */
            {
              edge e;
              edge_iterator ei;
              int num_bbs = 0, j, num_insns = 0, large;

              large = too_large (bbn, &num_bbs, &num_insns);

              degree[bbn] = -1;
              rgn_bb_table[idx] = bbn;
              RGN_BLOCKS (nr_regions) = idx++;
              RGN_DONT_CALC_DEPS (nr_regions) = 0;
              RGN_HAS_REAL_EBB (nr_regions) = 0;
              CONTAINING_RGN (bbn) = nr_regions;
              BLOCK_TO_BB (bbn) = 0;

              FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs)
                if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                  degree[e->dest->index]--;

              if (!large)
                /* Here we check whether the region is too_large.  */
                for (j = i - 1; j >= 0; j--)
                  {
                    int succn = order[j];
                    if (max_hdr[succn] == bbn)
                      {
                        if ((large = too_large (succn, &num_bbs, &num_insns)))
                          break;
                      }
                  }

              if (large)
                /* If the region is too_large, then wrap every block of
                   the region into a single block region.
                   Here we wrap the region head only; other blocks are
                   processed in the loop below.  */
                {
                  RGN_NR_BLOCKS (nr_regions) = 1;
                  nr_regions++;
                }

              num_bbs = 1;

              for (j = i - 1; j >= 0; j--)
                {
                  int succn = order[j];

                  if (max_hdr[succn] == bbn)
                    /* This loop iterates over all basic blocks that
                       are supposed to be in the region with head BBN,
                       and wraps them into that region (or into a
                       single block region).  */
                    {
                      gcc_assert (degree[succn] == 0);

                      degree[succn] = -1;
                      rgn_bb_table[idx] = succn;
                      BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
                      CONTAINING_RGN (succn) = nr_regions;

                      if (large)
                        /* Wrap SUCCN into single block region.  */
                        {
                          RGN_BLOCKS (nr_regions) = idx;
                          RGN_NR_BLOCKS (nr_regions) = 1;
                          RGN_DONT_CALC_DEPS (nr_regions) = 0;
                          RGN_HAS_REAL_EBB (nr_regions) = 0;
                          nr_regions++;
                        }

                      idx++;

                      FOR_EACH_EDGE (e, ei,
                                     BASIC_BLOCK_FOR_FN (cfun, succn)->succs)
                        if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                          degree[e->dest->index]--;
                    }
                }

              if (!large)
                {
                  RGN_NR_BLOCKS (nr_regions) = num_bbs;
                  nr_regions++;
                }
            }
        }

      if (sched_verbose >= 6)
        {
          int *s2, s2_sz;

          /* Get the new statistics and print the comparison with the
             one before calling this function.  */
          s2_sz = gather_region_statistics (&s2);
          print_region_statistics (s1, s1_sz, s2, s2_sz);
          free (s1);
          free (s2);
        }
    }

  free (order);
  free (max_hdr);

  *idxp = idx;
}

/* Functions for regions scheduling information.  */

/* Compute dominators, probability, and potential-split-edges of bb.
   Assume that these values were already computed for bb's predecessors.  */

static void
compute_dom_prob_ps (int bb)
{
  edge_iterator in_ei;
  edge in_edge;

  /* We shouldn't have any real ebbs yet.  */
  gcc_assert (ebb_head [bb] == bb + current_blocks);

  if (IS_RGN_ENTRY (bb))
    {
      bitmap_set_bit (dom[bb], 0);
      prob[bb] = REG_BR_PROB_BASE;
      return;
    }

  prob[bb] = 0;

  /* Initialize dom[bb] to '111..1'.  */
  bitmap_ones (dom[bb]);

  FOR_EACH_EDGE (in_edge, in_ei,
                 BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds)
    {
      int pred_bb;
      edge out_edge;
      edge_iterator out_ei;

      if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        continue;

      pred_bb = BLOCK_TO_BB (in_edge->src->index);
      bitmap_and (dom[bb], dom[bb], dom[pred_bb]);
      bitmap_ior (ancestor_edges[bb],
                  ancestor_edges[bb], ancestor_edges[pred_bb]);

      bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge));

      bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]);

      FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
        bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));

      prob[bb] += combine_probabilities (prob[pred_bb], in_edge->probability);
      /* The rounding divide in combine_probabilities can result in an extra
         probability increment propagating along 50-50 edges.  Eventually when
         the edges re-merge, the accumulated probability can go slightly above
         REG_BR_PROB_BASE.  */
      if (prob[bb] > REG_BR_PROB_BASE)
        prob[bb] = REG_BR_PROB_BASE;
    }

  bitmap_set_bit (dom[bb], bb);
  bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]);

  if (sched_verbose >= 2)
    fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
             (100 * prob[bb]) / REG_BR_PROB_BASE);
}
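
/* In effect this computes

     prob[bb] = sum over region predecessors P of
                prob[P] * prob(edge P->bb) / REG_BR_PROB_BASE,

   clamped to REG_BR_PROB_BASE: the probability of reaching BB from the
   region entry.  */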

/* Functions for target info.  */

/* Compute in BL the list of split-edges of bb_src relative to bb_trg.
   Note that bb_trg dominates bb_src.  */

static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
  sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src]));
  bitmap_copy (src, pot_split[bb_src]);

  bitmap_and_compl (src, src, pot_split[bb_trg]);
  extract_edgelst (src, bl);
  sbitmap_free (src);
}

/* Find the valid candidate-source-blocks for the target block TRG, compute
   their probability, and check if they are speculative or not.
   For speculative sources, compute their update-blocks and split-blocks.  */

static void
compute_trg_info (int trg)
{
  candidate *sp;
  edgelst el = { NULL, 0 };
  int i, j, k, update_idx;
  basic_block block;
  sbitmap visited;
  edge_iterator ei;
  edge e;

  candidate_table = XNEWVEC (candidate, current_nr_blocks);

  bblst_last = 0;
  /* bblst_table holds split blocks and update blocks for each block after
     the current one in the region.  split blocks and update blocks are
     the TO blocks of region edges, so there can be at most rgn_nr_edges
     of them.  */
  bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
  bblst_table = XNEWVEC (basic_block, bblst_size);

  edgelst_last = 0;
  edgelst_table = XNEWVEC (edge, rgn_nr_edges);

  /* Define some of the fields for the target bb as well.  */
  sp = candidate_table + trg;
  sp->is_valid = 1;
  sp->is_speculative = 0;
  sp->src_prob = REG_BR_PROB_BASE;

  visited = sbitmap_alloc (last_basic_block_for_fn (cfun));

  for (i = trg + 1; i < current_nr_blocks; i++)
    {
      sp = candidate_table + i;

      sp->is_valid = IS_DOMINATED (i, trg);
      if (sp->is_valid)
        {
          int tf = prob[trg], cf = prob[i];

          /* In CFGs with low probability edges TF can possibly be zero.  */
          sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0);
          sp->is_valid = (sp->src_prob >= min_spec_prob);
        }

      if (sp->is_valid)
        {
          split_edges (i, trg, &el);
          sp->is_speculative = (el.nr_members) ? 1 : 0;
          if (sp->is_speculative && !flag_schedule_speculative)
            sp->is_valid = 0;
        }

      if (sp->is_valid)
        {
          /* Compute split blocks and store them in bblst_table.
             The TO block of every split edge is a split block.  */
          sp->split_bbs.first_member = &bblst_table[bblst_last];
          sp->split_bbs.nr_members = el.nr_members;
          for (j = 0; j < el.nr_members; bblst_last++, j++)
            bblst_table[bblst_last] = el.first_member[j]->dest;
          sp->update_bbs.first_member = &bblst_table[bblst_last];

          /* Compute update blocks and store them in bblst_table.
             For every split edge, look at the FROM block, and check
             all out edges.  For each out edge that is not a split edge,
             add the TO block to the update block list.  This list can end
             up with a lot of duplicates.  We need to weed them out to avoid
             overrunning the end of the bblst_table.  */

          update_idx = 0;
          bitmap_clear (visited);
          for (j = 0; j < el.nr_members; j++)
            {
              block = el.first_member[j]->src;
              FOR_EACH_EDGE (e, ei, block->succs)
                {
                  if (!bitmap_bit_p (visited, e->dest->index))
                    {
                      for (k = 0; k < el.nr_members; k++)
                        if (e == el.first_member[k])
                          break;

                      if (k >= el.nr_members)
                        {
                          bblst_table[bblst_last++] = e->dest;
                          bitmap_set_bit (visited, e->dest->index);
                          update_idx++;
                        }
                    }
                }
            }
          sp->update_bbs.nr_members = update_idx;

          /* Make sure we didn't overrun the end of bblst_table.  */
          gcc_assert (bblst_last <= bblst_size);
        }
      else
        {
          sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;

          sp->is_speculative = 0;
          sp->src_prob = 0;
        }
    }

  sbitmap_free (visited);
}

/* Free the computed target info.  */
static void
free_trg_info (void)
{
  free (candidate_table);
  free (bblst_table);
  free (edgelst_table);
}

/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidate (int i)
{
  if (!candidate_table[i].is_valid)
    return;

  if (candidate_table[i].is_speculative)
    {
      int j;
      fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);

      fprintf (sched_dump, "split path: ");
      for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
        {
          int b = candidate_table[i].split_bbs.first_member[j]->index;

          fprintf (sched_dump, " %d ", b);
        }
      fprintf (sched_dump, "\n");

      fprintf (sched_dump, "update path: ");
      for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
        {
          int b = candidate_table[i].update_bbs.first_member[j]->index;

          fprintf (sched_dump, " %d ", b);
        }
      fprintf (sched_dump, "\n");
    }
  else
    {
      fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
    }
}

/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidates (int trg)
{
  int i;

  fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
           BB_TO_BLOCK (trg), trg);
  for (i = trg + 1; i < current_nr_blocks; i++)
    debug_candidate (i);
}

/* Functions for speculative scheduling.  */

static bitmap_head not_in_df;

/* Return 0 if X is a set of a register alive at the beginning of one
   of the split-blocks of SRC, otherwise return 1.  */

static int
check_live_1 (int src, rtx x)
{
  int i;
  int regno;
  rtx reg = SET_DEST (x);

  if (reg == 0)
    return 1;

  while (GET_CODE (reg) == SUBREG
         || GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
          if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
            return 1;

      return 0;
    }

  if (!REG_P (reg))
    return 1;

  regno = REGNO (reg);

  if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
    {
      /* Global registers are assumed live.  */
      return 0;
    }
  else
    {
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* Check for hard registers.  */
          int j = hard_regno_nregs[regno][GET_MODE (reg)];
          while (--j >= 0)
            {
              for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
                {
                  basic_block b = candidate_table[src].split_bbs.first_member[i];
                  int t = bitmap_bit_p (&not_in_df, b->index);

                  /* We can have split blocks that were recently generated.
                     Such blocks are always outside the current region.  */
                  gcc_assert (!t || (CONTAINING_RGN (b->index)
                                     != CONTAINING_RGN (BB_TO_BLOCK (src))));

                  if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
                    return 0;
                }
            }
        }
      else
        {
          /* Check for pseudo registers.  */
          for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
            {
              basic_block b = candidate_table[src].split_bbs.first_member[i];
              int t = bitmap_bit_p (&not_in_df, b->index);

              gcc_assert (!t || (CONTAINING_RGN (b->index)
                                 != CONTAINING_RGN (BB_TO_BLOCK (src))));

              if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
                return 0;
            }
        }
    }

  return 1;
}

/* If x is a set of a register R, mark that R is alive in the beginning
   of every update-block of src.  */

static void
update_live_1 (int src, rtx x)
{
  int i;
  int regno;
  rtx reg = SET_DEST (x);

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG
         || GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
          update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));

      return;
    }

  if (!REG_P (reg))
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);

  if (! HARD_REGISTER_NUM_P (regno)
      || !global_regs[regno])
    {
      for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
        {
          basic_block b = candidate_table[src].update_bbs.first_member[i];

          if (HARD_REGISTER_NUM_P (regno))
            bitmap_set_range (df_get_live_in (b), regno,
                              hard_regno_nregs[regno][GET_MODE (reg)]);
          else
            bitmap_set_bit (df_get_live_in (b), regno);
        }
    }
}

/* Return 1 if insn can be speculatively moved from block src to trg,
   otherwise return 0.  Called before the first insertion of insn into
   the ready list, or before scheduling.  */

static int
check_live (rtx_insn *insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return check_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
        if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
             || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
            && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
          return 0;

      return 1;
    }

  return 1;
}

/* Update the live registers info after insn was moved speculatively from
   block src to trg.  */

static void
update_live (rtx insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    update_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
            || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
          update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
    }
}

/* Nonzero if block bb_to is equal to, or reachable from block bb_from.  */
#define IS_REACHABLE(bb_from, bb_to) \
  (bb_from == bb_to \
   || IS_RGN_ENTRY (bb_from) \
   || (bitmap_bit_p (ancestor_edges[bb_to], \
                     EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK_FOR_FN (cfun, \
                                                      BB_TO_BLOCK (bb_from)))))))

/* Turns on the fed_by_spec_load flag for insns fed by load_insn.  */

static void
set_spec_fed (rtx load_insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_TRUE)
      FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
}

/* On the path from the insn to load_insn_bb, find a conditional
   branch depending on insn that guards the speculative load.  */

static int
find_conditional_protection (rtx insn, int load_insn_bb)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Iterate through DEF-USE forward dependences.  */
  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      rtx_insn *next = DEP_CON (dep);

      if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
           CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
          && IS_REACHABLE (INSN_BB (next), load_insn_bb)
          && load_insn_bb != INSN_BB (next)
          && DEP_TYPE (dep) == REG_DEP_TRUE
          && (JUMP_P (next)
              || find_conditional_protection (next, load_insn_bb)))
        return 1;
    }
  return 0;
} /* find_conditional_protection */

/* Returns 1 if an insn1 that participates in the computation
   of load_insn's address also feeds a conditional branch that
   guards load_insn.  This is true if we find two DEF-USE
   chains:
   insn1 -> ... -> conditional-branch
   insn1 -> ... -> load_insn,
   and if a flow path exists:
   insn1 -> ... -> conditional-branch -> ... -> load_insn,
   and if insn1 is on the path
   region-entry -> ... -> bb_trg -> ... load_insn.

   Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
   Locate the branch by following INSN_FORW_DEPS from insn1.  */
1920
1921 static int
1922 is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
1923 {
1924 sd_iterator_def sd_it;
1925 dep_t dep;
1926
1927 FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
1928 {
1929 rtx_insn *insn1 = DEP_PRO (dep);
1930
1931 /* Must be a DEF-USE dependence upon non-branch. */
1932 if (DEP_TYPE (dep) != REG_DEP_TRUE
1933 || JUMP_P (insn1))
1934 continue;
1935
1936 /* A path must exist: region-entry -> ... -> bb_trg -> ... load_insn. */
1937 if (INSN_BB (insn1) == bb_src
1938 || (CONTAINING_RGN (BLOCK_NUM (insn1))
1939 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
1940 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
1941 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
1942 continue;
1943
1944 /* Now search for the conditional-branch. */
1945 if (find_conditional_protection (insn1, bb_src))
1946 return 1;
1947
1948 /* Recursive step: search another insn1, "above" current insn1. */
1949 return is_conditionally_protected (insn1, bb_src, bb_trg);
1950 }
1951
1952 /* The chain does not exist. */
1953 return 0;
1954 } /* is_conditionally_protected */
1955
1956 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1957 load_insn can move speculatively from bb_src to bb_trg. All the
1958 following must hold:
1959
1960 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1961 (2) load_insn and insn2 have a def-use dependence upon
1962 the same insn 'insn1'.
1963 (3) either insn2 is in bb_trg, or:
1964 - there's only one split-block, and
1965 - insn2 is in that split-block (i.e. on the escape path).
1966
1967 From all these we can conclude that the two loads access memory
1968 addresses that differ at most by a constant, and hence if moving
1969 load_insn would cause an exception, it would have been caused by
1970 insn2 anyhow. */
1971
1972 static int
1973 is_pfree (rtx load_insn, int bb_src, int bb_trg)
1974 {
1975 sd_iterator_def back_sd_it;
1976 dep_t back_dep;
1977 candidate *candp = candidate_table + bb_src;
1978
1979 if (candp->split_bbs.nr_members != 1)
1980 /* Must have exactly one escape block. */
1981 return 0;
1982
1983 FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
1984 {
1985 rtx_insn *insn1 = DEP_PRO (back_dep);
1986
1987 if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
1988 /* Found a DEF-USE dependence (insn1, load_insn). */
1989 {
1990 sd_iterator_def fore_sd_it;
1991 dep_t fore_dep;
1992
1993 FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
1994 {
1995 rtx_insn *insn2 = DEP_CON (fore_dep);
1996
1997 if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
1998 {
1999 /* Found a DEF-USE dependence (insn1, insn2). */
2000 if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
2001 /* insn2 not guaranteed to be a 1 base reg load. */
2002 continue;
2003
2004 if (INSN_BB (insn2) == bb_trg)
2005 /* insn2 is the similar load, in the target block. */
2006 return 1;
2007
2008 if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
2009 /* insn2 is a similar load, in a split-block. */
2010 return 1;
2011 }
2012 }
2013 }
2014 }
2015
2016 /* Couldn't find a similar load. */
2017 return 0;
2018 } /* is_pfree */
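
/* A worked example (hypothetical insns) of the "similar load" clue
   described above:

       insn1:     r1 = r2 + 8          (in bb_trg)
       insn2:     r3 = [r1]            (in bb_trg)
       load_insn: r4 = [r1 + 4]        (in bb_src)

   Both loads are PFREE_CANDIDATEs fed by insn1, and insn2 lies in
   bb_trg, so is_pfree returns 1: the two addresses differ by a
   constant, and if moving load_insn could fault, insn2 would already
   have faulted.  */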
2019
2020 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
2021 a load moved speculatively, or if load_insn is protected by
2022 a compare on load_insn's address). */
2023
2024 static int
2025 is_prisky (rtx load_insn, int bb_src, int bb_trg)
2026 {
2027 if (FED_BY_SPEC_LOAD (load_insn))
2028 return 1;
2029
2030 if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
2031 /* A dependence may 'hide' outside the region. */
2032 return 1;
2033
2034 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
2035 return 1;
2036
2037 return 0;
2038 }
2039
2040 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
2041 Return 1 if insn is exception-free (and the motion is valid)
2042 and 0 otherwise. */
2043
2044 static int
2045 is_exception_free (rtx insn, int bb_src, int bb_trg)
2046 {
2047 int insn_class = haifa_classify_insn (insn);
2048
2049 /* Handle non-load insns. */
2050 switch (insn_class)
2051 {
2052 case TRAP_FREE:
2053 return 1;
2054 case TRAP_RISKY:
2055 return 0;
2056 default:;
2057 }
2058
2059 /* Handle loads. */
2060 if (!flag_schedule_speculative_load)
2061 return 0;
2062 IS_LOAD_INSN (insn) = 1;
2063 switch (insn_class)
2064 {
2065 case IFREE:
2066 return 1;
2067 case IRISKY:
2068 return 0;
2069 case PFREE_CANDIDATE:
2070 if (is_pfree (insn, bb_src, bb_trg))
2071 return 1;
2072 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
2073 case PRISKY_CANDIDATE:
2074 if (!flag_schedule_speculative_load_dangerous
2075 || is_prisky (insn, bb_src, bb_trg))
2076 return 0;
2077 break;
2078 default:;
2079 }
2080
2081 return flag_schedule_speculative_load_dangerous;
2082 }
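
/* A condensed view of the decision above, derived directly from the
   code (not a separate specification):

     TRAP_FREE                                     -> 1
     TRAP_RISKY                                    -> 0
     any load, if !flag_schedule_speculative_load  -> 0
     IFREE                                         -> 1
     IRISKY                                        -> 0
     PFREE_CANDIDATE, if is_pfree                  -> 1
     remaining PFREE/PRISKY candidates             -> 1 only if
       flag_schedule_speculative_load_dangerous && !is_prisky  */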
2083 \f
2084 /* The number of insns from the current block scheduled so far. */
2085 static int sched_target_n_insns;
2086 /* The number of insns from the current block to be scheduled in total. */
2087 static int target_n_insns;
2088 /* The number of insns from the entire region scheduled so far. */
2089 static int sched_n_insns;
2090
2091 /* Implementations of the sched_info functions for region scheduling. */
2092 static void init_ready_list (void);
2093 static int can_schedule_ready_p (rtx_insn *);
2094 static void begin_schedule_ready (rtx_insn *);
2095 static ds_t new_ready (rtx_insn *, ds_t);
2096 static int schedule_more_p (void);
2097 static const char *rgn_print_insn (const rtx_insn *, int);
2098 static int rgn_rank (rtx_insn *, rtx_insn *);
2099 static void compute_jump_reg_dependencies (rtx, regset);
2100
2101 /* Functions for speculative scheduling. */
2102 static void rgn_add_remove_insn (rtx_insn *, int);
2103 static void rgn_add_block (basic_block, basic_block);
2104 static void rgn_fix_recovery_cfg (int, int, int);
2105 static basic_block advance_target_bb (basic_block, rtx_insn *);
2106
2107 /* Return nonzero if there are more insns that should be scheduled. */
2108
2109 static int
2110 schedule_more_p (void)
2111 {
2112 return sched_target_n_insns < target_n_insns;
2113 }
2114
2115 /* Add all insns that are initially ready to the ready list. Called
2116 once before scheduling a set of insns. */
2117
2118 static void
2119 init_ready_list (void)
2120 {
2121 rtx_insn *prev_head = current_sched_info->prev_head;
2122 rtx_insn *next_tail = current_sched_info->next_tail;
2123 int bb_src;
2124 rtx_insn *insn;
2125
2126 target_n_insns = 0;
2127 sched_target_n_insns = 0;
2128 sched_n_insns = 0;
2129
2130 /* Print debugging information. */
2131 if (sched_verbose >= 5)
2132 debug_rgn_dependencies (target_bb);
2133
2134 /* Prepare current target block info. */
2135 if (current_nr_blocks > 1)
2136 compute_trg_info (target_bb);
2137
2138 /* Initialize ready list with all 'ready' insns in target block.
2139 Count number of insns in the target block being scheduled. */
2140 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2141 {
2142 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2143 TODO_SPEC (insn) = HARD_DEP;
2144 try_ready (insn);
2145 target_n_insns++;
2146
2147 gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
2148 }
2149
2150 /* Add to the ready list all 'ready' insns in valid source blocks.
2151 For speculative insns, check liveness, exception-freeness, and
2152 issue delay. */
2153 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2154 if (IS_VALID (bb_src))
2155 {
2156 rtx_insn *src_head;
2157 rtx_insn *src_next_tail;
2158 rtx_insn *tail, *head;
2159
2160 get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
2161 &head, &tail);
2162 src_next_tail = NEXT_INSN (tail);
2163 src_head = head;
2164
2165 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2166 if (INSN_P (insn))
2167 {
2168 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2169 TODO_SPEC (insn) = HARD_DEP;
2170 try_ready (insn);
2171 }
2172 }
2173 }
2174
2175 /* Called after taking INSN from the ready list. Returns nonzero if this
2176 insn can be scheduled, zero if we should silently discard it. */
2177
2178 static int
2179 can_schedule_ready_p (rtx_insn *insn)
2180 {
2181 /* An interblock motion? */
2182 if (INSN_BB (insn) != target_bb
2183 && IS_SPECULATIVE_INSN (insn)
2184 && !check_live (insn, INSN_BB (insn)))
2185 return 0;
2186 else
2187 return 1;
2188 }
2189
2190 /* Updates counters and other information. Split from can_schedule_ready_p ()
2191 because when an insn is scheduled speculatively, the insn passed to
2192 can_schedule_ready_p () differs from the one passed to
2193 begin_schedule_ready (). */
2194 static void
2195 begin_schedule_ready (rtx_insn *insn)
2196 {
2197 /* An interblock motion? */
2198 if (INSN_BB (insn) != target_bb)
2199 {
2200 if (IS_SPECULATIVE_INSN (insn))
2201 {
2202 gcc_assert (check_live (insn, INSN_BB (insn)));
2203
2204 update_live (insn, INSN_BB (insn));
2205
2206 /* For speculative load, mark insns fed by it. */
2207 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2208 set_spec_fed (insn);
2209
2210 nr_spec++;
2211 }
2212 nr_inter++;
2213 }
2214 else
2215 {
2216 /* In block motion. */
2217 sched_target_n_insns++;
2218 }
2219 sched_n_insns++;
2220 }
2221
2222 /* Called after NEXT has all its hard dependencies resolved and speculation
2223 of type TS is enough to overcome them all.
2224 Return the speculation type with which NEXT should be moved to the ready
2225 list or the queue, or DEP_POSTPONED if NEXT is not ready yet. */
2226 static ds_t
2227 new_ready (rtx_insn *next, ds_t ts)
2228 {
2229 if (INSN_BB (next) != target_bb)
2230 {
2231 int not_ex_free = 0;
2232
2233 /* For speculative insns, before inserting to ready/queue,
2234 check liveness, exception-freeness, and issue delay. */
2235 if (!IS_VALID (INSN_BB (next))
2236 || CANT_MOVE (next)
2237 || (IS_SPECULATIVE_INSN (next)
2238 && ((recog_memoized (next) >= 0
2239 && min_insn_conflict_delay (curr_state, next, next)
2240 > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
2241 || IS_SPECULATION_CHECK_P (next)
2242 || !check_live (next, INSN_BB (next))
2243 || (not_ex_free = !is_exception_free (next, INSN_BB (next),
2244 target_bb)))))
2245 {
2246 if (not_ex_free
2247 /* We are here because is_exception_free () == false.
2248 But we possibly can handle that with control speculation. */
2249 && sched_deps_info->generate_spec_deps
2250 && spec_info->mask & BEGIN_CONTROL)
2251 {
2252 ds_t new_ds;
2253
2254 /* Add control speculation to NEXT's dependency type. */
2255 new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
2256
2257 /* Check if NEXT can be speculated with new dependency type. */
2258 if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
2259 /* Here we got new control-speculative instruction. */
2260 ts = new_ds;
2261 else
2262 /* NEXT isn't ready yet. */
2263 ts = DEP_POSTPONED;
2264 }
2265 else
2266 /* NEXT isn't ready yet. */
2267 ts = DEP_POSTPONED;
2268 }
2269 }
2270
2271 return ts;
2272 }
2273
2274 /* Return a string that contains the insn uid and optionally anything else
2275 necessary to identify this insn in an output. It's valid to use a
2276 static buffer for this. The ALIGNED parameter should cause the string
2277 to be formatted so that multiple output lines will line up nicely. */
2278
2279 static const char *
2280 rgn_print_insn (const rtx_insn *insn, int aligned)
2281 {
2282 static char tmp[80];
2283
2284 if (aligned)
2285 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2286 else
2287 {
2288 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2289 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2290 else
2291 sprintf (tmp, "%d", INSN_UID (insn));
2292 }
2293 return tmp;
2294 }
2295
2296 /* Compare priority of two insns. Return a positive number if the second
2297 insn is to be preferred for scheduling, and a negative one if the first
2298 is to be preferred. Zero if they are equally good. */
2299
2300 static int
2301 rgn_rank (rtx_insn *insn1, rtx_insn *insn2)
2302 {
2303 /* Some comparisons make sense in interblock scheduling only. */
2304 if (INSN_BB (insn1) != INSN_BB (insn2))
2305 {
2306 int spec_val, prob_val;
2307
2308 /* Prefer an inblock motion over an interblock motion. */
2309 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2310 return 1;
2311 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2312 return -1;
2313
2314 /* Prefer a useful motion over a speculative one. */
2315 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2316 if (spec_val)
2317 return spec_val;
2318
2319 /* Prefer a more probable (speculative) insn. */
2320 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2321 if (prob_val)
2322 return prob_val;
2323 }
2324 return 0;
2325 }
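
/* For example (hypothetical insns): if insn2 lies in the target block
   while insn1 would be an interblock motion, rgn_rank returns 1 and
   insn2 is preferred.  Two speculative insns from different blocks are
   ordered by INSN_PROBABILITY instead, preferring the more probable
   one.  */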
2326
2327 /* NEXT is an instruction that depends on INSN (a backward dependence);
2328 return nonzero if we should include this dependence in priority
2329 calculations. */
2330
2331 int
2332 contributes_to_priority (rtx_insn *next, rtx_insn *insn)
2333 {
2334 /* NEXT and INSN reside in one ebb. */
2335 return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
2336 }
2337
2338 /* INSN is a JUMP_INSN. Store the set of registers that must be
2339 considered as used by this jump in USED. */
2340
2341 static void
2342 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
2343 regset used ATTRIBUTE_UNUSED)
2344 {
2345 /* Nothing to do here, since we postprocess jumps in
2346 add_branch_dependences. */
2347 }
2348
2349 /* This variable holds common_sched_info hooks and data relevant to
2350 the interblock scheduler. */
2351 static struct common_sched_info_def rgn_common_sched_info;
2352
2353
2354 /* This holds data for the dependence analysis relevant to
2355 the interblock scheduler. */
2356 static struct sched_deps_info_def rgn_sched_deps_info;
2357
2358 /* This holds constant data used for initializing the above structure
2359 for the Haifa scheduler. */
2360 static const struct sched_deps_info_def rgn_const_sched_deps_info =
2361 {
2362 compute_jump_reg_dependencies,
2363 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2364 0, 0, 0
2365 };
2366
2367 /* Same as above, but for the selective scheduler. */
2368 static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
2369 {
2370 compute_jump_reg_dependencies,
2371 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2372 0, 0, 0
2373 };
2374
2375 /* Return true if scheduling INSN will finish the scheduling of the
2376 current block. */
2377 static bool
2378 rgn_insn_finishes_block_p (rtx_insn *insn)
2379 {
2380 if (INSN_BB (insn) == target_bb
2381 && sched_target_n_insns + 1 == target_n_insns)
2382 /* INSN is the last unscheduled instruction in the current block. */
2383 return true;
2384
2385 return false;
2386 }
2387
2388 /* Used in schedule_insns to initialize current_sched_info for scheduling
2389 regions (or single basic blocks). */
2390
2391 static const struct haifa_sched_info rgn_const_sched_info =
2392 {
2393 init_ready_list,
2394 can_schedule_ready_p,
2395 schedule_more_p,
2396 new_ready,
2397 rgn_rank,
2398 rgn_print_insn,
2399 contributes_to_priority,
2400 rgn_insn_finishes_block_p,
2401
2402 NULL, NULL,
2403 NULL, NULL,
2404 0, 0,
2405
2406 rgn_add_remove_insn,
2407 begin_schedule_ready,
2408 NULL,
2409 advance_target_bb,
2410 NULL, NULL,
2411 SCHED_RGN
2412 };
2413
2414 /* This variable holds the data and hooks needed by the Haifa scheduler backend
2415 for the interblock scheduler frontend. */
2416 static struct haifa_sched_info rgn_sched_info;
2417
2418 /* Returns the maximum priority that any insn was assigned. */
2419
2420 int
2421 get_rgn_sched_max_insns_priority (void)
2422 {
2423 return rgn_sched_info.sched_max_insns_priority;
2424 }
2425
2426 /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */
2427
2428 static bool
2429 sets_likely_spilled (rtx pat)
2430 {
2431 bool ret = false;
2432 note_stores (pat, sets_likely_spilled_1, &ret);
2433 return ret;
2434 }
2435
2436 static void
2437 sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
2438 {
2439 bool *ret = (bool *) data;
2440
2441 if (GET_CODE (pat) == SET
2442 && REG_P (x)
2443 && HARD_REGISTER_P (x)
2444 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
2445 *ret = true;
2446 }
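
/* An illustration (target-dependent, so only a sketch): on a target
   whose function-value register belongs to a small register class for
   which targetm.class_likely_spilled_p returns true, a copy such as

       (set (reg:SI 0) (reg:SI 100))

   makes sets_likely_spilled return true, and add_branch_dependences
   below will then keep the insn at the end of its block before
   reload.  */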
2447
2448 /* A bitmap to note insns that participate in any dependency. Used in
2449 add_branch_dependences. */
2450 static sbitmap insn_referenced;
2451
2452 /* Add dependences so that branches are scheduled to run last in their
2453 block. */
2454 static void
2455 add_branch_dependences (rtx_insn *head, rtx_insn *tail)
2456 {
2457 rtx_insn *insn, *last;
2458
2459 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2460 that can throw exceptions, force them to remain in order at the end of
2461 the block by adding dependencies and giving the last a high priority.
2462 There may be notes present, and prev_head may also be a note.
2463
2464 Branches must obviously remain at the end. Calls should remain at the
2465 end since moving them results in worse register allocation. Uses remain
2466 at the end to ensure proper register allocation.
2467
2468 cc0 setters remain at the end because they can't be moved away from
2469 their cc0 user.
2470
2471 Predecessors of SCHED_GROUP_P instructions at the end remain at the end.
2472
2473 COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
2474
2475 Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
2476 values) are not moved before reload because we can wind up with register
2477 allocation failures. */
2478
2479 while (tail != head && DEBUG_INSN_P (tail))
2480 tail = PREV_INSN (tail);
2481
2482 insn = tail;
2483 last = 0;
2484 while (CALL_P (insn)
2485 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
2486 || (NONJUMP_INSN_P (insn)
2487 && (GET_CODE (PATTERN (insn)) == USE
2488 || GET_CODE (PATTERN (insn)) == CLOBBER
2489 || can_throw_internal (insn)
2490 #ifdef HAVE_cc0
2491 || sets_cc0_p (PATTERN (insn))
2492 #endif
2493 || (!reload_completed
2494 && sets_likely_spilled (PATTERN (insn)))))
2495 || NOTE_P (insn)
2496 || (last != 0 && SCHED_GROUP_P (last)))
2497 {
2498 if (!NOTE_P (insn))
2499 {
2500 if (last != 0
2501 && sd_find_dep_between (insn, last, false) == NULL)
2502 {
2503 if (! sched_insns_conditions_mutex_p (last, insn))
2504 add_dependence (last, insn, REG_DEP_ANTI);
2505 bitmap_set_bit (insn_referenced, INSN_LUID (insn));
2506 }
2507
2508 CANT_MOVE (insn) = 1;
2509
2510 last = insn;
2511 }
2512
2513 /* Don't overrun the bounds of the basic block. */
2514 if (insn == head)
2515 break;
2516
2517 do
2518 insn = PREV_INSN (insn);
2519 while (insn != head && DEBUG_INSN_P (insn));
2520 }
2521
2522 /* Make sure these insns are scheduled last in their block. */
2523 insn = last;
2524 if (insn != 0)
2525 while (insn != head)
2526 {
2527 insn = prev_nonnote_insn (insn);
2528
2529 if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
2530 || DEBUG_INSN_P (insn))
2531 continue;
2532
2533 if (! sched_insns_conditions_mutex_p (last, insn))
2534 add_dependence (last, insn, REG_DEP_ANTI);
2535 }
2536
2537 if (!targetm.have_conditional_execution ())
2538 return;
2539
2540 /* Finally, if the block ends in a jump, and we are doing intra-block
2541 scheduling, make sure that the branch depends on any COND_EXEC insns
2542 inside the block to avoid moving the COND_EXECs past the branch insn.
2543
2544 We only have to do this after reload, because (1) before reload there
2545 are no COND_EXEC insns, and (2) the region scheduler is an intra-block
2546 scheduler after reload.
2547
2548 FIXME: We could in some cases move COND_EXEC insns past the branch if
2549 this scheduler would be a little smarter. Consider this code:
2550
2551 T = [addr]
2552 C ? addr += 4
2553 !C ? X += 12
2554 C ? T += 1
2555 C ? jump foo
2556
2557 On a target with a one cycle stall on a memory access the optimal
2558 sequence would be:
2559
2560 T = [addr]
2561 C ? addr += 4
2562 C ? T += 1
2563 C ? jump foo
2564 !C ? X += 12
2565
2566 We don't want to put the 'X += 12' before the branch because it just
2567 wastes a cycle of execution time when the branch is taken.
2568
2569 Note that in the example "!C" will always be true. That is another
2570 possible improvement for handling COND_EXECs in this scheduler: it
2571 could remove always-true predicates. */
2572
2573 if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail)))
2574 return;
2575
2576 insn = tail;
2577 while (insn != head)
2578 {
2579 insn = PREV_INSN (insn);
2580
2581 /* Note that we want to add this dependency even when
2582 sched_insns_conditions_mutex_p returns true. The whole point
2583 is that we _want_ this dependency, even if these insns really
2584 are independent. */
2585 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
2586 add_dependence (tail, insn, REG_DEP_ANTI);
2587 }
2588 }
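
/* A small example of the first loop above (hypothetical block tail):

       i1: r0 = r1 + r2
       i2: (use (reg:SI 0))
       i3: (jump_insn ...)

   Walking back from i3, both i3 and i2 match, so CANT_MOVE is set on
   them and an anti-dependence is added that makes i3 depend on i2.
   The walk stops at i1, an ordinary arithmetic insn, which remains
   free to be scheduled earlier.  */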
2589
2590 /* Data structures for the computation of data dependences in a region. We
2591 keep one `deps' structure for every basic block. Before analyzing the
2592 data dependences for a bb, its variables are initialized as a function of
2593 the variables of its predecessors. When the analysis for a bb completes,
2594 we save the contents to the corresponding bb_deps[bb] variable. */
2595
2596 static struct deps_desc *bb_deps;
2597
2598 static void
2599 concat_insn_mem_list (rtx_insn_list *copy_insns,
2600 rtx_expr_list *copy_mems,
2601 rtx_insn_list **old_insns_p,
2602 rtx_expr_list **old_mems_p)
2603 {
2604 rtx_insn_list *new_insns = *old_insns_p;
2605 rtx_expr_list *new_mems = *old_mems_p;
2606
2607 while (copy_insns)
2608 {
2609 new_insns = alloc_INSN_LIST (copy_insns->insn (), new_insns);
2610 new_mems = alloc_EXPR_LIST (VOIDmode, copy_mems->element (), new_mems);
2611 copy_insns = copy_insns->next ();
2612 copy_mems = copy_mems->next ();
2613 }
2614
2615 *old_insns_p = new_insns;
2616 *old_mems_p = new_mems;
2617 }
2618
2619 /* Join PRED_DEPS to the SUCC_DEPS. */
2620 void
2621 deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
2622 {
2623 unsigned reg;
2624 reg_set_iterator rsi;
2625
2626 /* The reg_last lists are inherited by successor. */
2627 EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
2628 {
2629 struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2630 struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2631
2632 succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2633 succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2634 succ_rl->implicit_sets
2635 = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
2636 succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2637 succ_rl->clobbers);
2638 succ_rl->uses_length += pred_rl->uses_length;
2639 succ_rl->clobbers_length += pred_rl->clobbers_length;
2640 }
2641 IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2642
2643 /* Mem read/write lists are inherited by successor. */
2644 concat_insn_mem_list (pred_deps->pending_read_insns,
2645 pred_deps->pending_read_mems,
2646 &succ_deps->pending_read_insns,
2647 &succ_deps->pending_read_mems);
2648 concat_insn_mem_list (pred_deps->pending_write_insns,
2649 pred_deps->pending_write_mems,
2650 &succ_deps->pending_write_insns,
2651 &succ_deps->pending_write_mems);
2652
2653 succ_deps->pending_jump_insns
2654 = concat_INSN_LIST (pred_deps->pending_jump_insns,
2655 succ_deps->pending_jump_insns);
2656 succ_deps->last_pending_memory_flush
2657 = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2658 succ_deps->last_pending_memory_flush);
2659
2660 succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
2661 succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
2662 succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2663
2664 /* last_function_call is inherited by successor. */
2665 succ_deps->last_function_call
2666 = concat_INSN_LIST (pred_deps->last_function_call,
2667 succ_deps->last_function_call);
2668
2669 /* last_function_call_may_noreturn is inherited by successor. */
2670 succ_deps->last_function_call_may_noreturn
2671 = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
2672 succ_deps->last_function_call_may_noreturn);
2673
2674 /* sched_before_next_call is inherited by successor. */
2675 succ_deps->sched_before_next_call
2676 = concat_INSN_LIST (pred_deps->sched_before_next_call,
2677 succ_deps->sched_before_next_call);
2678 }
2679
2680 /* After computing the dependencies for block BB, propagate the dependencies
2681 found in TMP_DEPS to the successors of the block. */
2682 static void
2683 propagate_deps (int bb, struct deps_desc *pred_deps)
2684 {
2685 basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
2686 edge_iterator ei;
2687 edge e;
2688
2689 /* bb's structures are inherited by its successors. */
2690 FOR_EACH_EDGE (e, ei, block->succs)
2691 {
2692 /* Only bbs "below" bb, in the same region, are interesting. */
2693 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2694 || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
2695 || BLOCK_TO_BB (e->dest->index) <= bb)
2696 continue;
2697
2698 deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
2699 }
2700
2701 /* These lists should point to the right place, for correct
2702 freeing later. */
2703 bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2704 bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2705 bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2706 bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2707 bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns;
2708
2709 /* Can't allow these to be freed twice. */
2710 pred_deps->pending_read_insns = 0;
2711 pred_deps->pending_read_mems = 0;
2712 pred_deps->pending_write_insns = 0;
2713 pred_deps->pending_write_mems = 0;
2714 pred_deps->pending_jump_insns = 0;
2715 }
2716
2717 /* Compute dependences inside bb. In a multi-block region:
2718 (1) a bb is analyzed after its predecessors, and (2) the lists in
2719 effect at the end of bb (after analyzing for bb) are inherited by
2720 bb's successors.
2721
2722 Specifically for reg-reg data dependences, the block insns are
2723 scanned by sched_analyze () top-to-bottom. Three lists are
2724 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2725 reg_last[].implicit_sets for implicit hard register DEFs, and
2726 reg_last[].uses for register USEs.
2727
2728 When analysis is completed for bb, we update for its successors:
2729 - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2730 - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
2731 - USES[succ] = Union (USES [succ], USES [bb])
2732
2733 The mechanism for computing mem-mem data dependence is very
2734 similar, and the result is interblock dependences in the region. */
2735
2736 static void
2737 compute_block_dependences (int bb)
2738 {
2739 rtx_insn *head, *tail;
2740 struct deps_desc tmp_deps;
2741
2742 tmp_deps = bb_deps[bb];
2743
2744 /* Do the analysis for this block. */
2745 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2746 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2747
2748 sched_analyze (&tmp_deps, head, tail);
2749
2750 /* Selective scheduling handles control dependencies by itself. */
2751 if (!sel_sched_p ())
2752 add_branch_dependences (head, tail);
2753
2754 if (current_nr_blocks > 1)
2755 propagate_deps (bb, &tmp_deps);
2756
2757 /* Free up the INSN_LISTs. */
2758 free_deps (&tmp_deps);
2759
2760 if (targetm.sched.dependencies_evaluation_hook)
2761 targetm.sched.dependencies_evaluation_hook (head, tail);
2762 }
2763
2764 /* Free dependencies of instructions inside BB. */
2765 static void
2766 free_block_dependencies (int bb)
2767 {
2768 rtx_insn *head;
2769 rtx_insn *tail;
2770
2771 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2772
2773 if (no_real_insns_p (head, tail))
2774 return;
2775
2776 sched_free_deps (head, tail, true);
2777 }
2778
2779 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2780 them to the unused_*_list variables, so that they can be reused. */
2781
2782 static void
2783 free_pending_lists (void)
2784 {
2785 int bb;
2786
2787 for (bb = 0; bb < current_nr_blocks; bb++)
2788 {
2789 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2790 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2791 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2792 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2793 free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns);
2794 }
2795 }
2796 \f
2799 /* Print dependences for debugging starting from FROM_BB.
2800 Callable from debugger. */
2801 DEBUG_FUNCTION void
2802 debug_rgn_dependencies (int from_bb)
2803 {
2804 int bb;
2805
2806 fprintf (sched_dump,
2807 ";; --------------- forward dependences: ------------ \n");
2808
2809 for (bb = from_bb; bb < current_nr_blocks; bb++)
2810 {
2811 rtx_insn *head, *tail;
2812
2813 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2814 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2815 BB_TO_BLOCK (bb), bb);
2816
2817 debug_dependencies (head, tail);
2818 }
2819 }
2820
2821 /* Print dependency information for instructions between HEAD and TAIL.
2822 ??? This function would probably fit best in haifa-sched.c. */
2823 void
debug_dependencies (rtx_insn *head, rtx_insn *tail)
2824 {
2825 rtx_insn *insn;
2826 rtx_insn *next_tail = NEXT_INSN (tail);
2827
2828 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2829 "insn", "code", "bb", "dep", "prio", "cost",
2830 "reservation");
2831 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2832 "----", "----", "--", "---", "----", "----",
2833 "-----------");
2834
2835 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2836 {
2837 if (! INSN_P (insn))
2838 {
2839 int n;
2840 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2841 if (NOTE_P (insn))
2842 {
2843 n = NOTE_KIND (insn);
2844 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2845 }
2846 else
2847 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2848 continue;
2849 }
2850
2851 fprintf (sched_dump,
2852 ";; %s%5d%6d%6d%6d%6d%6d ",
2853 (SCHED_GROUP_P (insn) ? "+" : " "),
2854 INSN_UID (insn),
2855 INSN_CODE (insn),
2856 BLOCK_NUM (insn),
2857 sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
2858 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2859 : INSN_PRIORITY (insn))
2860 : INSN_PRIORITY (insn)),
2861 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2862 : insn_cost (insn))
2863 : insn_cost (insn)));
2864
2865 if (recog_memoized (insn) < 0)
2866 fprintf (sched_dump, "nothing");
2867 else
2868 print_reservation (sched_dump, insn);
2869
2870 fprintf (sched_dump, "\t: ");
2871 {
2872 sd_iterator_def sd_it;
2873 dep_t dep;
2874
2875 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
2876 fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)),
2877 DEP_NONREG (dep) ? "n" : "",
2878 DEP_MULTIPLE (dep) ? "m" : "");
2879 }
2880 fprintf (sched_dump, "\n");
2881 }
2882
2883 fprintf (sched_dump, "\n");
2884 }
2885 \f
2886 /* Returns true if all the basic blocks of the current region have the
2887 BB_DISABLE_SCHEDULE flag set, which means the region must not be scheduled. */
2888 bool
2889 sched_is_disabled_for_current_region_p (void)
2890 {
2891 int bb;
2892
2893 for (bb = 0; bb < current_nr_blocks; bb++)
2894 if (!(BASIC_BLOCK_FOR_FN (cfun,
2895 BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
2896 return false;
2897
2898 return true;
2899 }
2900
2901 /* Free all region dependencies saved in INSN_BACK_DEPS and
2902 INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly
2903 when scheduling, so this function is supposed to be called from
2904 the selective scheduling only. */
2905 void
2906 free_rgn_deps (void)
2907 {
2908 int bb;
2909
2910 for (bb = 0; bb < current_nr_blocks; bb++)
2911 {
2912 rtx_insn *head, *tail;
2913
2914 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2915 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2916
2917 sched_free_deps (head, tail, false);
2918 }
2919 }
2920
2921 static int rgn_n_insns;
2922
2923 /* Compute insn priority for a current region. */
2924 void
2925 compute_priorities (void)
2926 {
2927 int bb;
2928
2929 current_sched_info->sched_max_insns_priority = 0;
2930 for (bb = 0; bb < current_nr_blocks; bb++)
2931 {
2932 rtx_insn *head, *tail;
2933
2934 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2935 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2936
2937 if (no_real_insns_p (head, tail))
2938 continue;
2939
2940 rgn_n_insns += set_priorities (head, tail);
2941 }
2942 current_sched_info->sched_max_insns_priority++;
2943 }
2944
2945 /* (Re-)initialize the arrays of DFA states at the end of each basic block.
2946
2947 SAVED_LAST_BASIC_BLOCK is the previous length of the arrays. It must be
2948 zero for the first call to this function, to allocate the arrays for the
2949 first time.
2950
2951 This function is called once during initialization of the scheduler, and
2952 called again to resize the arrays if new basic blocks have been created,
2953 for example for speculation recovery code. */
2954
2955 static void
2956 realloc_bb_state_array (int saved_last_basic_block)
2957 {
2958 char *old_bb_state_array = bb_state_array;
2959 size_t lbb = (size_t) last_basic_block_for_fn (cfun);
2960 size_t slbb = (size_t) saved_last_basic_block;
2961
2962 /* Nothing to do if nothing changed since the last time this was called. */
2963 if (saved_last_basic_block == last_basic_block_for_fn (cfun))
2964 return;
2965
2966 /* The selective scheduler doesn't use the state arrays. */
2967 if (sel_sched_p ())
2968 {
2969 gcc_assert (bb_state_array == NULL && bb_state == NULL);
2970 return;
2971 }
2972
2973 gcc_checking_assert (saved_last_basic_block == 0
2974 || (bb_state_array != NULL && bb_state != NULL));
2975
2976 bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size);
2977 bb_state = XRESIZEVEC (state_t, bb_state, lbb);
2978
2979 /* If BB_STATE_ARRAY has moved, fixup all the state pointers array.
2980 Otherwise only fixup the newly allocated ones. For the state
2981 array itself, only initialize the new entries. */
2982 bool bb_state_array_moved = (bb_state_array != old_bb_state_array);
2983 for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++)
2984 bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size);
2985 for (size_t i = slbb; i < lbb; i++)
2986 state_reset (bb_state[i]);
2987 }
2988
2989 /* Free the arrays of DFA states at the end of each basic block. */
2990
2991 static void
2992 free_bb_state_array (void)
2993 {
2994 free (bb_state_array);
2995 free (bb_state);
2996 bb_state_array = NULL;
2997 bb_state = NULL;
2998 }
2999
3000 /* Schedule a region. A region is either an inner loop, a loop-free
3001 subroutine, or a single basic block. Each bb in the region is
3002 scheduled after its flow predecessors. */
3003
3004 static void
3005 schedule_region (int rgn)
3006 {
3007 int bb;
3008 int sched_rgn_n_insns = 0;
3009
3010 rgn_n_insns = 0;
3011
3012 /* Do not support register pressure sensitive scheduling for the new regions
3013 as we don't update the liveness info for them. */
3014 if (sched_pressure != SCHED_PRESSURE_NONE
3015 && rgn >= nr_regions_initial)
3016 {
3017 free_global_sched_pressure_data ();
3018 sched_pressure = SCHED_PRESSURE_NONE;
3019 }
3020
3021 rgn_setup_region (rgn);
3022
3023 /* Don't schedule a region in which every block is marked with
3024 BB_DISABLE_SCHEDULE. */
3025 if (sched_is_disabled_for_current_region_p ())
3026 return;
3027
3028 sched_rgn_compute_dependencies (rgn);
3029
3030 sched_rgn_local_init (rgn);
3031
3032 /* Set priorities. */
3033 compute_priorities ();
3034
3035 sched_extend_ready_list (rgn_n_insns);
3036
3037 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3038 {
3039 sched_init_region_reg_pressure_info ();
3040 for (bb = 0; bb < current_nr_blocks; bb++)
3041 {
3042 basic_block first_bb, last_bb;
3043 rtx_insn *head, *tail;
3044
3045 first_bb = EBB_FIRST_BB (bb);
3046 last_bb = EBB_LAST_BB (bb);
3047
3048 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3049
3050 if (no_real_insns_p (head, tail))
3051 {
3052 gcc_assert (first_bb == last_bb);
3053 continue;
3054 }
3055 sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
3056 }
3057 }
3058
3059 /* Now we can schedule all blocks. */
3060 for (bb = 0; bb < current_nr_blocks; bb++)
3061 {
3062 basic_block first_bb, last_bb, curr_bb;
3063 rtx_insn *head, *tail;
3064
3065 first_bb = EBB_FIRST_BB (bb);
3066 last_bb = EBB_LAST_BB (bb);
3067
3068 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3069
3070 if (no_real_insns_p (head, tail))
3071 {
3072 gcc_assert (first_bb == last_bb);
3073 continue;
3074 }
3075
3076 current_sched_info->prev_head = PREV_INSN (head);
3077 current_sched_info->next_tail = NEXT_INSN (tail);
3078
3079 remove_notes (head, tail);
3080
3081 unlink_bb_notes (first_bb, last_bb);
3082
3083 target_bb = bb;
3084
3085 gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
3086 current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
3087
3088 curr_bb = first_bb;
3089 if (dbg_cnt (sched_block))
3090 {
3091 edge f;
3092 int saved_last_basic_block = last_basic_block_for_fn (cfun);
3093
3094 schedule_block (&curr_bb, bb_state[first_bb->index]);
3095 gcc_assert (EBB_FIRST_BB (bb) == first_bb);
3096 sched_rgn_n_insns += sched_n_insns;
3097 realloc_bb_state_array (saved_last_basic_block);
3098 f = find_fallthru_edge (last_bb->succs);
3099 if (f && f->probability * 100 / REG_BR_PROB_BASE >=
3100 PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF))
3101 {
3102 memcpy (bb_state[f->dest->index], curr_state,
3103 dfa_state_size);
3104 if (sched_verbose >= 5)
3105 fprintf (sched_dump, "saving state for edge %d->%d\n",
3106 f->src->index, f->dest->index);
3107 }
3108 }
3109 else
3110 {
3111 sched_rgn_n_insns += rgn_n_insns;
3112 }
3113
3114 /* Clean up. */
3115 if (current_nr_blocks > 1)
3116 free_trg_info ();
3117 }
3118
3119 /* Sanity check: verify that all region insns were scheduled. */
3120 gcc_assert (sched_rgn_n_insns == rgn_n_insns);
3121
3122 sched_finish_ready_list ();
3123
3124 /* Done with this region. */
3125 sched_rgn_local_finish ();
3126
3127 /* Free dependencies. */
3128 for (bb = 0; bb < current_nr_blocks; ++bb)
3129 free_block_dependencies (bb);
3130
3131 gcc_assert (haifa_recovery_bb_ever_added_p
3132 || deps_pools_are_empty_p ());
3133 }
3134
3135 /* Initialize data structures for region scheduling. */
3136
3137 void
3138 sched_rgn_init (bool single_blocks_p)
3139 {
3140 min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
3141 / 100);
3142
3143 nr_inter = 0;
3144 nr_spec = 0;
3145
3146 extend_regions ();
3147
3148 CONTAINING_RGN (ENTRY_BLOCK) = -1;
3149 CONTAINING_RGN (EXIT_BLOCK) = -1;
3150
3151 realloc_bb_state_array (0);
3152
3153 /* Compute regions for scheduling. */
3154 if (single_blocks_p
3155 || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
3156 || !flag_schedule_interblock
3157 || is_cfg_nonregular ())
3158 {
3159 find_single_block_region (sel_sched_p ());
3160 }
3161 else
3162 {
3163 /* Compute the dominators and post dominators. */
3164 if (!sel_sched_p ())
3165 calculate_dominance_info (CDI_DOMINATORS);
3166
3167 /* Find regions. */
3168 find_rgns ();
3169
3170 if (sched_verbose >= 3)
3171 debug_regions ();
3172
3173 /* For now. This will move as more and more of haifa is converted
3174 to using the cfg code. */
3175 if (!sel_sched_p ())
3176 free_dominance_info (CDI_DOMINATORS);
3177 }
3178
3179 gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun));
3180
3181 RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
3182 RGN_NR_BLOCKS (nr_regions - 1));
3183 nr_regions_initial = nr_regions;
3184 }
3185
3186 /* Free data structures for region scheduling. */
3187 void
3188 sched_rgn_finish (void)
3189 {
3190 free_bb_state_array ();
3191
3192 /* Reposition the prologue and epilogue notes in case we moved the
3193 prologue/epilogue insns. */
3194 if (reload_completed)
3195 reposition_prologue_and_epilogue_notes ();
3196
3197 if (sched_verbose)
3198 {
3199 if (reload_completed == 0
3200 && flag_schedule_interblock)
3201 {
3202 fprintf (sched_dump,
3203 "\n;; Procedure interblock/speculative motions == %d/%d \n",
3204 nr_inter, nr_spec);
3205 }
3206 else
3207 gcc_assert (nr_inter <= 0);
3208 fprintf (sched_dump, "\n\n");
3209 }
3210
3211 nr_regions = 0;
3212
3213 free (rgn_table);
3214 rgn_table = NULL;
3215
3216 free (rgn_bb_table);
3217 rgn_bb_table = NULL;
3218
3219 free (block_to_bb);
3220 block_to_bb = NULL;
3221
3222 free (containing_rgn);
3223 containing_rgn = NULL;
3224
3225 free (ebb_head);
3226 ebb_head = NULL;
3227 }
3228
3229 /* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
3230 point to the region RGN. */
3231 void
3232 rgn_setup_region (int rgn)
3233 {
3234 int bb;
3235
3236 /* Set variables for the current region. */
3237 current_nr_blocks = RGN_NR_BLOCKS (rgn);
3238 current_blocks = RGN_BLOCKS (rgn);
3239
3240 /* EBB_HEAD is a region-scope structure. But we realloc it for
3241 each region to save time/memory/something else.
3242 See the comments in rgn_add_block for why we allocate one extra element. */
3243 ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
3244 for (bb = 0; bb <= current_nr_blocks; bb++)
3245 ebb_head[bb] = current_blocks + bb;
3246 }
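
/* For instance (a made-up region): if RGN starts at position 5 of
   rgn_bb_table and contains 3 blocks, the loop above yields
   ebb_head = { 5, 6, 7, 8 }.  The extra trailing entry makes
   ebb_head[current_nr_blocks] valid, marking the first position past
   the region (see rgn_add_block).  */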
3247
3248 /* Compute instruction dependencies in region RGN. */
3249 void
3250 sched_rgn_compute_dependencies (int rgn)
3251 {
3252 if (!RGN_DONT_CALC_DEPS (rgn))
3253 {
3254 int bb;
3255
3256 if (sel_sched_p ())
3257 sched_emulate_haifa_p = 1;
3258
3259 init_deps_global ();
3260
3261 /* Initializations for region data dependence analysis. */
3262 bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
3263 for (bb = 0; bb < current_nr_blocks; bb++)
3264 init_deps (bb_deps + bb, false);
3265
3266 /* Initialize bitmap used in add_branch_dependences. */
3267 insn_referenced = sbitmap_alloc (sched_max_luid);
3268 bitmap_clear (insn_referenced);
3269
3270 /* Compute backward dependencies. */
3271 for (bb = 0; bb < current_nr_blocks; bb++)
3272 compute_block_dependences (bb);
3273
3274 sbitmap_free (insn_referenced);
3275 free_pending_lists ();
3276 finish_deps_global ();
3277 free (bb_deps);
3278
3279 /* We don't want to recalculate this twice. */
3280 RGN_DONT_CALC_DEPS (rgn) = 1;
3281
3282 if (sel_sched_p ())
3283 sched_emulate_haifa_p = 0;
3284 }
3285 else
3286 /* Either this is a recovery block, which is always a single-block
3287 region, or we are using selective scheduling. */
3288 gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
3289 }
3290
3291 /* Init data structures for region RGN. */
3293 void
3294 sched_rgn_local_init (int rgn)
3295 {
3296 int bb;
3297
3298 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
3299 if (current_nr_blocks > 1)
3300 {
3301 basic_block block;
3302 edge e;
3303 edge_iterator ei;
3304
3305 prob = XNEWVEC (int, current_nr_blocks);
3306
3307 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
3308 bitmap_vector_clear (dom, current_nr_blocks);
3309
3310 /* Use ->aux to implement EDGE_TO_BIT mapping. */
3311 rgn_nr_edges = 0;
3312 FOR_EACH_BB_FN (block, cfun)
3313 {
3314 if (CONTAINING_RGN (block->index) != rgn)
3315 continue;
3316 FOR_EACH_EDGE (e, ei, block->succs)
3317 SET_EDGE_TO_BIT (e, rgn_nr_edges++);
3318 }
3319
3320 rgn_edges = XNEWVEC (edge, rgn_nr_edges);
3321 rgn_nr_edges = 0;
3322 FOR_EACH_BB_FN (block, cfun)
3323 {
3324 if (CONTAINING_RGN (block->index) != rgn)
3325 continue;
3326 FOR_EACH_EDGE (e, ei, block->succs)
3327 rgn_edges[rgn_nr_edges++] = e;
3328 }
3329
3330 /* Split edges. */
3331 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3332 bitmap_vector_clear (pot_split, current_nr_blocks);
3333 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3334 bitmap_vector_clear (ancestor_edges, current_nr_blocks);
3335
3336 /* Compute probabilities, dominators, split_edges. */
3337 for (bb = 0; bb < current_nr_blocks; bb++)
3338 compute_dom_prob_ps (bb);
3339
3340 /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
3341 /* We don't need them anymore. But we want to avoid duplication of
3342 aux fields in the newly created edges. */
3343 FOR_EACH_BB_FN (block, cfun)
3344 {
3345 if (CONTAINING_RGN (block->index) != rgn)
3346 continue;
3347 FOR_EACH_EDGE (e, ei, block->succs)
3348 e->aux = NULL;
3349 }
3350 }
3351 }
3352
3353 /* Free data computed for the finished region. */
3354 void
3355 sched_rgn_local_free (void)
3356 {
3357 free (prob);
3358 sbitmap_vector_free (dom);
3359 sbitmap_vector_free (pot_split);
3360 sbitmap_vector_free (ancestor_edges);
3361 free (rgn_edges);
3362 }
3363
3364 /* Free interblock data for the finished region, if any was allocated. */
3365 void
3366 sched_rgn_local_finish (void)
3367 {
3368 if (current_nr_blocks > 1 && !sel_sched_p ())
3369 {
3370 sched_rgn_local_free ();
3371 }
3372 }
3373
3374 /* Setup scheduler infos. */
3375 void
3376 rgn_setup_common_sched_info (void)
3377 {
3378 memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
3379 sizeof (rgn_common_sched_info));
3380
3381 rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
3382 rgn_common_sched_info.add_block = rgn_add_block;
3383 rgn_common_sched_info.estimate_number_of_insns
3384 = rgn_estimate_number_of_insns;
3385 rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
3386
3387 common_sched_info = &rgn_common_sched_info;
3388 }
3389
3390 /* Setup all *_sched_info structures (for the Haifa frontend
3391 and for the dependence analysis) in the interblock scheduler. */
3392 void
3393 rgn_setup_sched_infos (void)
3394 {
3395 if (!sel_sched_p ())
3396 memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
3397 sizeof (rgn_sched_deps_info));
3398 else
3399 memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
3400 sizeof (rgn_sched_deps_info));
3401
3402 sched_deps_info = &rgn_sched_deps_info;
3403
3404 memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
3405 current_sched_info = &rgn_sched_info;
3406 }
3407
3408 /* The one entry point in this file. */
3409 void
3410 schedule_insns (void)
3411 {
3412 int rgn;
3413
3414 /* Taking care of this degenerate case makes the rest of
3415 this code simpler. */
3416 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
3417 return;
3418
3419 rgn_setup_common_sched_info ();
3420 rgn_setup_sched_infos ();
3421
3422 haifa_sched_init ();
3423 sched_rgn_init (reload_completed);
3424
3425 bitmap_initialize (&not_in_df, 0);
3426 bitmap_clear (&not_in_df);
3427
3428 /* Schedule every region in the subroutine. */
3429 for (rgn = 0; rgn < nr_regions; rgn++)
3430 if (dbg_cnt (sched_region))
3431 schedule_region (rgn);
3432
3433 /* Clean up. */
3434 sched_rgn_finish ();
3435 bitmap_clear (&not_in_df);
3436
3437 haifa_sched_finish ();
3438 }
3439
3440 /* INSN has been added to/removed from current region. */
3441 static void
3442 rgn_add_remove_insn (rtx_insn *insn, int remove_p)
3443 {
3444 if (!remove_p)
3445 rgn_n_insns++;
3446 else
3447 rgn_n_insns--;
3448
3449 if (INSN_BB (insn) == target_bb)
3450 {
3451 if (!remove_p)
3452 target_n_insns++;
3453 else
3454 target_n_insns--;
3455 }
3456 }
3457
3458 /* Extend internal data structures. */
3459 void
3460 extend_regions (void)
3461 {
3462 rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
3463 rgn_bb_table = XRESIZEVEC (int, rgn_bb_table,
3464 n_basic_blocks_for_fn (cfun));
3465 block_to_bb = XRESIZEVEC (int, block_to_bb,
3466 last_basic_block_for_fn (cfun));
3467 containing_rgn = XRESIZEVEC (int, containing_rgn,
3468 last_basic_block_for_fn (cfun));
3469 }
3470
3471 void
3472 rgn_make_new_region_out_of_new_block (basic_block bb)
3473 {
3474 int i;
3475
3476 i = RGN_BLOCKS (nr_regions);
3477 /* I - first free position in rgn_bb_table. */
3478
3479 rgn_bb_table[i] = bb->index;
3480 RGN_NR_BLOCKS (nr_regions) = 1;
3481 RGN_HAS_REAL_EBB (nr_regions) = 0;
3482 RGN_DONT_CALC_DEPS (nr_regions) = 0;
3483 CONTAINING_RGN (bb->index) = nr_regions;
3484 BLOCK_TO_BB (bb->index) = 0;
3485
3486 nr_regions++;
3487
3488 RGN_BLOCKS (nr_regions) = i + 1;
3489 }
3490
3491 /* BB was added to ebb after AFTER. */
3492 static void
3493 rgn_add_block (basic_block bb, basic_block after)
3494 {
3495 extend_regions ();
3496 bitmap_set_bit (&not_in_df, bb->index);
3497
3498 if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun))
3499 {
3500 rgn_make_new_region_out_of_new_block (bb);
3501 RGN_DONT_CALC_DEPS (nr_regions - 1) = (after
3502 == EXIT_BLOCK_PTR_FOR_FN (cfun));
3503 }
3504 else
3505 {
3506 int i, pos;
3507
3508 /* We need to fix rgn_table, block_to_bb, containing_rgn
3509 and ebb_head. */
3510
3511 BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
3512
3513 /* We extend ebb_head to one more position to
3514 easily find the last position of the last ebb in
3515 the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
3516 is _always_ valid for access. */
3517
3518 i = BLOCK_TO_BB (after->index) + 1;
3519 pos = ebb_head[i] - 1;
3520 /* Now POS is the index of the last block in the region. */
3521
3522 /* Find index of basic block AFTER. */
3523 for (; rgn_bb_table[pos] != after->index; pos--)
3524 ;
3525
3526 pos++;
3527 gcc_assert (pos > ebb_head[i - 1]);
3528
3529 /* i - ebb right after "AFTER". */
3530 /* ebb_head[i] - VALID. */
3531
3532 /* Source position: POS
3533 Destination position: POS + 1
3534 Last position:
3535 RGN_BLOCKS (nr_regions) - 1
3536 Number of elements to copy: (last_position) - (source_position) + 1
3537 */
3538
3539 memmove (rgn_bb_table + pos + 1,
3540 rgn_bb_table + pos,
3541 ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
3542 * sizeof (*rgn_bb_table));
3543
3544 rgn_bb_table[pos] = bb->index;
3545
3546 for (; i <= current_nr_blocks; i++)
3547 ebb_head [i]++;
3548
3549 i = CONTAINING_RGN (after->index);
3550 CONTAINING_RGN (bb->index) = i;
3551
3552 RGN_HAS_REAL_EBB (i) = 1;
3553
3554 for (++i; i <= nr_regions; i++)
3555 RGN_BLOCKS (i)++;
3556 }
3557 }
3558
3559 /* Fix internal data after interblock movement of jump instruction.
3560 For parameter meaning please refer to
3561 sched-int.h: struct sched_info: fix_recovery_cfg. */
3562 static void
3563 rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
3564 {
3565 int old_pos, new_pos, i;
3566
3567 BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
3568
3569 for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
3570 rgn_bb_table[old_pos] != check_bb_nexti;
3571 old_pos--)
3572 ;
3573 gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
3574
3575 for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
3576 rgn_bb_table[new_pos] != bbi;
3577 new_pos--)
3578 ;
3579 new_pos++;
3580 gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
3581
3582 gcc_assert (new_pos < old_pos);
3583
3584 memmove (rgn_bb_table + new_pos + 1,
3585 rgn_bb_table + new_pos,
3586 (old_pos - new_pos) * sizeof (*rgn_bb_table));
3587
3588 rgn_bb_table[new_pos] = check_bb_nexti;
3589
3590 for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
3591 ebb_head[i]++;
3592 }
3593
3594 /* Return next block in ebb chain. For parameter meaning please refer to
3595 sched-int.h: struct sched_info: advance_target_bb. */
3596 static basic_block
3597 advance_target_bb (basic_block bb, rtx_insn *insn)
3598 {
3599 if (insn)
3600 return 0;
3601
3602 gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
3603 && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
3604 return bb->next_bb;
3605 }
3606
3607 #endif
3608 \f
3609 /* Run instruction scheduler. */
3610 static unsigned int
3611 rest_of_handle_live_range_shrinkage (void)
3612 {
3613 #ifdef INSN_SCHEDULING
3614 int saved;
3615
3616 initialize_live_range_shrinkage ();
3617 saved = flag_schedule_interblock;
3618 flag_schedule_interblock = false;
3619 schedule_insns ();
3620 flag_schedule_interblock = saved;
3621 finish_live_range_shrinkage ();
3622 #endif
3623 return 0;
3624 }
3625
3626 /* Run instruction scheduler. */
3627 static unsigned int
3628 rest_of_handle_sched (void)
3629 {
3630 #ifdef INSN_SCHEDULING
3631 if (flag_selective_scheduling
3632 && ! maybe_skip_selective_scheduling ())
3633 run_selective_scheduling ();
3634 else
3635 schedule_insns ();
3636 #endif
3637 return 0;
3638 }
3639
3640 /* Run second scheduling pass after reload. */
3641 static unsigned int
3642 rest_of_handle_sched2 (void)
3643 {
3644 #ifdef INSN_SCHEDULING
3645 if (flag_selective_scheduling2
3646 && ! maybe_skip_selective_scheduling ())
3647 run_selective_scheduling ();
3648 else
3649 {
3650 /* Do control and data sched analysis again,
3651 and write some more of the results to dump file. */
3652 if (flag_sched2_use_superblocks)
3653 schedule_ebbs ();
3654 else
3655 schedule_insns ();
3656 }
3657 #endif
3658 return 0;
3659 }
3660
3661 namespace {
3662
3663 const pass_data pass_data_live_range_shrinkage =
3664 {
3665 RTL_PASS, /* type */
3666 "lr_shrinkage", /* name */
3667 OPTGROUP_NONE, /* optinfo_flags */
3668 TV_LIVE_RANGE_SHRINKAGE, /* tv_id */
3669 0, /* properties_required */
3670 0, /* properties_provided */
3671 0, /* properties_destroyed */
3672 0, /* todo_flags_start */
3673 TODO_df_finish, /* todo_flags_finish */
3674 };
3675
3676 class pass_live_range_shrinkage : public rtl_opt_pass
3677 {
3678 public:
3679 pass_live_range_shrinkage(gcc::context *ctxt)
3680 : rtl_opt_pass(pass_data_live_range_shrinkage, ctxt)
3681 {}
3682
3683 /* opt_pass methods: */
3684 virtual bool gate (function *)
3685 {
3686 #ifdef INSN_SCHEDULING
3687 return flag_live_range_shrinkage;
3688 #else
3689 return 0;
3690 #endif
3691 }
3692
3693 virtual unsigned int execute (function *)
3694 {
3695 return rest_of_handle_live_range_shrinkage ();
3696 }
3697
3698 }; // class pass_live_range_shrinkage
3699
3700 } // anon namespace
3701
3702 rtl_opt_pass *
3703 make_pass_live_range_shrinkage (gcc::context *ctxt)
3704 {
3705 return new pass_live_range_shrinkage (ctxt);
3706 }
3707
3708 namespace {
3709
3710 const pass_data pass_data_sched =
3711 {
3712 RTL_PASS, /* type */
3713 "sched1", /* name */
3714 OPTGROUP_NONE, /* optinfo_flags */
3715 TV_SCHED, /* tv_id */
3716 0, /* properties_required */
3717 0, /* properties_provided */
3718 0, /* properties_destroyed */
3719 0, /* todo_flags_start */
3720 TODO_df_finish, /* todo_flags_finish */
3721 };
3722
3723 class pass_sched : public rtl_opt_pass
3724 {
3725 public:
3726 pass_sched (gcc::context *ctxt)
3727 : rtl_opt_pass (pass_data_sched, ctxt)
3728 {}
3729
3730 /* opt_pass methods: */
3731 virtual bool gate (function *);
3732 virtual unsigned int execute (function *) { return rest_of_handle_sched (); }
3733
3734 }; // class pass_sched
3735
3736 bool
3737 pass_sched::gate (function *)
3738 {
3739 #ifdef INSN_SCHEDULING
3740 return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
3741 #else
3742 return 0;
3743 #endif
3744 }
3745
3746 } // anon namespace
3747
3748 rtl_opt_pass *
3749 make_pass_sched (gcc::context *ctxt)
3750 {
3751 return new pass_sched (ctxt);
3752 }
3753
3754 namespace {
3755
3756 const pass_data pass_data_sched2 =
3757 {
3758 RTL_PASS, /* type */
3759 "sched2", /* name */
3760 OPTGROUP_NONE, /* optinfo_flags */
3761 TV_SCHED2, /* tv_id */
3762 0, /* properties_required */
3763 0, /* properties_provided */
3764 0, /* properties_destroyed */
3765 0, /* todo_flags_start */
3766 TODO_df_finish, /* todo_flags_finish */
3767 };
3768
3769 class pass_sched2 : public rtl_opt_pass
3770 {
3771 public:
3772 pass_sched2 (gcc::context *ctxt)
3773 : rtl_opt_pass (pass_data_sched2, ctxt)
3774 {}
3775
3776 /* opt_pass methods: */
3777 virtual bool gate (function *);
3778 virtual unsigned int execute (function *)
3779 {
3780 return rest_of_handle_sched2 ();
3781 }
3782
3783 }; // class pass_sched2
3784
3785 bool
3786 pass_sched2::gate (function *)
3787 {
3788 #ifdef INSN_SCHEDULING
3789 return optimize > 0 && flag_schedule_insns_after_reload
3790 && !targetm.delay_sched2 && dbg_cnt (sched2_func);
3791 #else
3792 return 0;
3793 #endif
3794 }
3795
3796 } // anon namespace
3797
3798 rtl_opt_pass *
3799 make_pass_sched2 (gcc::context *ctxt)
3800 {
3801 return new pass_sched2 (ctxt);
3802 }