/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"
#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;
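/* (Only recovery blocks have their dependencies precomputed;
   ebb_add_block marks them in this bitmap so schedule_ebb can skip
   the dependency analysis for them.)  */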
/* Last basic block in current ebb.  */
static basic_block last_bb;
/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (const_rtx, int);
static int rank (rtx, rtx);
static int ebb_contributes_to_priority (rtx, rtx);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static void debug_ebb_dependencies (rtx, rtx);

static void ebb_add_remove_insn (rtx, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void ebb_fix_recovery_cfg (int, int, int);
/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *) p_;
  sched_rgn_n_insns = *p;
  free (p_);
}
/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}
/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx head, rtx tail)
{
  fprintf (sched_dump,
           ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}
/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */
static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}
/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}
/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx insn, rtx last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where a partially dead instruction
         is scheduled after the last control-flow instruction.  In this
         case we can create a new basic block.  It is always exactly
         one basic block, the last in the sequence.  */
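      /* Concretely: the jump has been scheduled ahead of insns that
         used to precede it, so those insns now fall after the jump and
         need a block of their own.  It is created below by splitting
         the fallthru edge, or as a fresh unreachable block when there
         is no fallthru edge.  */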
      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
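  /* E.g. insn uid 42 is printed as "+   42" at a cycle start and as
     "    42" otherwise.  */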
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}
/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */
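/* Here the insn from the block with the larger count/frequency wins,
   presumably so that the hotter, earlier part of the ebb is scheduled
   first.  */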
static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;

  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;

  return 0;
}
/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                             rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}
/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */
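/* Registers live on entry to any non-fallthru successor must count as
   used, so that insns setting them are not scheduled past the jump.  */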
static void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}
/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already-processed
   blocks in the EBB.  The list is formed in `add_deps_for_risky_insns'.  */
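/* For example: if insn1 sets base register r, and load_insn reads
   MEM[r + 8] while insn2 reads MEM[r + 4], then any address fault that
   load_insn could raise when hoisted would already have been raised by
   insn2.  (The offsets here are illustrative only.)  */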
static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}
/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */
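/* Blocks containing a control-flow insn are threaded into a LIFO list
   through their bb->aux fields, rooted at LAST_BLOCK below; the list is
   consulted by earliest_block_with_similiar_load and cleared again
   before returning.  */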
static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int classification;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similiar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking PRISKY_CANDIDATEs
                 analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        DEP_STATUS (dep) = set_dep_weak (DEP_ANTI,
                                                         BEGIN_CONTROL,
                                                         MAX_DEP_WEAK);

                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }

  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}
/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behaviour depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return NULL.  */

basic_block
schedule_ebb (rtx head, rtx tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they always are single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might have packed all instructions into fewer blocks,
     and so made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}
/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}
/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}
/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
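  /* The cutoff params are percentages; the line above rescales to the
     edge-probability scale (REG_BR_PROB_BASE is 10000, so e.g. a 50%
     cutoff becomes 5000).  */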

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }
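
      /* schedule_ebb returns the (possibly new) last block of the ebb;
         with MODULO_SCHEDULING false it never returns NULL, so the
         outer walk resumes after that block.  */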
      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}
/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}
/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERS,
     therefore, they always form a single-block ebb,
     therefore, we can use rec->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}
/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return the next non-empty block.  */
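    /* (A block counts as empty here when its bb note is also its last
       insn, i.e. scheduling packed all its real insns into earlier
       blocks.)  */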
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}
/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}
#endif /* INSN_SCHEDULING */