/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list (see also the sketch after this
   comment):

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward
   dependences INSN_FORW_DEPS for the purpose of forward list
   scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
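/* A minimal standalone sketch of the list-scheduling loop described
   above, assuming a toy representation: each tick, insns whose delay
   has elapsed become ready, and the highest-priority ready insn is
   issued.  All names below are hypothetical and not GCC APIs.  */
#if 0
#include <stdio.h>

#define N_INSNS 4

int
main (void)
{
  /* delay[i]: cycles until insn i becomes ready; -1 once issued.  */
  int delay[N_INSNS] = { 0, 2, 0, 1 };
  int priority[N_INSNS] = { 3, 9, 5, 1 };
  int issued = 0;

  for (int clock = 0; issued < N_INSNS; clock++)
    {
      int best = -1;

      /* Pick the ready insn with the highest priority; real ties are
         broken by the eight rules listed above.  */
      for (int i = 0; i < N_INSNS; i++)
        if (delay[i] == 0 && (best < 0 || priority[i] > priority[best]))
          best = i;

      if (best >= 0)
        {
          printf ("cycle %d: issue insn %d\n", clock, best);
          delay[best] = -1;
          issued++;
        }
      else
        printf ("cycle %d: stall\n", clock);

      /* Queued insns advance one cycle closer to being ready.  */
      for (int i = 0; i < N_INSNS; i++)
        if (delay[i] > 0)
          delay[i]--;
    }
  return 0;
}
#endif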
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "ira.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

int issue_rate;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;
/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */
void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN) (HID (INSN)->tick)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;
/* True, if recovery block was added during scheduling of current block.
   Used to determine, if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True, if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;
/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   on the basis of the maximal time of functional unit reservations and
   of getting a result.  This is the longest time an insn may be
   queued.  */
static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
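/* A standalone sketch of the wraparound arithmetic behind NEXT_Q and
   NEXT_Q_AFTER, assuming a queue whose length is a power of two, so
   that masking with (length - 1) implements "mod length".  The names
   below are hypothetical and not GCC APIs.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const int max_index = 7;	/* 8 slots: mask is 2^3 - 1.  */
  int q = 6;
  int step;

  for (step = 0; step < 4; step++)
    {
      printf ("slot %d\n", q);	/* Prints 6, 7, 0, 1.  */
      q = (q + 1) & max_index;	/* Wraps past the end of the buffer.  */
    }
  return 0;
}
#endif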
#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
/* The following variable value refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;
static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int haifa_classify_insn (const_rtx);
static int haifa_luid_for_non_insn (rtx x);
/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

const struct sched_scan_info_def *sched_scan_info;
/* Mapping from instruction UID to its Logical UID.  */
VEC (int, heap) *sched_luids = NULL;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
VEC (haifa_insn_data_def, heap) *h_i_d = NULL;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);
static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;

	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}
/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that needs to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

static int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
/* Forward declarations.  */

static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
static void extend_h_i_d (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */
static void ready_add (struct ready_list *, rtx, bool);
static rtx ready_remove_first (struct ready_list *);
static rtx ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);

static int choose_ready (struct ready_list *, rtx *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void haifa_change_pattern (rtx, rtx);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);

#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
#endif

#endif /* INSN_SCHEDULING */
/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
bool sched_pressure_p;

/* Map regno -> its cover class.  The map is defined only when
   SCHED_PRESSURE_P is true.  */
enum reg_class *sched_regno_cover_class;

/* The current register pressure.  Only elements corresponding to cover
   classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers living at given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous array.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it is only clearing registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}
/* Update current register pressure related info after birth (if
   BIRTH_P) or death of register REGNO.  */
static void
mark_regno_birth_or_death (int regno, bool birth_p)
{
  enum reg_class cover_class;

  cover_class = sched_regno_cover_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (cover_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      bitmap_set_bit (curr_reg_live, regno);
	      curr_reg_pressure[cover_class]
		+= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
	    }
	  else
	    {
	      bitmap_clear_bit (curr_reg_live, regno);
	      curr_reg_pressure[cover_class]
		-= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
	    }
	}
    }
  else if (cover_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  bitmap_set_bit (curr_reg_live, regno);
	  curr_reg_pressure[cover_class]++;
	}
      else
	{
	  bitmap_clear_bit (curr_reg_live, regno);
	  curr_reg_pressure[cover_class]--;
	}
    }
}
/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    curr_reg_pressure[ira_reg_class_cover[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (j, true);
}
/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j, regno;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      regno = REGNO (x);
      if (regno >= FIRST_PSEUDO_REGISTER)
	bitmap_set_bit (region_ref_regs, REGNO (x));
      else
	for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--)
	  bitmap_set_bit (region_ref_regs, regno + i);
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	setup_ref_regs (XVECEXP (x, i, j));
}
/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
#ifdef EH_RETURN_DATA_REGNO
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (regno, true);
      }
#endif
}
/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    saved_reg_pressure[ira_reg_class_cover[i]]
      = curr_reg_pressure[ira_reg_class_cover[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    curr_reg_pressure[ira_reg_class_cover[i]]
      = saved_reg_pressure[ira_reg_class_cover[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}
/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;

  return true;
}
/* Print info about the current register pressure and its excess for
   each cover class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - ira_available_class_regs[cl]);
    }
  fprintf (sched_dump, "\n");
}
/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */
static rtx last_scheduled_insn;

/* Cached cost of the instruction.  Use below function to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN)	(HID (INSN)->cost)
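/* A standalone sketch of the caching convention behind INSN_COST,
   assuming a cost field initialized to -1 as a "not yet computed"
   marker so the cost computation runs at most once per insn.  The
   names below are hypothetical and not GCC APIs.  */
#if 0
#include <stdio.h>

#define N_INSNS 16

static int cost_cache[N_INSNS];

static int
expensive_cost_fn (int uid)
{
  return uid * 2;	/* Stand-in for a real latency computation.  */
}

static int
cached_cost (int uid)
{
  if (cost_cache[uid] < 0)
    cost_cache[uid] = expensive_cost_fn (uid);	/* Compute once.  */
  return cost_cache[uid];
}

int
main (void)
{
  int i;

  for (i = 0; i < N_INSNS; i++)
    cost_cache[i] = -1;		/* Mark every cost as uninitialized.  */
  printf ("%d %d\n", cached_cost (3), cached_cost (3));
  return 0;
}
#endif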
/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_cost (rtx insn)
{
  int cost;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx insn = DEP_PRO (link);
  rtx used = DEP_CON (link);
  int cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
					    dw);
      else if (targetm.sched.adjust_cost != NULL)
	{
	  /* This variable is used for backward compatibility with the
	     targets.  */
	  rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);

	  /* Make it self-cycled, so that if some tries to walk over this
	     incomplete list he/she will be caught in an endless loop.  */
	  XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;

	  /* Targets use only REG_NOTE_KIND of the link.  */
	  PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));

	  cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
					    insn, cost);

	  free_INSN_LIST_node (dep_cost_rtx_link);
	}

      if (cost < 0)
	cost = 0;
    }

  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}
/* Use this sel-sched.c friendly function in reorder2 instead of increasing
   INSN_PRIORITY explicitly.  */
void
increase_insn_priority (rtx insn, int amount)
{
  if (!sel_sched_p ())
    {
      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
      if (INSN_PRIORITY_KNOWN (insn))
	INSN_PRIORITY (insn) += amount;
    }
  else
    {
      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
	 Use EXPR_PRIORITY instead.  */
      sel_add_to_insn_priority (insn, amount);
    }
}
/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  if (DEBUG_INSN_P (DEP_CON (dep))
      || DEBUG_INSN_P (DEP_PRO (dep)))
    return false;

  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
						    DEP_PRO (dep)))
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will less likely be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
  if (sched_deps_info->generate_spec_deps
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}
/* Compute the number of nondebug forward deps of an insn.  */
static int
dep_list_size (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int dbgcount = 0, nodbgcount = 0;

  if (!MAY_HAVE_DEBUG_INSNS)
    return sd_lists_size (insn, SD_LIST_FORW);

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      if (DEBUG_INSN_P (DEP_CON (dep)))
	dbgcount++;
      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
	nodbgcount++;
    }

  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));

  return nodbgcount;
}
/* Compute the priority number for INSN.  */
static int
priority (rtx insn)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (!INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (dep_list_size (insn) == 0)
	/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
	   some forward deps but all of them are ignored by
	   contributes_to_priority hook.  At the moment we set priority of
	   such insn to 0.  */
	this_priority = insn_cost (insn);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     different than that of normal instructions.  Instead of walking
	     through INSN_FORW_DEPS (check) list, we walk through
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */

	  /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      sd_iterator_def sd_it;
	      dep_t dep;

	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
		{
		  rtx next;
		  int next_priority;

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);

	  if (this_priority < 0)
	    {
	      gcc_assert (this_priority == -1);

	      this_priority = insn_cost (insn);
	    }
	}

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)				             \
       swap_sort (READY, N_READY);			             \
     else if ((N_READY) > 2)                                         \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)
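/* A standalone sketch of feeding a rank_for_schedule-style comparator
   to qsort, as SCHED_SORT does.  qsort is not stable, so the
   comparator falls back to a unique id and never returns 0.  The
   names below are hypothetical and not GCC APIs.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

/* Each element is {id, priority}; sort descending by priority.  */
static int
toy_rank (const void *x, const void *y)
{
  const int *a = (const int *) x;
  const int *b = (const int *) y;

  if (b[1] != a[1])
    return b[1] - a[1];
  return a[0] - b[0];	/* Unique tie-breaker keeps the order stable.  */
}

int
main (void)
{
  int v[3][2] = { {0, 5}, {1, 7}, {2, 5} };
  int i;

  qsort (v, 3, sizeof (v[0]), toy_rank);
  for (i = 0; i < 3; i++)
    printf ("id %d pri %d\n", v[i][0], v[i][1]);
  return 0;
}
#endif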
/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  enum machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  struct reg_use_data *use;
  static int death[N_REG_CLASSES];

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  excess_cost_change = 0;
  for (i = 0; i < ira_reg_class_cover_size; i++)
    death[ira_reg_class_cover[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      {
	cl = sched_regno_cover_class[use->regno];
	if (use->regno < FIRST_PSEUDO_REGISTER)
	  death[cl]++;
	else
	  death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
      }
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cl = ira_reg_class_cover[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
      after = MAX (0, max_reg_pressure[i] + change
		   - ira_available_class_regs[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
			     * (ira_memory_move_cost[mode][cl][0]
				+ ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx last;
  int tmp_class, tmp2_class;
  int val, priority_val, info_val;

  if (MAY_HAVE_DEBUG_INSNS)
    {
      /* Schedule debug insns as early as possible.  */
      if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
	return -1;
      else if (DEBUG_INSN_P (tmp2))
	return 1;
    }

  /* The insn in a schedule group should be issued the first.  */
  if (flag_sched_group_heuristic &&
      SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Make sure that priority of TMP and TMP2 are initialized.  */
  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));

  if (sched_pressure_p)
    {
      int diff;

      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess.  */
      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		   + (INSN_TICK (tmp) > clock_var
		      ? INSN_TICK (tmp) - clock_var : 0)
		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
		   - (INSN_TICK (tmp2) > clock_var
		      ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
	return diff;
    }

  if (sched_pressure_p
      && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
    {
      if (INSN_TICK (tmp) <= clock_var)
	return -1;
      else if (INSN_TICK (tmp2) <= clock_var)
	return 1;
      else
	return INSN_TICK (tmp) - INSN_TICK (tmp2);
    }
  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (flag_sched_critical_path_heuristic && priority_val)
    return priority_val;

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (flag_sched_spec_insn_heuristic && spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = ds_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = ds_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
    return info_val;

  if (flag_sched_last_insn_heuristic)
    {
      last = last_scheduled_insn;

      if (DEBUG_INSN_P (last) && last != current_sched_info->prev_head)
	do
	  last = PREV_INSN (last);
	while (!NONDEBUG_INSN_P (last)
	       && last != current_sched_info->prev_head);
    }

  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
  if (flag_sched_last_insn_heuristic && NONDEBUG_INSN_P (last))
    {
      dep_t dep1;
      dep_t dep2;

      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      dep1 = sd_find_dep_between (last, tmp, true);

      if (dep1 == NULL || dep_cost (dep1) == 1)
	tmp_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep1) == REG_DEP_TRUE)
	tmp_class = 1;
      else
	tmp_class = 2;

      dep2 = sd_find_dep_between (last, tmp2, true);

      if (dep2 == NULL || dep_cost (dep2) == 1)
	tmp2_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep2) == REG_DEP_TRUE)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  val = (dep_list_size (tmp2) - dep_list_size (tmp));

  if (flag_sched_dep_count_heuristic && val != 0)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
1311 /* Resort the array A in which only element at index N may be out of order. */
1313 HAIFA_INLINE
static void
1314 swap_sort (rtx
*a
, int n
)
1316 rtx insn
= a
[n
- 1];
1319 while (i
>= 0 && rank_for_schedule (a
+ i
, &insn
) >= 0)
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);
  gcc_assert (!DEBUG_INSN_P (insn));

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}
/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
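/* A standalone sketch of the ready-list layout used here, assuming
   the insns live at the top of a vector with the best insn at index
   FIRST and element I at vec[first - i].  The names below are
   hypothetical and not GCC APIs.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int vec[8] = { 0 };
  int veclen = 8, first = veclen - 1, n_ready = 0;
  int i;

  /* Push three insns toward lower indices, as ready_add does for
     lowest-priority additions.  */
  for (i = 10; i <= 12; i++)
    vec[first - n_ready++] = i;

  for (i = 0; i < n_ready; i++)
    printf ("element %d = %d\n", i, vec[first - i]);	/* 10, 11, 12 */
  return 0;
}
#endif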
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;
  if (DEBUG_INSN_P (insn))
    ready->n_debug++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}
/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose the ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready.  INDEX for
   insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
/* Remove the element INDEX from the ready list and return it.  INDEX
   for insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}
/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}
/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */
void
ready_sort (struct ready_list *ready)
{
  int i;
  rtx *first = ready_lastpos (ready);

  if (sched_pressure_p)
    {
      for (i = 0; i < ready->n_ready; i++)
	if (!DEBUG_INSN_P (first[i]))
	  setup_insn_reg_pressure_info (first[i]);
    }
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance DFA state STATE on one cycle.  */
void
advance_state (state_t state)
{
  if (targetm.sched.dfa_pre_advance_cycle)
    targetm.sched.dfa_pre_advance_cycle ();

  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_post_cycle_insn ());

  if (targetm.sched.dfa_post_advance_cycle)
    targetm.sched.dfa_post_advance_cycle ();
}
/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  advance_state (curr_state);
  if (sched_verbose >= 6)
    fprintf (sched_dump, ";;\tAdvanced a state.\n");
}

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;
/* Update register pressure after scheduling INSN.  */
static void
update_register_pressure (rtx insn)
{
  struct reg_use_data *use;
  struct reg_set_data *set;

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
      mark_regno_birth_or_death (use->regno, false);
  for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
    mark_regno_birth_or_death (set->regno, true);
}
/* Set up or update (if UPDATE_P) max register pressure (see its
   meaning in sched-int.h::_haifa_insn_data) for all current BB insns
   after insn AFTER.  */
static void
setup_insn_max_reg_pressure (rtx after, bool update_p)
{
  int i, p;
  bool eq_p;
  rtx insn;
  static int max_reg_pressure[N_REG_CLASSES];

  save_reg_pressure ();
  for (i = 0; i < ira_reg_class_cover_size; i++)
    max_reg_pressure[ira_reg_class_cover[i]]
      = curr_reg_pressure[ira_reg_class_cover[i]];
  for (insn = NEXT_INSN (after);
       insn != NULL_RTX && ! BARRIER_P (insn)
	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
       insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
	eq_p = true;
	for (i = 0; i < ira_reg_class_cover_size; i++)
	  {
	    p = max_reg_pressure[ira_reg_class_cover[i]];
	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
	      {
		eq_p = false;
		INSN_MAX_REG_PRESSURE (insn)[i]
		  = max_reg_pressure[ira_reg_class_cover[i]];
	      }
	  }
	if (update_p && eq_p)
	  break;
	update_register_pressure (insn);
	for (i = 0; i < ira_reg_class_cover_size; i++)
	  if (max_reg_pressure[ira_reg_class_cover[i]]
	      < curr_reg_pressure[ira_reg_class_cover[i]])
	    max_reg_pressure[ira_reg_class_cover[i]]
	      = curr_reg_pressure[ira_reg_class_cover[i]];
      }
  restore_reg_pressure ();
}

/* Update the current register pressure after scheduling INSN.  Update
   also max register pressure for unscheduled insns of the current
   BB.  */
static void
update_reg_and_insn_max_reg_pressure (rtx insn)
{
  int i;
  int before[N_REG_CLASSES];

  for (i = 0; i < ira_reg_class_cover_size; i++)
    before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
  update_register_pressure (insn);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
      break;
  if (i < ira_reg_class_cover_size)
    setup_insn_max_reg_pressure (insn, true);
}

/* Set up register pressure at the beginning of basic block BB whose
   insns starting after insn AFTER.  Set up also max register pressure
   for all insns of the basic block.  */
void
sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
{
  gcc_assert (sched_pressure_p);
  initiate_bb_reg_pressure_info (bb);
  setup_insn_max_reg_pressure (after, false);
}
/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */

static int
schedule_insn (rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int i;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      struct reg_pressure_data *pressure_info;
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      pressure_info = INSN_REG_PRESSURE (insn);
      if (pressure_info != NULL)
	{
	  fputc (':', sched_dump);
	  for (i = 0; i < ira_reg_class_cover_size; i++)
	    fprintf (sched_dump, "%s%+d(%d)",
		     reg_class_names[ira_reg_class_cover[i]],
		     pressure_info[i].set_increase, pressure_info[i].change);
	}
      fputc ('\n', sched_dump);
    }

  if (sched_pressure_p && !DEBUG_INSN_P (insn))
    update_reg_and_insn_max_reg_pressure (insn);

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));

  /* Reset debug insns invalidated by moving this insn.  */
  if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
    for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
	 sd_iterator_cond (&sd_it, &dep);)
      {
	rtx dbg = DEP_PRO (dep);
	struct reg_use_data *use, *next;

	gcc_assert (DEBUG_INSN_P (dbg));

	if (sched_verbose >= 6)
	  fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
		   INSN_UID (dbg));

	/* ??? Rather than resetting the debug insn, we might be able
	   to emit a debug temp before the just-scheduled insn, but
	   this would involve checking that the expression at the
	   point of the debug insn is equivalent to the expression
	   before the just-scheduled insn.  They might not be: the
	   expression in the debug insn may depend on other insns not
	   yet scheduled that set MEMs, REGs or even other debug
	   insns.  It's not clear that attempting to preserve debug
	   information in these cases is worth the effort, given how
	   uncommon these resets are and the likelihood that the debug
	   temps introduced won't survive the schedule change.  */
	INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
	df_insn_rescan (dbg);

	/* Unknown location doesn't use any registers.  */
	for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
	  {
	    struct reg_use_data *prev = use;

	    /* Remove use from the cyclic next_regno_use chain first.  */
	    while (prev->next_regno_use != use)
	      prev = prev->next_regno_use;
	    prev->next_regno_use = use->next_regno_use;
	    next = use->next_insn_use;
	    free (use);
	  }

	INSN_REG_USE_LIST (dbg) = NULL;

	/* We delete rather than resolve these deps, otherwise we
	   crash in sched_free_deps(), because forward deps are
	   expected to be released before backward deps.  */
	sd_delete_dep (sd_it);
      }

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx next = DEP_CON (dep);

      /* Resolve the dependence between INSN and NEXT.
	 sd_resolve_dep () moves current dep to another list thus
	 advancing the iterator.  */
      sd_resolve_dep (sd_it);

      /* Don't bother trying to mark next as ready if insn is a debug
	 insn.  If insn is the last hard dependency, it will have
	 already been discounted.  */
      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
	continue;

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* Check always has only one forward dependence (to the first insn in
	   the recovery block), therefore, this will be executed only once.  */
	{
	  gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* This is the place where scheduler doesn't *basically* need backward and
     forward dependencies for INSN anymore.  Nevertheless they are used in
     heuristics in rank_for_schedule (), early_queue_to_ready () and in
     some targets (e.g. rs6000).  Thus the earliest place where we *can*
     remove dependencies is after targetm.sched.finish () call in
     schedule_block ().  But, on the other side, the safest place to remove
     dependencies is when we are finishing scheduling entire region.  As we
     don't generate [many] dependencies during scheduling itself, we won't
     need memory until beginning of next region.
     Bottom line: Dependencies are removed for all insns in the end of
     scheduling the region.  */

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Add note list that ends on FROM_END to the end of TO_ENDP.  */
void
concat_note_lists (rtx from_end, rtx *to_endp)
{
  rtx from_start;

  /* It's easy when we have nothing to concat.  */
  if (from_end == NULL)
    return;

  /* It's also easy when destination is empty.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  from_start = from_end;
  while (PREV_INSN (from_start) != NULL)
    from_start = PREV_INSN (from_start);

  PREV_INSN (from_start) = *to_endp;
  NEXT_INSN (*to_endp) = from_start;
  *to_endp = from_end;
}
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */
void
remove_notes (rtx head, rtx tail)
{
  rtx next_tail, insn, next;

  note_list = 0;
  if (head == tail && !INSN_P (head))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = next)
    {
      next = NEXT_INSN (insn);
      if (!NOTE_P (insn))
	continue;

      switch (NOTE_KIND (insn))
	{
	case NOTE_INSN_BASIC_BLOCK:
	  continue;

	case NOTE_INSN_EPILOGUE_BEG:
	  if (insn != tail)
	    {
	      remove_insn (insn);
	      add_reg_note (next, REG_SAVE_NOTE,
			    GEN_INT (NOTE_INSN_EPILOGUE_BEG));
	      break;
	    }
	  /* FALLTHRU */

	default:
	  remove_insn (insn);

	  /* Add the note to list that ends at NOTE_LIST.  */
	  PREV_INSN (insn) = note_list;
	  NEXT_INSN (insn) = NULL_RTX;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	  break;
	}

      gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
    }
}
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */
void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head) || BOUNDARY_DEBUG_INSN_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail) || BOUNDARY_DEBUG_INSN_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (const_rtx head, const_rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head)
	  && !BOUNDARY_DEBUG_INSN_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
   previously found among the insns.  Insert them just before HEAD.  */
rtx
restore_other_notes (rtx head, basic_block head_bb)
{
  if (note_list != 0)
    {
      rtx note_head = note_list;

      if (head)
	head_bb = BLOCK_FOR_INSN (head);
      else
	head = NEXT_INSN (bb_note (head_bb));

      while (PREV_INSN (note_head))
	{
	  set_block_for_insn (note_head, head_bb);
	  note_head = PREV_INSN (note_head);
	}
      /* In the above cycle we've missed this note.  */
      set_block_for_insn (note_head, head_bb);

      PREV_INSN (note_head) = PREV_INSN (head);
      NEXT_INSN (PREV_INSN (head)) = note_head;
      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;

      if (BLOCK_FOR_INSN (head) != head_bb)
	BB_END (head_bb) = note_list;

      head = note_head;
    }

  return head;
}
/* Move insns that became ready to fire from queue to ready list.  */
static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx skip_insn;

  q_ptr = NEXT_Q (q_ptr);

  if (dbg_cnt (sched_insn) == false)
    /* If debug counter is activated do not requeue insn next after
       last_scheduled_insn.  */
    skip_insn = next_nonnote_nondebug_insn (last_scheduled_insn);
  else
    skip_insn = NULL_RTX;

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
         See the comment in schedule_block for the rationale.  */
      if (!reload_completed
          && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
          && !SCHED_GROUP_P (insn)
          && insn != skip_insn)
        {
          if (sched_verbose >= 2)
            fprintf (sched_dump, "requeued because ready full\n");
          queue_insn (insn, 1);
        }
      else
        {
          ready_add (ready, insn, false);
          if (sched_verbose >= 2)
            fprintf (sched_dump, "moving to ready without stalls\n");
        }
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
        {
          if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
            {
              for (; link; link = XEXP (link, 1))
                {
                  insn = XEXP (link, 0);
                  q_size -= 1;

                  if (sched_verbose >= 2)
                    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                             (*current_sched_info->print_insn) (insn, 0));

                  ready_add (ready, insn, false);
                  if (sched_verbose >= 2)
                    fprintf (sched_dump,
                             "moving to ready with %d stalls\n", stalls);
                }
              free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

              advance_one_cycle ();

              break;
            }

          advance_one_cycle ();
        }

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
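
/* A small illustrative sketch, not part of the pass: the insn queue used
   above is a circular buffer of (max_insn_queue_index + 1) INSN_LIST
   chains, indexed by the NEXT_Q/NEXT_Q_AFTER macros defined earlier in
   this file (an assumption of this sketch).  Advancing over S stall
   cycles amounts to

     q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
     clock_var += stalls;

   so an insn entered with queue_insn (insn, N) lives in
   insn_queue[NEXT_Q_AFTER (q_ptr, N)] and reaches the ready list after
   N cycle advances.  */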
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
        {
          for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
            {
              int cost;

              if (prev_insn == current_sched_info->prev_head)
                {
                  prev_insn = NULL;
                  break;
                }

              if (!NOTE_P (prev_insn))
                {
                  dep_t dep;

                  dep = sd_find_dep_between (prev_insn, insn, true);

                  if (dep != NULL)
                    {
                      cost = dep_cost (dep);

                      if (targetm.sched.is_costly_dependence (dep, cost,
                                flag_sched_stalled_insns_dep - n_cycles))
                        return false;
                    }
                }

              if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
                break;
            }

          if (!prev_insn)
            break;
          prev_insn = PREV_INSN (prev_insn);
        }
    }

  return true;
}
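
/* A minimal sketch of the hook consulted above, assuming a target defines
   it (real implementations live in the target back ends, e.g. rs6000; the
   exact policy shown here is hypothetical):

     static bool
     example_is_costly_dependence (dep_t dep, int cost, int distance)
     {
       return DEP_TYPE (dep) == REG_DEP_TRUE && cost > distance;
     }

   i.e. treat a true dependence whose cost exceeds the distance already
   covered as costly; any dependence reported costly keeps INSN queued.  */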
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */
static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
             invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
        {
          if (sched_verbose > 6)
            fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

          prev_link = 0;
          while (link)
            {
              next_link = XEXP (link, 1);
              insn = XEXP (link, 0);
              if (insn && sched_verbose > 6)
                print_rtl_single (sched_dump, insn);

              memcpy (temp_state, state, dfa_state_size);
              if (recog_memoized (insn) < 0)
                /* non-negative to indicate that it's not ready
                   to avoid infinite Q->R->Q->R...  */
                cost = 0;
              else
                cost = state_transition (temp_state, insn);

              if (sched_verbose >= 6)
                fprintf (sched_dump, "transition cost = %d\n", cost);

              move_to_ready = false;
              if (cost < 0)
                {
                  move_to_ready = ok_for_early_queue_removal (insn);
                  if (move_to_ready == true)
                    {
                      /* move from Q to R */
                      q_size -= 1;
                      ready_add (ready, insn, false);

                      if (prev_link)
                        XEXP (prev_link, 1) = next_link;
                      else
                        insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

                      free_INSN_LIST_node (link);

                      if (sched_verbose >= 2)
                        fprintf (sched_dump,
                                 ";;\t\tEarly Q-->Ready: insn %s\n",
                                 (*current_sched_info->print_insn) (insn, 0));

                      insns_removed++;
                      if (insns_removed == flag_sched_stalled_insns)
                        /* Remove no more than flag_sched_stalled_insns insns
                           from Q at a time.  */
                        return insns_removed;
                    }
                }

              if (move_to_ready == false)
                prev_link = link;

              link = next_link;
            }
        }
    } /* for stalls.. */

  return insns_removed;
}
/* Print the ready list for debugging purposes.  Callable from debugger.  */
void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    {
      fprintf (sched_dump, "  %s:%d",
               (*current_sched_info->print_insn) (p[i], 0),
               INSN_LUID (p[i]));
      if (sched_pressure_p)
        fprintf (sched_dump, "(cost=%d",
                 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
      if (INSN_TICK (p[i]) > clock_var)
        fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
      if (sched_pressure_p)
        fprintf (sched_dump, ")");
    }
  fprintf (sched_dump, "\n");
}
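
/* For reference, one ready-list entry printed by the routine above looks
   like (a sample derived from the format strings, not an actual dump):

     insn_name:27(cost=0:delay=2)

   i.e. the front end's print_insn output, the LUID, and, under
   -fsched-pressure, the pressure excess cost, plus the remaining delay in
   cycles whenever INSN_TICK is still ahead of clock_var.  */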
/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
   NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
   replaces the epilogue note in the correct basic block.  */
static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
        {
          enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));

          last = emit_note_before (note_type, last);
          remove_note (insn, note);
        }
    }
}
/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx insn, rtx last, rtx nt)
{
  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
        /* If this is last instruction in BB, move end marker one
           instruction up.  */
        {
          /* Jumps are always placed at the end of basic block.  */
          jump_p = control_flow_insn_p (insn);

          gcc_assert (!jump_p
                      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
                          && IS_SPECULATION_BRANCHY_CHECK_P (insn))
                      || (common_sched_info->sched_pass_id
                          == SCHED_EBB_PASS));

          gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

          BB_END (bb) = PREV_INSN (insn);
        }

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
        /* We move the block note along with jump.  */
        {
          gcc_assert (nt);

          note = NEXT_INSN (insn);
          while (NOTE_NOT_BB_P (note) && note != nt)
            note = NEXT_INSN (note);

          if (note != nt
              && (LABEL_P (note)
                  || BARRIER_P (note)))
            note = NEXT_INSN (note);

          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
        }
      else
        note = insn;

      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      NEXT_INSN (note) = NEXT_INSN (last);
      PREV_INSN (NEXT_INSN (last)) = note;

      NEXT_INSN (last) = insn;
      PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
        {
          fix_jump_move (insn);

          if (BLOCK_FOR_INSN (insn) != bb)
            move_block_after_check (insn);

          gcc_assert (BB_END (bb) == last);
        }

      df_insn_change_bb (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
        BB_END (bb) = insn;
    }

  SCHED_GROUP_P (insn) = 0;
}
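
/* Schematic of the splice performed above (plain doubly-linked-list
   surgery on the insn chain; NOTE is INSN itself unless a block note
   travels with a jump):

     before:  ... P -> INSN ... NOTE -> N ...      ... LAST -> L ...
     after:   ... P -> N ...                       ... LAST -> INSN ... NOTE -> L ...

   where P == PREV_INSN (insn); the four NEXT_INSN/PREV_INSN assignments
   re-link both ends of the excised run.  */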
/* Return true if scheduling INSN will finish current clock cycle.  */
static bool
insn_finishes_cycle_p (rtx insn)
{
  if (SCHED_GROUP_P (insn))
    /* After issuing INSN, rest of the sched_group will be forced to issue
       in order.  Don't make any plans for the rest of cycle.  */
    return true;

  /* Finishing the block will, apparently, finish the cycle.  */
  if (current_sched_info->insn_finishes_block_p
      && current_sched_info->insn_finishes_block_p (insn))
    return true;

  return false;
}
/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* The following variable is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the
   processor state.  */
int cycle_issued_insns;

/* This holds the value of the target dfa_lookahead hook.  */
int dfa_lookahead;

/* The following variable is the maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such an insn might occur.  For such
   descriptions, the complexity of the algorithm (without the
   constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue
   length.  */
static int max_lookahead_tries;
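
/* For example, with dfa_lookahead == 4 and issue_rate == 2, max_issue
   below caps the search at 100 * 4 * 4 == 1600 tries; 100 is the
   "constant" mentioned above.  */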
/* The following value is value of hook
   `first_cycle_multipass_dfa_lookahead' at the last call of
   `max_issue'.  */
static int cached_first_cycle_multipass_dfa_lookahead = 0;

/* The following value is value of `issue_rate' at the last call of
   `sched_init'.  */
static int cached_issue_rate = 0;
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To do this, the
   function tries different samples of ready insns.  READY is the current
   queue `ready'.  The global array READY_TRY reflects what insns are
   already issued in this try.  The function stops immediately if it
   reaches a solution in which all instructions can be issued.  INDEX will
   contain the index of the best insn in READY.  The following function is
   used only for first cycle multipass scheduling.

   This function expects recognized insns only.  All USEs,
   CLOBBERs, etc must be filtered elsewhere.  */
int
max_issue (struct ready_list *ready, int privileged_n, state_t state,
           int *index)
{
  int n, i, all, n_ready, best, delay, tries_num;
  int more_issue;
  struct choice_entry *top;
  rtx insn;

  n_ready = ready->n_ready;
  gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
              && privileged_n <= n_ready);

  /* Init MAX_LOOKAHEAD_TRIES.  */
  if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
    {
      cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
      max_lookahead_tries = 100;
      for (i = 0; i < issue_rate; i++)
        max_lookahead_tries *= dfa_lookahead;
    }

  /* Init max_points.  */
  more_issue = issue_rate - cycle_issued_insns;
  gcc_assert (more_issue >= 0);

  /* The number of the issued insns in the best solution.  */
  best = 0;

  top = choice_stack;

  /* Set initial state of the search.  */
  memcpy (top->state, state, dfa_state_size);
  top->rest = dfa_lookahead;
  top->n = 0;

  /* Count the number of the insns to search among.  */
  for (all = i = 0; i < n_ready; i++)
    if (!ready_try[i])
      all++;

  /* I is the index of the insn to try next.  */
  i = 0;
  tries_num = 0;
  for (;;)
    {
      if (/* If we've reached a dead end or searched enough of what we have
             been asked...  */
          top->rest == 0
          /* or have nothing else to try...  */
          || i >= n_ready
          /* or should not issue more.  */
          || top->n >= more_issue)
        {
          /* ??? (... || i == n_ready).  */
          gcc_assert (i <= n_ready);

          /* We should not issue more than issue_rate instructions.  */
          gcc_assert (top->n <= more_issue);

          if (top == choice_stack)
            break;

          if (best < top - choice_stack)
            {
              if (privileged_n)
                {
                  n = privileged_n;
                  /* Try to find issued privileged insn.  */
                  while (n && !ready_try[--n]);
                }

              if (/* If all insns are equally good...  */
                  privileged_n == 0
                  /* Or a privileged insn will be issued.  */
                  || ready_try[n])
                /* Then we have a solution.  */
                {
                  best = top - choice_stack;
                  /* This is the index of the insn issued first in this
                     solution.  */
                  *index = choice_stack[1].index;
                  if (top->n == more_issue || best == all)
                    break;
                }
            }

          /* Set ready-list index to point to the last insn
             ('i++' below will advance it to the next insn).  */
          i = top->index;

          /* Backtrack.  */
          ready_try[i] = 0;
          top--;
          memcpy (state, top->state, dfa_state_size);
        }
      else if (!ready_try[i])
        {
          tries_num++;
          if (tries_num > max_lookahead_tries)
            break;
          insn = ready_element (ready, i);
          delay = state_transition (state, insn);
          if (delay < 0)
            {
              if (state_dead_lock_p (state)
                  || insn_finishes_cycle_p (insn))
                /* We won't issue any more instructions in the next
                   choice_state.  */
                top->rest = 0;
              else
                top->rest--;

              n = top->n;
              if (memcmp (top->state, state, dfa_state_size) != 0)
                n++;

              /* Advance to the next choice_entry.  */
              top++;
              /* Initialize it.  */
              top->rest = dfa_lookahead;
              top->index = i;
              top->n = n;
              memcpy (top->state, state, dfa_state_size);

              ready_try[i] = 1;
              i = -1;
            }
        }

      /* Increase ready-list index.  */
      i++;
    }

  /* Restore the original state of the DFA.  */
  memcpy (state, choice_stack->state, dfa_state_size);

  return best;
}
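
/* An illustrative trace of the search above (not executable): with
   ready == {A, B, C}, nothing issued yet and issue_rate == 2
   (more_issue == 2), max_issue first tries A, then tries B and C on top
   of A, backtracking whenever state_transition rejects a combination,
   top->rest runs out, or top->n reaches more_issue.  The deepest feasible
   prefix found becomes BEST, *index is set to the first insn of that
   solution (choice_stack[1].index), and the caller's DFA state is
   restored from choice_stack[0] before returning.  */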
/* The following function chooses an insn from READY and modifies READY.
   It is used only for first cycle multipass scheduling.
   Return:
   -1 if cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
static int
choose_ready (struct ready_list *ready, rtx *insn_ptr)
{
  int lookahead;

  if (dbg_cnt (sched_insn) == false)
    {
      rtx insn;

      insn = next_nonnote_insn (last_scheduled_insn);

      if (QUEUE_INDEX (insn) == QUEUE_READY)
        /* INSN is in the ready_list.  */
        {
          ready_remove_insn (insn);
          *insn_ptr = insn;
          return 0;
        }

      /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
      return -1;
    }

  lookahead = 0;

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
      || DEBUG_INSN_P (ready_element (ready, 0)))
    {
      if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
        *insn_ptr = ready_remove_first_dispatch (ready);
      else
        *insn_ptr = ready_remove_first (ready);

      return 0;
    }
  else
    {
      /* Try to choose the better insn.  */
      int index = 0, i, n;
      rtx insn;
      int try_data = 1, try_control = 1;
      ds_t ts;

      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
        {
          *insn_ptr = ready_remove_first (ready);
          return 0;
        }

      if (spec_info
          && spec_info->flags & (PREFER_NON_DATA_SPEC
                                 | PREFER_NON_CONTROL_SPEC))
        {
          for (i = 0, n = ready->n_ready; i < n; i++)
            {
              rtx x;
              ds_t s;

              x = ready_element (ready, i);
              s = TODO_SPEC (x);

              if (spec_info->flags & PREFER_NON_DATA_SPEC
                  && !(s & DATA_SPEC))
                {
                  try_data = 0;
                  if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
                      || !try_control)
                    break;
                }

              if (spec_info->flags & PREFER_NON_CONTROL_SPEC
                  && !(s & CONTROL_SPEC))
                {
                  try_control = 0;
                  if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
                    break;
                }
            }
        }

      ts = TODO_SPEC (insn);
      if ((ts & SPECULATIVE)
          && (((!try_data && (ts & DATA_SPEC))
               || (!try_control && (ts & CONTROL_SPEC)))
              || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
                  && !targetm.sched
                      .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
        /* Discard speculative instruction that stands first in the ready
           list.  */
        {
          change_queue_index (insn, 1);
          return 1;
        }

      ready_try[0] = 0;

      for (i = 1; i < ready->n_ready; i++)
        {
          insn = ready_element (ready, i);

          ready_try[i]
            = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
               || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
        }

      /* Let the target filter the search space.  */
      for (i = 1; i < ready->n_ready; i++)
        if (!ready_try[i])
          {
            insn = ready_element (ready, i);

            /* If this insn is recognizable we should have already
               recognized it earlier.
               ??? Not very clear where this is supposed to be done.  */
            gcc_checking_assert (INSN_CODE (insn) >= 0
                                 || recog_memoized (insn) < 0);

            ready_try[i]
              = (/* INSN_CODE check can be omitted here as it is also done
                    later in max_issue ().  */
                 INSN_CODE (insn) < 0
                 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
                     && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
                     (insn)));
          }

      if (max_issue (ready, 1, curr_state, &index) == 0)
        {
          *insn_ptr = ready_remove_first (ready);
          if (sched_verbose >= 4)
            fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
                     (*current_sched_info->print_insn) (*insn_ptr, 0));
          return 0;
        }
      else
        {
          if (sched_verbose >= 4)
            fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
                     (*current_sched_info->print_insn)
                     (ready_element (ready, index), 0));
          *insn_ptr = ready_remove (ready, index);
          return 0;
        }
    }
}
/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  */
void
schedule_block (basic_block *target_bb)
{
  int i, first_cycle_insn_p;
  int can_issue_more;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx head = NEXT_INSN (prev_head);
  rtx tail = PREV_INSN (next_tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  haifa_recovery_bb_recently_added_p = false;

  /* Debug info.  */
  if (sched_verbose)
    dump_new_block_header (0, *target_bb, head, tail);

  state_reset (curr_state);

  /* Clear the ready list.  */
  ready.first = ready.veclen - 1;
  ready.n_ready = 0;
  ready.n_debug = 0;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);

  if (targetm.sched.init)
    targetm.sched.init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = prev_head;

  gcc_assert ((NOTE_P (last_scheduled_insn)
               || BOUNDARY_DEBUG_INSN_P (last_scheduled_insn))
              && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  q_ptr = 0;
  q_size = 0;

  insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));

  /* Start just before the beginning of time.  */
  clock_var = -1;

  /* We need queue and ready lists and clock_var be initialized
     in try_ready () (which is called through init_ready_list ()).  */
  (*current_sched_info->init_ready_list) ();

  /* The algorithm is O(n^2) in the number of ready insns at any given
     time in the worst case.  Before reload we are more likely to have
     big lists so truncate them to a reasonable size.  */
  if (!reload_completed
      && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
    {
      ready_sort (&ready);

      /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
         If there are debug insns, we know they're first.  */
      for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
        if (!SCHED_GROUP_P (ready_element (&ready, i)))
          break;

      if (sched_verbose >= 2)
        {
          fprintf (sched_dump,
                   ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
          fprintf (sched_dump,
                   ";;\t\t before reload => truncated to %d insns\n", i);
        }

      /* Delay all insns past it for 1 cycle.  If debug counter is
         activated make an exception for the insn right after
         last_scheduled_insn.  */
      {
        rtx skip_insn;

        if (dbg_cnt (sched_insn) == false)
          skip_insn = next_nonnote_insn (last_scheduled_insn);
        else
          skip_insn = NULL_RTX;

        while (i < ready.n_ready)
          {
            rtx insn;

            insn = ready_remove (&ready, i);

            if (insn != skip_insn)
              queue_insn (insn, 1);
          }
      }
    }

  /* Now we can restore basic block notes and maintain precise cfg.  */
  restore_bb_notes (*target_bb);

  last_clock_var = -1;

  advance = 0;

  sort_p = TRUE;
  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
    {
      do
        {
          start_clock_var = clock_var;

          clock_var++;

          advance_one_cycle ();

          /* Add to the ready list all pending insns that can be issued now.
             If there are no ready insns, increment clock until one
             is ready and add all pending insns at that point to the ready
             list.  */
          queue_to_ready (&ready);

          gcc_assert (ready.n_ready);

          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
              debug_ready_list (&ready);
            }
          advance -= clock_var - start_clock_var;
        }
      while (advance > 0);

      if (sort_p)
        {
          /* Sort the ready list based on priority.  */
          ready_sort (&ready);

          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
              debug_ready_list (&ready);
            }
        }

      /* We don't want md sched reorder to even see debug insns, so put
         them out right away.  */
      if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
        {
          if (control_flow_insn_p (last_scheduled_insn))
            {
              *target_bb = current_sched_info->advance_target_bb
                (*target_bb, 0);

              if (sched_verbose)
                {
                  rtx x;

                  x = next_real_insn (last_scheduled_insn);
                  gcc_assert (x);
                  dump_new_block_header (1, *target_bb, x, tail);
                }

              last_scheduled_insn = bb_note (*target_bb);
            }

          while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
            {
              rtx insn = ready_remove_first (&ready);
              gcc_assert (DEBUG_INSN_P (insn));
              (*current_sched_info->begin_schedule_ready) (insn,
                                                           last_scheduled_insn);
              move_insn (insn, last_scheduled_insn,
                         current_sched_info->next_tail);
              last_scheduled_insn = insn;
              advance = schedule_insn (insn);
              gcc_assert (advance == 0);
              if (ready.n_ready > 0)
                ready_sort (&ready);
            }
        }

      /* Allow the target to reorder the list, typically for
         better instruction bundling.  */
      if (sort_p && targetm.sched.reorder
          && (ready.n_ready == 0
              || !SCHED_GROUP_P (ready_element (&ready, 0))))
        can_issue_more =
          targetm.sched.reorder (sched_dump, sched_verbose,
                                 ready_lastpos (&ready),
                                 &ready.n_ready, clock_var);
      else
        can_issue_more = issue_rate;

      first_cycle_insn_p = 1;
      cycle_issued_insns = 0;
      for (;;)
        {
          rtx insn;
          int cost;
          bool asm_p = false;

          if (sched_verbose >= 2)
            {
              fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
                       clock_var);
              debug_ready_list (&ready);
              if (sched_pressure_p)
                print_curr_reg_pressure ();
            }

          if (ready.n_ready == 0
              && can_issue_more
              && reload_completed)
            {
              /* Allow scheduling insns directly from the queue in case
                 there's nothing better to do (ready list is empty) but
                 there are still vacant dispatch slots in the current cycle.  */
              if (sched_verbose >= 6)
                fprintf (sched_dump,";;\t\tSecond chance\n");
              memcpy (temp_state, curr_state, dfa_state_size);
              if (early_queue_to_ready (temp_state, &ready))
                ready_sort (&ready);
            }

          if (ready.n_ready == 0
              || !can_issue_more
              || state_dead_lock_p (curr_state)
              || !(*current_sched_info->schedule_more_p) ())
            break;

          /* Select and remove the insn from the ready list.  */
          if (sort_p)
            {
              int res;

              insn = NULL_RTX;
              res = choose_ready (&ready, &insn);

              if (res < 0)
                /* Finish cycle.  */
                break;
              if (res > 0)
                /* Restart choose_ready ().  */
                continue;

              gcc_assert (insn != NULL_RTX);
            }
          else
            insn = ready_remove_first (&ready);

          if (sched_pressure_p && INSN_TICK (insn) > clock_var)
            {
              ready_add (&ready, insn, true);
              advance = 1;
              break;
            }

          if (targetm.sched.dfa_new_cycle
              && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
                                              insn, last_clock_var,
                                              clock_var, &sort_p))
            /* SORT_P is used by the target to override sorting
               of the ready list.  This is needed when the target
               has modified its internal structures expecting that
               the insn will be issued next.  As we need the insn
               to have the highest priority (so it will be returned by
               the ready_remove_first call above), we invoke
               ready_add (&ready, insn, true).
               But, still, there is one issue: INSN can be later
               discarded by scheduler's front end through
               current_sched_info->can_schedule_ready_p, hence, won't
               be issued next.  */
            {
              ready_add (&ready, insn, true);
              break;
            }

          sort_p = TRUE;
          memcpy (temp_state, curr_state, dfa_state_size);
          if (recog_memoized (insn) < 0)
            {
              asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
                       || asm_noperands (PATTERN (insn)) >= 0);
              if (!first_cycle_insn_p && asm_p)
                /* This is an asm insn that we tried to issue on a cycle
                   other than the first.  Issue it on the next cycle.  */
                cost = 1;
              else
                /* A USE insn, or something else we don't need to
                   understand.  We can't pass these directly to
                   state_transition because it will trigger a
                   fatal error for unrecognizable insns.  */
                cost = 0;
            }
          else if (sched_pressure_p)
            cost = 0;
          else
            {
              cost = state_transition (temp_state, insn);
              if (cost < 0)
                cost = 0;
              else if (cost == 0)
                cost = 1;
            }

          if (cost >= 1)
            {
              queue_insn (insn, cost);
              if (SCHED_GROUP_P (insn))
                {
                  advance = cost;
                  break;
                }

              continue;
            }

          if (current_sched_info->can_schedule_ready_p
              && ! (*current_sched_info->can_schedule_ready_p) (insn))
            /* We normally get here only if we don't want to move
               insn from the split block.  */
            {
              TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
              continue;
            }

          /* DECISION is made.  */

          if (TODO_SPEC (insn) & SPECULATIVE)
            generate_recovery_code (insn);

          if (control_flow_insn_p (last_scheduled_insn)
              /* This is used to switch basic blocks by request
                 from scheduler front-end (actually, sched-ebb.c only).
                 This is used to process blocks with single fallthru
                 edge.  If the succeeding block has a jump, that jump
                 will try to move to the end of the current bb, thus
                 corrupting the CFG.  */
              || current_sched_info->advance_target_bb (*target_bb, insn))
            {
              *target_bb = current_sched_info->advance_target_bb
                (*target_bb, 0);

              if (sched_verbose)
                {
                  rtx x;

                  x = next_real_insn (last_scheduled_insn);
                  gcc_assert (x);
                  dump_new_block_header (1, *target_bb, x, tail);
                }

              last_scheduled_insn = bb_note (*target_bb);
            }

          /* Update counters, etc in the scheduler's front end.  */
          (*current_sched_info->begin_schedule_ready) (insn,
                                                       last_scheduled_insn);

          move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);

          if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
            targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);

          reemit_notes (insn);
          last_scheduled_insn = insn;

          if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
            {
              cycle_issued_insns++;
              memcpy (curr_state, temp_state, dfa_state_size);
            }

          if (targetm.sched.variable_issue)
            can_issue_more =
              targetm.sched.variable_issue (sched_dump, sched_verbose,
                                            insn, can_issue_more);
          /* A naked CLOBBER or USE generates no instruction, so do
             not count them against the issue rate.  */
          else if (GET_CODE (PATTERN (insn)) != USE
                   && GET_CODE (PATTERN (insn)) != CLOBBER)
            can_issue_more--;
          advance = schedule_insn (insn);

          /* After issuing an asm insn we should start a new cycle.  */
          if (advance == 0 && asm_p)
            advance = 1;
          if (advance != 0)
            break;

          first_cycle_insn_p = 0;

          /* Sort the ready list based on priority.  This must be
             redone here, as schedule_insn may have readied additional
             insns that will not be sorted correctly.  */
          if (ready.n_ready > 0)
            ready_sort (&ready);

          /* Quickly go through debug insns such that md sched
             reorder2 doesn't have to deal with debug insns.  */
          if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
              && (*current_sched_info->schedule_more_p) ())
            {
              if (control_flow_insn_p (last_scheduled_insn))
                {
                  *target_bb = current_sched_info->advance_target_bb
                    (*target_bb, 0);

                  if (sched_verbose)
                    {
                      rtx x;

                      x = next_real_insn (last_scheduled_insn);
                      gcc_assert (x);
                      dump_new_block_header (1, *target_bb, x, tail);
                    }

                  last_scheduled_insn = bb_note (*target_bb);
                }

              while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
                {
                  insn = ready_remove_first (&ready);
                  gcc_assert (DEBUG_INSN_P (insn));
                  (*current_sched_info->begin_schedule_ready)
                    (insn, last_scheduled_insn);
                  move_insn (insn, last_scheduled_insn,
                             current_sched_info->next_tail);
                  advance = schedule_insn (insn);
                  last_scheduled_insn = insn;
                  gcc_assert (advance == 0);
                  if (ready.n_ready > 0)
                    ready_sort (&ready);
                }
            }

          if (targetm.sched.reorder2
              && (ready.n_ready == 0
                  || !SCHED_GROUP_P (ready_element (&ready, 0))))
            {
              can_issue_more =
                targetm.sched.reorder2 (sched_dump, sched_verbose,
                                        ready.n_ready
                                        ? ready_lastpos (&ready) : NULL,
                                        &ready.n_ready, clock_var);
            }
        }
    }

  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (sched_dump, ";;\tReady list (final): ");
      debug_ready_list (&ready);
    }

  if (current_sched_info->queue_must_finish_empty)
    /* Sanity check -- queue must be empty now.  Meaningless if region has
       multiple bbs.  */
    gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
  else
    {
      /* We must maintain QUEUE_INDEX between blocks in region.  */
      for (i = ready.n_ready - 1; i >= 0; i--)
        {
          rtx x;

          x = ready_element (&ready, i);
          QUEUE_INDEX (x) = QUEUE_NOWHERE;
          TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
        }

      if (q_size)
        for (i = 0; i <= max_insn_queue_index; i++)
          {
            rtx link;
            for (link = insn_queue[i]; link; link = XEXP (link, 1))
              {
                rtx x;

                x = XEXP (link, 0);
                QUEUE_INDEX (x) = QUEUE_NOWHERE;
                TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
              }
            free_INSN_LIST_list (&insn_queue[i]);
          }
    }

  if (sched_verbose)
    fprintf (sched_dump, ";;   total time = %d\n", clock_var);

  if (!current_sched_info->queue_must_finish_empty
      || haifa_recovery_bb_recently_added_p)
    {
      /* INSN_TICK (minimum clock tick at which the insn becomes
         ready) may be not correct for the insn in the subsequent
         blocks of the region.  We should use a correct value of
         `clock_var' or modify INSN_TICK.  It is better to keep
         clock_var value equal to 0 at the start of a basic block.
         Therefore we modify INSN_TICK here.  */
      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
    }

  if (targetm.sched.finish)
    {
      targetm.sched.finish (sched_dump, sched_verbose);
      /* Target might have added some instructions to the scheduled block
         in its md_finish () hook.  These new insns don't have any data
         initialized and to identify them we extend h_i_d so that they'll
         get zero luids.  */
      sched_init_luids (NULL, NULL, NULL, NULL);
    }

  if (sched_verbose)
    fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n\n",
             INSN_UID (head), INSN_UID (tail));

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

  head = restore_other_notes (head, NULL);

  current_sched_info->head = head;
  current_sched_info->tail = tail;
}
/* Set_priorities: compute priority of each insn in the block.  */
int
set_priorities (rtx head, rtx tail)
{
  rtx insn;
  int n_insn;
  int sched_max_insns_priority =
        current_sched_info->sched_max_insns_priority;
  rtx prev_head;

  if (head == tail && (! INSN_P (head) || BOUNDARY_DEBUG_INSN_P (head)))
    return 0;

  n_insn = 0;

  prev_head = PREV_INSN (head);
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
    {
      if (!INSN_P (insn))
        continue;

      n_insn++;
      (void) priority (insn);

      gcc_assert (INSN_PRIORITY_KNOWN (insn));

      sched_max_insns_priority = MAX (sched_max_insns_priority,
                                      INSN_PRIORITY (insn));
    }

  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;

  return n_insn;
}
/* Set dump and sched_verbose for the desired debugging output.  If no
   dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
   For -fsched-verbose=N, N>=10, print everything to stderr.  */
void
setup_sched_dump (void)
{
  sched_verbose = sched_verbose_param;
  if (sched_verbose_param == 0 && dump_file)
    sched_verbose = 1;
  sched_dump = ((sched_verbose_param >= 10 || !dump_file)
                ? stderr : dump_file);
}
/* Initialize some global state for the scheduler.  This function works
   with the common data shared between all the schedulers.  It is called
   from the scheduler specific initialization routine.  */
void
sched_init (void)
{
  /* Disable speculative loads in their presence if cc0 defined.  */
#ifdef HAVE_cc0
  flag_schedule_speculative_load = 0;
#endif

  if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
    targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);

  sched_pressure_p = (flag_sched_pressure && ! reload_completed
                      && common_sched_info->sched_pass_id == SCHED_RGN_PASS);

  if (sched_pressure_p)
    ira_setup_eliminable_regset ();

  /* Initialize SPEC_INFO.  */
  if (targetm.sched.set_sched_flags)
    {
      spec_info = &spec_info_var;
      targetm.sched.set_sched_flags (spec_info);

      if (spec_info->mask != 0)
        {
          spec_info->data_weakness_cutoff =
            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
          spec_info->control_weakness_cutoff =
            (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
             * REG_BR_PROB_BASE) / 100;
        }
      else
        /* So we won't read anything accidentally.  */
        spec_info = NULL;
    }
  else
    /* So we won't read anything accidentally.  */
    spec_info = NULL;

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    issue_rate = targetm.sched.issue_rate ();
  else
    issue_rate = 1;

  if (cached_issue_rate != issue_rate)
    {
      cached_issue_rate = issue_rate;
      /* To invalidate max_lookahead_tries:  */
      cached_first_cycle_multipass_dfa_lookahead = 0;
    }

  if (targetm.sched.first_cycle_multipass_dfa_lookahead)
    dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
  else
    dfa_lookahead = 0;

  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  dfa_start ();
  dfa_state_size = state_size ();

  init_alias_analysis ();

  df_set_flags (DF_LR_RUN_DCE);
  df_note_add_problem ();

  /* More problems needed for interloop dep calculation in SMS.  */
  if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
    {
      df_rd_add_problem ();
      df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
    }

  df_analyze ();

  /* Do not run DCE after reload, as this can kill nops inserted
     by bundling.  */
  if (reload_completed)
    df_clear_flags (DF_LR_RUN_DCE);

  regstat_compute_calls_crossed ();

  if (targetm.sched.init_global)
    targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);

  if (sched_pressure_p)
    {
      int i, max_regno = max_reg_num ();

      ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
      sched_regno_cover_class
        = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
      for (i = 0; i < max_regno; i++)
        sched_regno_cover_class[i]
          = (i < FIRST_PSEUDO_REGISTER
             ? ira_class_translate[REGNO_REG_CLASS (i)]
             : reg_cover_class (i));
      curr_reg_live = BITMAP_ALLOC (NULL);
      saved_reg_live = BITMAP_ALLOC (NULL);
      region_ref_regs = BITMAP_ALLOC (NULL);
    }

  curr_state = xmalloc (dfa_state_size);
}
static void haifa_init_only_bb (basic_block, basic_block);

/* Initialize data structures specific to the Haifa scheduler.  */
void
haifa_sched_init (void)
{
  setup_sched_dump ();
  sched_init ();

  if (spec_info != NULL)
    {
      sched_deps_info->use_deps_list = 1;
      sched_deps_info->generate_spec_deps = 1;
    }

  /* Initialize luids, dependency caches, target and h_i_d for the
     whole function.  */
  {
    bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
    basic_block bb;

    sched_init_bbs ();

    FOR_EACH_BB (bb)
      VEC_quick_push (basic_block, bbs, bb);
    sched_init_luids (bbs, NULL, NULL, NULL);
    sched_deps_init (true);
    sched_extend_target ();
    haifa_init_h_i_d (bbs, NULL, NULL, NULL);

    VEC_free (basic_block, heap, bbs);
  }

  sched_init_only_bb = haifa_init_only_bb;
  sched_split_block = sched_split_block_1;
  sched_create_empty_bb = sched_create_empty_bb_1;
  haifa_recovery_bb_ever_added_p = false;

#ifdef ENABLE_CHECKING
  /* This is used preferably for finding bugs in check_cfg () itself.
     We must call sched_bbs_init () before check_cfg () because check_cfg ()
     assumes that the last insn in the last bb has a non-null successor.  */
  check_cfg (0, 0);
#endif

  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
  before_recovery = 0;
  after_recovery = 0;
}
/* Finish work with the data specific to the Haifa scheduler.  */
void
haifa_sched_finish (void)
{
  sched_create_empty_bb = NULL;
  sched_split_block = NULL;
  sched_init_only_bb = NULL;

  if (spec_info && spec_info->dump)
    {
      char c = reload_completed ? 'a' : 'b';

      fprintf (spec_info->dump,
               ";; %s:\n", current_function_name ());

      fprintf (spec_info->dump,
               ";; Procedure %cr-begin-data-spec motions == %d\n",
               c, nr_begin_data);
      fprintf (spec_info->dump,
               ";; Procedure %cr-be-in-data-spec motions == %d\n",
               c, nr_be_in_data);
      fprintf (spec_info->dump,
               ";; Procedure %cr-begin-control-spec motions == %d\n",
               c, nr_begin_control);
      fprintf (spec_info->dump,
               ";; Procedure %cr-be-in-control-spec motions == %d\n",
               c, nr_be_in_control);
    }

  /* Finalize h_i_d, dependency caches, and luids for the whole
     function.  Target will be finalized in md_global_finish ().  */
  sched_deps_finish ();
  sched_finish_luids ();
  current_sched_info = NULL;
  sched_finish ();
}
/* Free global data used during insn scheduling.  This function works with
   the common data shared between the schedulers.  */
void
sched_finish (void)
{
  haifa_finish_h_i_d ();
  if (sched_pressure_p)
    {
      free (sched_regno_cover_class);
      BITMAP_FREE (region_ref_regs);
      BITMAP_FREE (saved_reg_live);
      BITMAP_FREE (curr_reg_live);
    }
  free (curr_state);

  if (targetm.sched.finish_global)
    targetm.sched.finish_global (sched_dump, sched_verbose);

  end_alias_analysis ();

  regstat_free_calls_crossed ();

  dfa_finish ();

#ifdef ENABLE_CHECKING
  /* After reload ia64 backend clobbers CFG, so can't check anything.  */
  if (!reload_completed)
    check_cfg (0, 0);
#endif
}
/* Fix INSN_TICKs of the instructions in the current block as well as
   INSN_TICKs of their dependents.
   HEAD and TAIL are the begin and the end of the current scheduled block.  */
static void
fix_inter_tick (rtx head, rtx tail)
{
  /* Set of instructions with corrected INSN_TICK.  */
  bitmap_head processed;
  /* ??? It is doubtful if we should assume that cycle advance happens on
     basic block boundaries.  Basically insns that are unconditionally ready
     on the start of the block are more preferable then those which have
     a one cycle dependency over insn from the previous block.  */
  int next_clock = clock_var + 1;

  bitmap_initialize (&processed, 0);

  /* Iterates over scheduled instructions and fix their INSN_TICKs and
     INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
     across different blocks.  */
  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
    {
      if (INSN_P (head))
        {
          int tick;
          sd_iterator_def sd_it;
          dep_t dep;

          tick = INSN_TICK (head);
          gcc_assert (tick >= MIN_TICK);

          /* Fix INSN_TICK of instruction from just scheduled block.  */
          if (bitmap_set_bit (&processed, INSN_LUID (head)))
            {
              tick -= next_clock;

              if (tick < MIN_TICK)
                tick = MIN_TICK;

              INSN_TICK (head) = tick;
            }

          FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
            {
              rtx next;

              next = DEP_CON (dep);
              tick = INSN_TICK (next);

              if (tick != INVALID_TICK
                  /* If NEXT has its INSN_TICK calculated, fix it.
                     If not - it will be properly calculated from
                     scratch later in fix_tick_ready.  */
                  && bitmap_set_bit (&processed, INSN_LUID (next)))
                {
                  tick -= next_clock;

                  if (tick < MIN_TICK)
                    tick = MIN_TICK;

                  if (tick > INTER_TICK (next))
                    INTER_TICK (next) = tick;
                  else
                    tick = INTER_TICK (next);

                  INSN_TICK (next) = tick;
                }
            }
        }
    }
  bitmap_clear (&processed);
}
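
/* Worked example for the rebasing above: if the block just scheduled
   ended with clock_var == 7, then next_clock == 8, and an insn in a
   subsequent block whose INSN_TICK was 10 relative to the old origin is
   rebased to 10 - 8 == 2, i.e. two cycles into the next block; results
   below MIN_TICK are clamped.  */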
static int haifa_speculate_insn (rtx, ds_t, rtx *);

/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */
int
try_ready (rtx next)
{
  ds_t old_ts, *ts;

  ts = &TODO_SPEC (next);
  old_ts = *ts;

  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
              && ((old_ts & HARD_DEP)
                  || (old_ts & SPECULATIVE)));

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    {
      /* Remove HARD_DEP bit from NEXT's status.  */
      *ts &= ~HARD_DEP;

      if (current_sched_info->flags & DO_SPECULATION)
        /* Remove all speculative bits from NEXT's status.  */
        *ts &= ~SPECULATIVE;
    }
  else
    {
      /* One of the NEXT's dependencies has been resolved.
         Recalculate NEXT's status.  */

      *ts &= ~SPECULATIVE & ~HARD_DEP;

      if (sd_lists_empty_p (next, SD_LIST_HARD_BACK))
        /* Now we've got NEXT with speculative deps only.
           1. Look at the deps to see what we have to do.
           2. Check if we can do 'todo'.  */
        {
          sd_iterator_def sd_it;
          dep_t dep;
          bool first_p = true;

          FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
            {
              ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

              if (DEBUG_INSN_P (DEP_PRO (dep))
                  && !DEBUG_INSN_P (next))
                continue;

              if (first_p)
                {
                  first_p = false;

                  *ts = ds;
                }
              else
                *ts = ds_merge (*ts, ds);
            }

          if (ds_weak (*ts) < spec_info->data_weakness_cutoff)
            /* Too few points.  */
            *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
        }
      else
        *ts |= HARD_DEP;
    }

  if (*ts & HARD_DEP)
    gcc_assert (*ts == old_ts
                && QUEUE_INDEX (next) == QUEUE_NOWHERE);
  else if (current_sched_info->new_ready)
    *ts = current_sched_info->new_ready (next, *ts);

  /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
     have its original pattern or changed (speculative) one.  This is due
     to changing ebb in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
     has speculative pattern.

     We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
     control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */

  if ((*ts & SPECULATIVE)
      /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
         need to change anything.  */
      && *ts != old_ts)
    {
      int res;
      rtx new_pat;

      gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));

      res = haifa_speculate_insn (next, *ts, &new_pat);

      switch (res)
        {
        case -1:
          /* It would be nice to change DEP_STATUS of all dependences,
             which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
             so we won't reanalyze anything.  */
          *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
          break;

        case 0:
          /* We follow the rule, that every speculative insn
             has non-null ORIG_PAT.  */
          if (!ORIG_PAT (next))
            ORIG_PAT (next) = PATTERN (next);
          break;

        case 1:
          if (!ORIG_PAT (next))
            /* If we are going to overwrite the original pattern of insn,
               save it.  */
            ORIG_PAT (next) = PATTERN (next);

          haifa_change_pattern (next, new_pat);
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* We need to restore pattern only if (*ts == 0), because otherwise it is
     either correct (*ts & SPECULATIVE),
     or we simply don't care (*ts & HARD_DEP).  */

  gcc_assert (!ORIG_PAT (next)
              || !IS_SPECULATION_BRANCHY_CHECK_P (next));

  if (*ts & HARD_DEP)
    {
      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
         control-speculative NEXT could have been discarded by sched-rgn.c
         (the same case as when discarded by can_schedule_ready_p ()).  */
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/

      change_queue_index (next, QUEUE_NOWHERE);
      return -1;
    }
  else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
    /* We should change the pattern of every previously speculative
       instruction - and we determine if NEXT was speculative by using
       the ORIG_PAT field.  Except one case - speculation checks have
       ORIG_PAT pat too, so skip them.  */
    {
      haifa_change_pattern (next, ORIG_PAT (next));
      ORIG_PAT (next) = 0;
    }

  if (sched_verbose >= 2)
    {
      int s = TODO_SPEC (next);

      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (spec_info && spec_info->dump)
        {
          if (s & BEGIN_DATA)
            fprintf (spec_info->dump, "; data-spec;");
          if (s & BEGIN_CONTROL)
            fprintf (spec_info->dump, "; control-spec;");
          if (s & BE_IN_CONTROL)
            fprintf (spec_info->dump, "; in-control-spec;");
        }

      fprintf (sched_dump, "\n");
    }

  adjust_priority (next);

  return fix_tick_ready (next);
}
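
/* Example of the status transitions handled above: an insn whose last
   hard dependence has just been resolved but which still carries one
   data-speculative dependence has *ts recomputed by the merge loop as
   BEGIN_DATA.  If ds_weak (*ts) passes data_weakness_cutoff, the pattern
   is (possibly) speculated and the insn proceeds to fix_tick_ready;
   otherwise HARD_DEP is re-set and the insn is parked via
   change_queue_index (next, QUEUE_NOWHERE).  */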
/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */
static int
fix_tick_ready (rtx next)
{
  int tick, delay;

  if (!sd_lists_empty_p (next, SD_LIST_RES_BACK))
    {
      int full_p;
      sd_iterator_def sd_it;
      dep_t dep;

      tick = INSN_TICK (next);
      /* if tick is not equal to INVALID_TICK, then update
         INSN_TICK of NEXT with the most recent resolved dependence
         cost.  Otherwise, recalculate from scratch.  */
      full_p = (tick == INVALID_TICK);

      FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
        {
          rtx pro = DEP_PRO (dep);
          int tick1;

          gcc_assert (INSN_TICK (pro) >= MIN_TICK);

          tick1 = INSN_TICK (pro) + dep_cost (dep);
          if (tick1 > tick)
            tick = tick1;

          if (!full_p)
            break;
        }
    }
  else
    tick = -1;

  INSN_TICK (next) = tick;

  delay = tick - clock_var;
  if (delay <= 0 || sched_pressure_p)
    delay = QUEUE_READY;

  change_queue_index (next, delay);

  return delay;
}
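
/* Worked example: with two resolved producers scheduled at INSN_TICK 3
   and 5 whose dep_cost is 2 and 1 respectively, the loop above yields
   tick == MAX (3 + 2, 5 + 1) == 6; if clock_var == 4 this gives
   delay == 2, so NEXT is queued for two cycles instead of being added
   to the ready list.  */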
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE).  */
static void
change_queue_index (rtx next, int delay)
{
  int i = QUEUE_INDEX (next);

  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
              && delay != 0);
  gcc_assert (i != QUEUE_SCHEDULED);

  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
      || (delay < 0 && delay == i))
    /* We have nothing to do.  */
    return;

  /* Remove NEXT from wherever it is now.  */
  if (i == QUEUE_READY)
    ready_remove_insn (next);
  else if (i >= 0)
    queue_remove (next);

  /* Add it to the proper place.  */
  if (delay == QUEUE_READY)
    ready_add (readyp, next, false);
  else if (delay >= 1)
    queue_insn (next, delay);

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
               (*current_sched_info->print_insn) (next, 0));

      if (delay == QUEUE_READY)
        fprintf (sched_dump, " into ready\n");
      else if (delay >= 1)
        fprintf (sched_dump, " into queue with cost=%d\n", delay);
      else
        fprintf (sched_dump, " removed from ready or queue lists\n");
    }
}
static int sched_ready_n_insns = -1;

/* Initialize per region data structures.  */
void
sched_extend_ready_list (int new_sched_ready_n_insns)
{
  int i;

  if (sched_ready_n_insns == -1)
    /* At the first call we need to initialize one more choice_stack
       entry.  */
    {
      i = 0;
      sched_ready_n_insns = 0;
    }
  else
    i = sched_ready_n_insns + 1;

  ready.veclen = new_sched_ready_n_insns + issue_rate;
  ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);

  gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);

  ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
                                  sched_ready_n_insns, sizeof (*ready_try));

  /* We allocate +1 element to save initial state in the choice_stack[0]
     entry.  */
  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
                             new_sched_ready_n_insns + 1);

  for (; i <= new_sched_ready_n_insns; i++)
    choice_stack[i].state = xmalloc (dfa_state_size);

  sched_ready_n_insns = new_sched_ready_n_insns;
}
/* Free per region data structures.  */
void
sched_finish_ready_list (void)
{
  int i;

  free (ready.vec);
  ready.vec = NULL;
  ready.veclen = 0;

  free (ready_try);
  ready_try = NULL;

  for (i = 0; i <= sched_ready_n_insns; i++)
    free (choice_stack[i].state);
  free (choice_stack);
  choice_stack = NULL;

  sched_ready_n_insns = -1;
}
static int
haifa_luid_for_non_insn (rtx x)
{
  gcc_assert (NOTE_P (x) || LABEL_P (x));

  return 0;
}
/* Generates recovery code for INSN.  */
static void
generate_recovery_code (rtx insn)
{
  if (TODO_SPEC (insn) & BEGIN_SPEC)
    begin_speculative_block (insn);

  /* Here we have insn with no dependencies to
     instructions other than CHECK_SPEC ones.  */

  if (TODO_SPEC (insn) & BE_IN_SPEC)
    add_to_speculative_block (insn);
}
/* Helper function.
   Tries to add speculative dependencies of type FS between instructions
   in deps_list L and TWIN.  */
static void
process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      ds_t ds;
      rtx consumer;

      consumer = DEP_CON (dep);

      ds = DEP_STATUS (dep);

      if (/* If we want to create speculative dep.  */
          fs
          /* And we can do that because this is a true dep.  */
          && (ds & DEP_TYPES) == DEP_TRUE)
        {
          gcc_assert (!(ds & BE_IN_SPEC));

          if (/* If this dep can be overcome with 'begin speculation'.  */
              ds & BEGIN_SPEC)
            /* Then we have a choice: keep the dep 'begin speculative'
               or transform it into 'be in speculative'.  */
            {
              if (/* In try_ready we assert that if insn once became ready
                     it can be removed from the ready (or queue) list only
                     due to backend decision.  Hence we can't let the
                     probability of the speculative dep to decrease.  */
                  ds_weak (ds) <= ds_weak (fs))
                {
                  ds_t new_ds;

                  new_ds = (ds & ~BEGIN_SPEC) | fs;

                  if (/* consumer can 'be in speculative'.  */
                      sched_insn_is_legitimate_for_speculation_p (consumer,
                                                                  new_ds))
                    /* Transform it to be in speculative.  */
                    ds = new_ds;
                }
            }
          else
            /* Mark the dep as 'be in speculative'.  */
            ds |= fs;
        }

      {
        dep_def _new_dep, *new_dep = &_new_dep;

        init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
        sd_add_dep (new_dep, false);
      }
    }
}
/* Generates recovery code for BEGIN speculative INSN.  */
static void
begin_speculative_block (rtx insn)
{
  if (TODO_SPEC (insn) & BEGIN_DATA)
    nr_begin_data++;
  if (TODO_SPEC (insn) & BEGIN_CONTROL)
    nr_begin_control++;

  create_check_block_twin (insn, false);

  TODO_SPEC (insn) &= ~BEGIN_SPEC;
}

static void haifa_init_insn (rtx);
/* Generates recovery code for BE_IN speculative INSN.  */
static void
add_to_speculative_block (rtx insn)
{
  ds_t ts;
  sd_iterator_def sd_it;
  dep_t dep;
  rtx twins = NULL;
  rtx_vec_t priorities_roots;

  ts = TODO_SPEC (insn);
  gcc_assert (!(ts & ~BE_IN_SPEC));

  if (ts & BE_IN_DATA)
    nr_be_in_data++;
  if (ts & BE_IN_CONTROL)
    nr_be_in_control++;

  TODO_SPEC (insn) &= ~BE_IN_SPEC;
  gcc_assert (!TODO_SPEC (insn));

  DONE_SPEC (insn) |= ts;

  /* First we convert all simple checks to branchy.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx check = DEP_PRO (dep);

      if (IS_SPECULATION_SIMPLE_CHECK_P (check))
        {
          create_check_block_twin (check, true);

          /* Restart search.  */
          sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
        }
      else
        /* Continue search.  */
        sd_iterator_next (&sd_it);
    }

  priorities_roots = NULL;
  clear_priorities (insn, &priorities_roots);

  while (1)
    {
      rtx check, twin;
      basic_block rec;

      /* Get the first backward dependency of INSN.  */
      sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
      if (!sd_iterator_cond (&sd_it, &dep))
        /* INSN has no backward dependencies left.  */
        break;

      gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
                  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
                  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

      check = DEP_PRO (dep);

      gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
                  && QUEUE_INDEX (check) == QUEUE_NOWHERE);

      rec = BLOCK_FOR_INSN (check);

      twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
      haifa_init_insn (twin);

      sd_copy_back_deps (twin, insn, true);

      if (sched_verbose && spec_info->dump)
        /* INSN_BB (insn) isn't determined for twin insns yet.
           So we can't use current_sched_info->print_insn.  */
        fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
                 INSN_UID (twin), rec->index);

      twins = alloc_INSN_LIST (twin, twins);

      /* Add dependences between TWIN and all appropriate
         instructions from REC.  */
      FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
        {
          rtx pro = DEP_PRO (dep);

          gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);

          /* INSN might have dependencies from the instructions from
             several recovery blocks.  At this iteration we process those
             producers that reside in REC.  */
          if (BLOCK_FOR_INSN (pro) == rec)
            {
              dep_def _new_dep, *new_dep = &_new_dep;

              init_dep (new_dep, pro, twin, REG_DEP_TRUE);
              sd_add_dep (new_dep, false);
            }
        }

      process_insn_forw_deps_be_in_spec (insn, twin, ts);

      /* Remove all dependencies between INSN and insns in REC.  */
      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx pro = DEP_PRO (dep);

          if (BLOCK_FOR_INSN (pro) == rec)
            sd_delete_dep (sd_it);
          else
            sd_iterator_next (&sd_it);
        }
    }

  /* We couldn't have added the dependencies between INSN and TWINS earlier
     because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */
  while (twins)
    {
      rtx twin;

      twin = XEXP (twins, 0);

      {
        dep_def _new_dep, *new_dep = &_new_dep;

        init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
        sd_add_dep (new_dep, false);
      }

      twin = XEXP (twins, 1);
      free_INSN_LIST_node (twins);
      twins = twin;
    }

  calc_priorities (priorities_roots);
  VEC_free (rtx, heap, priorities_roots);
}
/* Extends and fills with zeros (only the new part) array pointed to by P.  */
void *
xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
{
  gcc_assert (new_nmemb >= old_nmemb);
  p = XRESIZEVAR (void, p, new_nmemb * size);
  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
  return p;
}
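
/* Typical use (see sched_extend_ready_list above): grow an array while
   zeroing only the portion that did not exist before, e.g.

     ready_try = (char *) xrecalloc (ready_try, new_n, old_n,
                                     sizeof (*ready_try));

   Unlike plain xrealloc, the [old_n, new_n) tail is guaranteed zeroed.  */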
/* Helper function.
   Find fallthru edge from PRED.  */
edge
find_fallthru_edge_from (basic_block pred)
{
  edge e;
  basic_block succ;

  succ = pred->next_bb;
  gcc_assert (succ->prev_bb == pred);

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      e = find_fallthru_edge (pred->succs);

      if (e)
        {
          gcc_assert (e->dest == succ);
          return e;
        }
    }
  else
    {
      e = find_fallthru_edge (succ->preds);

      if (e)
        {
          gcc_assert (e->src == pred);
          return e;
        }
    }

  return NULL;
}
/* Extend per basic block data structures.  */
static void
sched_extend_bb (void)
{
  rtx insn;

  /* The following is done to keep current_sched_info->next_tail non null.  */
  insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
  if (NEXT_INSN (insn) == 0
      || (!NOTE_P (insn)
          && !LABEL_P (insn)
          /* Don't emit a NOTE if it would end up before a BARRIER.  */
          && !BARRIER_P (NEXT_INSN (insn))))
    {
      rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
      /* Make insn appear outside BB.  */
      set_block_for_insn (note, NULL);
      BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
    }
}
/* Init per basic block data structures.  */
void
sched_init_bbs (void)
{
  sched_extend_bb ();
}
/* Initialize BEFORE_RECOVERY variable.  */
static void
init_before_recovery (basic_block *before_recovery_ptr)
{
  basic_block last;
  edge e;

  last = EXIT_BLOCK_PTR->prev_bb;
  e = find_fallthru_edge_from (last);

  if (e)
    {
      /* We create two basic blocks:
         1. Single instruction block is inserted right after E->SRC
         and has jump to
         2. Empty block right before EXIT_BLOCK.
         Between these two blocks recovery blocks will be emitted.  */

      basic_block single, empty;
      rtx x, label;

      /* If the fallthrough edge to exit we've found is from the block we've
         created before, don't do anything more.  */
      if (last == after_recovery)
        return;

      adding_bb_to_current_region_p = false;

      single = sched_create_empty_bb (last);
      empty = sched_create_empty_bb (single);

      /* Add new blocks to the root loop.  */
      if (current_loops != NULL)
        {
          add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
          add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
        }

      single->count = last->count;
      empty->count = last->count;
      single->frequency = last->frequency;
      empty->frequency = last->frequency;
      BB_COPY_PARTITION (single, last);
      BB_COPY_PARTITION (empty, last);

      redirect_edge_succ (e, single);
      make_single_succ_edge (single, empty, 0);
      make_single_succ_edge (empty, EXIT_BLOCK_PTR,
                             EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);

      label = block_label (empty);
      x = emit_jump_insn_after (gen_jump (label), BB_END (single));
      JUMP_LABEL (x) = label;
      LABEL_NUSES (label)++;
      haifa_init_insn (x);

      emit_barrier_after (x);

      sched_init_only_bb (empty, NULL);
      sched_init_only_bb (single, NULL);

      adding_bb_to_current_region_p = true;
      before_recovery = single;
      after_recovery = empty;

      if (before_recovery_ptr)
        *before_recovery_ptr = before_recovery;

      if (sched_verbose >= 2 && spec_info->dump)
        fprintf (spec_info->dump,
                 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
                 last->index, single->index, empty->index);
    }
  else
    before_recovery = last;
}
/* Returns new recovery block.  */
basic_block
sched_create_recovery_block (basic_block *before_recovery_ptr)
{
  rtx label;
  rtx barrier;
  basic_block rec;

  haifa_recovery_bb_recently_added_p = true;
  haifa_recovery_bb_ever_added_p = true;

  init_before_recovery (before_recovery_ptr);

  barrier = get_last_bb_insn (before_recovery);
  gcc_assert (BARRIER_P (barrier));

  label = emit_label_after (gen_label_rtx (), barrier);

  rec = create_basic_block (label, label, before_recovery);

  /* A recovery block always ends with an unconditional jump.  */
  emit_barrier_after (BB_END (rec));

  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
    BB_SET_PARTITION (rec, BB_COLD_PARTITION);

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
             rec->index);

  return rec;
}
/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
   and emit necessary jumps.  */
void
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
			     basic_block second_bb)
{
  rtx label;
  rtx jump;
  int edge_flags;

  /* This is fixing of incoming edge.  */
  /* ??? Which other flags should be specified?  */
  if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    edge_flags = EDGE_CROSSING;
  else
    edge_flags = 0;

  make_edge (first_bb, rec, edge_flags);
  label = block_label (second_bb);
  jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
  JUMP_LABEL (jump) = label;
  LABEL_NUSES (label)++;

  if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    {
      /* Rewritten from cfgrtl.c.  */
      if (flag_reorder_blocks_and_partition
	  && targetm.have_named_sections)
	{
	  /* We don't need the same note for the check because
	     any_condjump_p (check) == true.  */
	  add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
	}
      edge_flags = EDGE_CROSSING;
    }
  else
    edge_flags = 0;

  make_single_succ_edge (rec, second_bb, edge_flags);
}
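/* The resulting shape around a branchy speculation check (a sketch;
   edge flags elided):

	  first_bb
	  |       \
	  |        rec
	  |       /
	  second_bb

   FIRST_BB ends with the conditional check jumping to REC on
   speculation failure; REC redoes the work and jumps back into
   SECOND_BB, which FIRST_BB otherwise reaches by fallthrough.  */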
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check that should be converted to a branchy one.  */
static void
create_check_block_twin (rtx insn, bool mutate_p)
{
  basic_block rec;
  rtx label, check, twin;
  ds_t fs;
  sd_iterator_def sd_it;
  dep_t dep;
  dep_def _new_dep, *new_dep = &_new_dep;
  ds_t todo_spec;

  gcc_assert (ORIG_PAT (insn) != NULL_RTX);

  if (!mutate_p)
    todo_spec = TODO_SPEC (insn);
  else
    {
      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);

      todo_spec = CHECK_SPEC (insn);
    }

  todo_spec &= SPECULATIVE;

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
    {
      rec = sched_create_recovery_block (NULL);
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR;
      label = NULL_RTX;
    }

  /* Emit CHECK.  */
  check = targetm.sched.gen_spec_check (insn, label, todo_spec);

  if (rec != EXIT_BLOCK_PTR)
    {
      /* To have mem_reg alive at the beginning of second_bb,
	 we emit check BEFORE insn, so insn after splitting
	 insn will be at the beginning of second_bb, which will
	 provide us with the correct life information.  */
      check = emit_jump_insn_before (check, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check, insn);

  /* Extend data structures.  */
  haifa_init_insn (check);

  /* CHECK is being added to current region.  Extend ready list.  */
  gcc_assert (sched_ready_n_insns != -1);
  sched_extend_ready_list (sched_ready_n_insns + 1);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
	     (*current_sched_info->print_insn) (check, 0));

  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR)
    {
      sd_iterator_def sd_it;
      dep_t dep;

      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
	  {
	    struct _dep _dep2, *dep2 = &_dep2;

	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);

	    sd_add_dep (dep2, true);
	  }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      haifa_init_insn (twin);

      if (sched_verbose && spec_info->dump)
	/* INSN_BB (insn) isn't determined for twin insns yet.
	   So we can't use current_sched_info->print_insn.  */
	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
		 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
	 (TRUE | OUTPUT).  */
    }

  /* Copy all resolved back dependencies of INSN to TWIN.  This will
     provide correct value for INSN_TICK (TWIN).  */
  sd_copy_back_deps (twin, insn, true);

  if (rec != EXIT_BLOCK_PTR)
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx jump;

      first_bb = BLOCK_FOR_INSN (check);
      second_bb = sched_split_block (first_bb, check);

      sched_create_recovery_edges (first_bb, rec, second_bb);

      sched_init_only_bb (second_bb, first_bb);
      sched_init_only_bb (rec, EXIT_BLOCK_PTR);

      jump = BB_END (rec);
      haifa_init_insn (jump);
    }

  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */

  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
	 check --TRUE--> producer  ??? or ANTI ???
	 twin  --TRUE--> producer
	 twin  --ANTI--> check

	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
	 check --ANTI--> producer
	 twin  --ANTI--> producer
	 twin  --ANTI--> check

	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
	 check ~~TRUE~~> producer
	 twin  ~~TRUE~~> producer
	 twin  --ANTI--> check  */

      ds = DEP_STATUS (dep);

      if (ds & BEGIN_SPEC)
	{
	  gcc_assert (!mutate_p);
	  ds &= ~BEGIN_SPEC;
	}

      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
      sd_add_dep (new_dep, false);

      if (rec != EXIT_BLOCK_PTR)
	{
	  DEP_CON (new_dep) = twin;
	  sd_add_dep (new_dep, false);
	}
    }

  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
	  || mutate_p)
	/* We can delete this dep because we overcome it with
	   BEGIN_SPECULATION.  */
	sd_delete_dep (sd_it);
      else
	sd_iterator_next (&sd_it);
    }

  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* Luckiness of future speculations solely depends upon initial
	 BEGIN speculation.  */
      if (ts & BEGIN_DATA)
	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
	fs = set_dep_weak (fs, BE_IN_CONTROL,
			   get_dep_weak (ts, BEGIN_CONTROL));
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);

  if (rec != EXIT_BLOCK_PTR)
    {
      /* Which types of dependencies should we use here is,
	 generally, a machine-dependent question...  But, for now,
	 we use ANTI and OUTPUT dependencies.  */

      if (!mutate_p)
	{
	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
	  sd_add_dep (new_dep, false);

	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
	  sd_add_dep (new_dep, false);
	}
      else
	{
	  if (spec_info->dump)
	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
		     (*current_sched_info->print_insn) (insn, 0));

	  /* Remove all dependencies of the INSN.  */
	  sd_it = sd_iterator_start (insn, (SD_LIST_FORW
					    | SD_LIST_BACK
					    | SD_LIST_RES_BACK));
	  while (sd_iterator_cond (&sd_it, &dep))
	    sd_delete_dep (sd_it);

	  /* If former check (INSN) already was moved to the ready (or queue)
	     list, add new check (CHECK) there too.  */
	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
	    try_ready (check);

	  /* Remove old check from instruction stream and free its
	     data.  */
	  sched_remove_insn (insn);
	}

      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      rtx_vec_t priorities_roots = NULL;

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
      VEC_free (rtx, heap, priorities_roots);
    }
}
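/* A concrete picture of the branchy case (an illustrative sketch only;
   the CHECK pattern actually comes from targetm.sched.gen_spec_check
   and is target-specific, e.g. IA-64 emits chk.a/chk.s instructions):

   before:                      after:

     insn: r1 = [r2]  (spec)      first_bb:  check: on failure goto rec
				  second_bb: ...users of r1...
				  rec:       twin: r1 = [r2]
					     jump second_bb

   CHECK guards the speculation in place of INSN, while TWIN, sitting
   in the recovery block, redoes the work when the check fails.  */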
/* Removes dependencies between instructions in the recovery block REC
   and usual region instructions.  It keeps inner dependences so it
   won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx note, insn, jump, ready_list = 0;
  bitmap_head in_ready;
  rtx link;

  bitmap_initialize (&in_ready, 0);

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx consumer = DEP_CON (dep);

	  if (BLOCK_FOR_INSN (consumer) != rec)
	    {
	      sd_delete_dep (sd_it);

	      if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
		ready_list = alloc_INSN_LIST (consumer, ready_list);
	    }
	  else
	    {
	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

	      sd_iterator_next (&sd_it);
	    }
	}

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  bitmap_clear (&in_ready);

  /* Try to add instructions to the ready or queue list.  */
  for (link = ready_list; link; link = XEXP (link, 1))
    try_ready (XEXP (link, 0));
  free_INSN_LIST_list (&ready_list);

  /* Fixing jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}
/* Change pattern of INSN to NEW_PAT.  */
void
sched_change_pattern (rtx insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  gcc_assert (t);
  dfa_clear_single_insn_cache (insn);
}

/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static void
haifa_change_pattern (rtx insn, rtx new_pat)
{
  sched_change_pattern (insn, new_pat);

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;
}
/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
	      && (request & SPECULATIVE)
	      && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}

/* Wrapper for sched_speculate_insn () that refuses insns with internal
   dependencies or insns that are part of a scheduling group.  */
static int
haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
	      && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}
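/* Intended use of the return value (an illustrative sketch, not code
   from this file):

     rtx new_pat;
     switch (haifa_speculate_insn (insn, request, &new_pat))
       {
       case -1:  give up on speculating INSN;
       case  0:  keep using PATTERN (insn) unchanged;
       case  1:  haifa_change_pattern (insn, new_pat); break;
       }
 */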
/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero if the scheduler is about to start with a fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
{
  if (!i)
    fprintf (sched_dump,
	     ";;   ======================================================\n");
  else
    fprintf (sched_dump,
	     ";;   =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
	   ";;   -- basic block %d from %d to %d -- %s reload\n",
	   bb->index, INSN_UID (head), INSN_UID (tail),
	   (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
	   ";;   ======================================================\n");
  fprintf (sched_dump, "\n");
}
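/* For example, when starting a fresh ebb at basic block 3 spanning
   insns 20..35 after reload, the dump contains:

   ;;   ======================================================
   ;;   -- basic block 3 from 20 to 35 -- after reload
   ;;   ======================================================

   (the block/insn numbers here are made up for illustration).  */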
/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume that there will be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In the usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx, last_basic_block);

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR)
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx prev, label, note, next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      NEXT_INSN (prev) = next;
      PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
	break;

      last = last->prev_bb;
    }
  while (1);
}
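/* Each iteration splices the leading "label, basic-block note" pair out
   of the insn chain:

     prev -> label -> note -> next    becomes    prev -> next

   with LABEL stashed in bb_header[last->index], so restore_bb_notes can
   later relink the pair in exactly the same place.  */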
/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually the second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR
	 && bb_header[first->index])
    {
      rtx prev, label, note, next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      NEXT_INSN (prev) = label;
      NEXT_INSN (note) = next;
      PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}
/* Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* If jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       in jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}
/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  VEC(edge,gc) *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}
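/* The three move_succs calls rotate the successor vectors: BB takes over
   JUMP_BB's successors, JUMP_BB takes JUMP_BB_NEXT's, and JUMP_BB_NEXT
   receives BB's original ones, saved in T.  This mirrors the physical
   reordering of the blocks done by unlink_block/link_block above.  */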
/* Helper function for move_block_after_check.
   This function attaches the edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (VEC(edge,gc) **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;
  *succsp = 0;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;
}
/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  remove_insn (insn);
}
/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in the vector pointed to by ROOTS_PTR the insns on which priority ()
   should be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
	{
	  /* If DEP doesn't contribute to priority then INSN itself should
	     be added to priority roots.  */
	  if (contributes_to_priority_p (dep))
	    insn_is_root_p = false;

	  INSN_PRIORITY_STATUS (pro) = -1;
	  clear_priorities (pro, roots_ptr);
	}
    }

  if (insn_is_root_p)
    VEC_safe_push (rtx, heap, *roots_ptr, insn);
}
/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx insn;

  FOR_EACH_VEC_ELT (rtx, roots, i, insn)
    priority (insn);
}
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx insn, rtx jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
	break;

      if (dep_list_size (insn) == 0)
	{
	  dep_def _new_dep, *new_dep = &_new_dep;

	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
	  sd_add_dep (new_dep, false);
	}
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx
bb_note (basic_block bb)
{
  rtx note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return note;
}
#ifdef ENABLE_CHECKING
/* Helper function for check_cfg.
   Return nonzero if the edge vector pointed to by EL has an edge with
   TYPE in its flags.  */
static int
has_edge_p (VEC(edge,gc) *el, int type)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, el)
    if (e->flags & type)
      return 1;
  return 0;
}
/* Search back, starting at INSN, for an insn that is not a
   NOTE_INSN_VAR_LOCATION.  Don't search beyond HEAD, and return it if
   no such insn can be found.  */
static rtx
prev_non_location_insn (rtx insn, rtx head)
{
  while (insn != head && NOTE_P (insn)
	 && NOTE_KIND (insn) == NOTE_INSN_VAR_LOCATION)
    insn = PREV_INSN (insn);

  return insn;
}
/* Check a few properties of the CFG between HEAD and TAIL.
   If HEAD (TAIL) is NULL check from the beginning (till the end) of the
   instruction stream.  */
static void
check_cfg (rtx head, rtx tail)
{
  rtx next_tail;
  basic_block bb = 0;
  int not_first = 0, not_last;

  if (head == NULL)
    head = get_insns ();
  if (tail == NULL)
    tail = get_last_insn ();
  next_tail = NEXT_INSN (tail);

  do
    {
      not_last = head != tail;

      if (not_first)
	gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
      if (not_last)
	gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);

      if (LABEL_P (head)
	  || (NOTE_INSN_BASIC_BLOCK_P (head)
	      && (!not_first
		  || (not_first && !LABEL_P (PREV_INSN (head))))))
	{
	  gcc_assert (bb == 0);
	  bb = BLOCK_FOR_INSN (head);
	  if (bb != 0)
	    gcc_assert (BB_HEAD (bb) == head);
	  else
	    /* This is the case of jump table.  See inside_basic_block_p ().  */
	    gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
	}

      if (bb == 0)
	{
	  gcc_assert (!inside_basic_block_p (head));
	  head = NEXT_INSN (head);
	}
      else
	{
	  gcc_assert (inside_basic_block_p (head)
		      || NOTE_P (head));
	  gcc_assert (BLOCK_FOR_INSN (head) == bb);

	  if (LABEL_P (head))
	    {
	      head = NEXT_INSN (head);
	      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
	    }
	  else
	    {
	      if (control_flow_insn_p (head))
		{
		  gcc_assert (prev_non_location_insn (BB_END (bb), head)
			      == head);

		  if (any_uncondjump_p (head))
		    gcc_assert (EDGE_COUNT (bb->succs) == 1
				&& BARRIER_P (NEXT_INSN (head)));
		  else if (any_condjump_p (head))
		    gcc_assert (/* Usual case.  */
				(EDGE_COUNT (bb->succs) > 1
				 && !BARRIER_P (NEXT_INSN (head)))
				/* Or jump to the next instruction.  */
				|| (EDGE_COUNT (bb->succs) == 1
				    && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
					== JUMP_LABEL (head))));
		}
	      if (BB_END (bb) == head)
		{
		  if (EDGE_COUNT (bb->succs) > 1)
		    gcc_assert (control_flow_insn_p (prev_non_location_insn
						     (head, BB_HEAD (bb)))
				|| has_edge_p (bb->succs, EDGE_COMPLEX));
		  bb = 0;
		}

	      head = NEXT_INSN (head);
	    }
	}

      not_first = 1;
    }
  while (head != next_tail);

  gcc_assert (bb == 0);
}

#endif /* ENABLE_CHECKING */
/* Extend per basic block data structures.  */
static void
extend_bb (void)
{
  if (sched_scan_info->extend_bb)
    sched_scan_info->extend_bb ();
}

/* Init data for BB.  */
static void
init_bb (basic_block bb)
{
  if (sched_scan_info->init_bb)
    sched_scan_info->init_bb (bb);
}

/* Extend per insn data structures.  */
static void
extend_insn (void)
{
  if (sched_scan_info->extend_insn)
    sched_scan_info->extend_insn ();
}

/* Init data structures for INSN.  */
static void
init_insn (rtx insn)
{
  if (sched_scan_info->init_insn)
    sched_scan_info->init_insn (insn);
}

/* Init all insns in BB.  */
static void
init_insns_in_bb (basic_block bb)
{
  rtx insn;

  FOR_BB_INSNS (bb, insn)
    init_insn (insn);
}
/* A driver function to add a set of basic blocks (BBS),
   a single basic block (BB), a set of insns (INSNS) or a single insn (INSN)
   to the scheduling region.  */
void
sched_scan (const struct sched_scan_info_def *ssi,
	    bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  sched_scan_info = ssi;

  if (bbs != NULL || bb != NULL)
    {
      extend_bb ();

      if (bbs != NULL)
	{
	  unsigned i;
	  basic_block x;

	  FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
	    init_bb (x);
	}

      if (bb != NULL)
	init_bb (bb);
    }

  extend_insn ();

  if (bbs != NULL)
    {
      unsigned i;
      basic_block x;

      FOR_EACH_VEC_ELT (basic_block, bbs, i, x)
	init_insns_in_bb (x);
    }

  if (bb != NULL)
    init_insns_in_bb (bb);

  if (insns != NULL)
    {
      unsigned i;
      rtx x;

      FOR_EACH_VEC_ELT (rtx, insns, i, x)
	init_insn (x);
    }

  if (insn != NULL)
    init_insn (insn);
}
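/* For instance, haifa_init_insn below brings a single freshly emitted
   insn into the luid tables by passing only INSN:

     sched_init_luids (NULL, NULL, NULL, insn);

   which reaches sched_scan with BBS == BB == INSNS == NULL, so only
   the extend_insn/init_insn callbacks run, and only for that insn.  */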
/* Extend data structures for logical insn UID.  */
static void
luids_extend_insn (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
static void
luids_init_insn (rtx insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}
/* Initialize luids for BBS, BB, INSNS and INSN.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  const struct sched_scan_info_def ssi =
    {
      NULL, /* extend_bb */
      NULL, /* init_bb */
      luids_extend_insn, /* extend_insn */
      luids_init_insn /* init_insn */
    };

  sched_scan (&ssi, bbs, bb, insns, insn);
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  VEC_free (int, heap, sched_luids);
  sched_max_luid = 1;
}

/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx insn)
{
  return INSN_LUID (insn);
}
/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1
		 - VEC_length (haifa_insn_data_def, h_i_d));

  if (reserve > 0
      && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
    {
      VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
			     3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}
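/* Growing to 3 * get_max_uid () / 2 rather than to the exact size means
   that repeated single-insn emissions (e.g. from create_check_block_twin)
   trigger reallocation only rarely, amortizing the cost of extending
   h_i_d and the target's per-insn data.  */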
/* Initialize the h_i_d entry of INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
    }
}
/* Initialize haifa_insn_data for BBS, BB, INSNS and INSN.  */
void
haifa_init_h_i_d (bb_vec_t bbs, basic_block bb, insn_vec_t insns, rtx insn)
{
  const struct sched_scan_info_def ssi =
    {
      NULL, /* extend_bb */
      NULL, /* init_bb */
      extend_h_i_d, /* extend_insn */
      init_h_i_d /* init_insn */
    };

  sched_scan (&ssi, bbs, bb, insns, insn);
}
/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  struct reg_use_data *use, *next;

  FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
    {
      if (data->reg_pressure != NULL)
	free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next)
	{
	  next = use->next_insn_use;
	  free (use);
	}
    }
  VEC_free (haifa_insn_data_def, heap, h_i_d);
}
/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx insn)
{
  gcc_assert (insn != NULL);

  sched_init_luids (NULL, NULL, NULL, insn);
  sched_extend_target ();
  sched_deps_init (false);
  haifa_init_h_i_d (NULL, NULL, NULL, insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
}
/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}
/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}
/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx
sched_emit_insn (rtx pat)
{
  rtx insn = emit_insn_after (pat, last_scheduled_insn);
  last_scheduled_insn = insn;
  haifa_init_insn (insn);
  return insn;
}
/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */
static rtx
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || INSN_CODE (insn) < 0
      || !INSN_P (insn)
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (INSN_CODE (insn) < 0
	  || !INSN_P (insn)
	  || !active_insn_p (insn))
	continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
	{
	  /* Return i-th element of ready.  */
	  insn = ready_remove (ready, i);
	  return insn;
	}
    }

  if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (INSN_CODE (insn) < 0
	  || !INSN_P (insn)
	  || !active_insn_p (insn))
	continue;

      /* Return i-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
	return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}
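/* The selection above falls back in stages: take the head of the ready
   list outright when it trivially qualifies; otherwise scan for any insn
   that fits the dispatch window; on a reported dispatch violation give
   up and take the head; else prefer a compare (IS_CMP) insn; and finally
   fall back to the head of the list unconditionally.  */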
/* Get the number of ready insns in the ready list.  */
int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Get the I-th element of the ready list.  */
rtx
get_ready_element (int i)
{
  return ready_element (&ready, i);
}

#endif /* INSN_SCHEDULING */