/* Instruction scheduling pass.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for
   the normal instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7.  choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   pressure.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
127 #include "coretypes.h"
131 #include "cfghooks.h"
133 #include "memmodel.h"
135 #include "insn-config.h"
139 #include "insn-attr.h"
141 #include "cfgbuild.h"
142 #include "sched-int.h"
143 #include "common/common-target.h"
147 #include "dumpfile.h"
148 #include "print-rtl.h"
149 #include "function-abi.h"

#ifdef INSN_SCHEDULING

/* True if we do register pressure relief through live-range
   shrinkage.  */
static bool live_range_shrinkage_p;

/* Switch on live range shrinkage.  */
void
initialize_live_range_shrinkage (void)
{
  live_range_shrinkage_p = true;
}

/* Switch off live range shrinkage.  */
void
finish_live_range_shrinkage (void)
{
  live_range_shrinkage_p = false;
}

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */
int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;

/* The current initiation interval used when modulo scheduling.  */
static int modulo_ii;

/* The maximum number of stages we are prepared to handle.  */
static int modulo_max_stages;

/* The number of insns that exist in each iteration of the loop.  We use this
   to detect when we've scheduled all insns from the first iteration.  */
static int modulo_n_insns;

/* The current count of insns in the first iteration of the loop that have
   already been scheduled.  */
static int modulo_insns_scheduled;

/* The maximum uid of insns from the first iteration of the loop.  */
static int modulo_iter0_max_uid;

/* The number of times we should attempt to backtrack when modulo scheduling.
   Decreased each time we have to backtrack.  */
static int modulo_backtracks_left;

/* The stage in which the last insn from the original loop was
   scheduled.  */
static int modulo_last_stage;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N=0: no debugging output.
   N=1: default value.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump.  */
FILE *sched_dump = 0;

/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN) (HID (INSN)->tick)
#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
/* Cached cost of the instruction.  Use insn_sched_cost to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN) (HID (INSN)->cost)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Original order of insns in the ready list.
   Used to keep order of normal insns while separating DEBUG_INSNs.  */
#define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)

/* The deciding reason for INSN's place in the ready list.  */
#define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx_insn *note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True, if recovery block was added during scheduling of current block.
   Used to determine, if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True, if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx_insn **bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */
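
/* A compact summary of the transitions just described, using the same
   function names (added for clarity):

       P --schedule_insn---> R or Q   (dependencies become satisfied)
       Q --queue_to_ready--> R        (enough cycles have elapsed)
       R --schedule_block--> S        (best ready insn is committed)  */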

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c,
   based on the maximal time of functional unit reservations and of
   getting a result.  This is the longest time an insn may be queued.  */

static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue, nor ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */
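
/* Illustrative sketch (added; not used by the scheduler itself): the
   slot an insn occupies when queued for N_CYCLES cycles, exactly as
   queue_insn computes it with NEXT_Q_AFTER.  For example, with
   max_insn_queue_index == 63 and q_ptr == 60, a delay of 6 cycles
   gives slot (60 + 6) & 63 == 2, i.e. the circular buffer wraps.  */
static inline int
insn_queue_slot_sketch (int n_cycles)
{
  return NEXT_Q_AFTER (q_ptr, n_cycles);
}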

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)

/* The following variable refers to all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  */
signed char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable is the number of essential insns issued on
   the current cycle.  An insn is essential if it changes the
   processor's state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static vec<rtx_insn *> scheduled_insns;

static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is comprised from at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))
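
/* Worked examples (added for clarity): the addresses (reg R) and
   (plus (reg R) (const_int 4)) satisfy CONST_BASED_ADDRESS_P, while
   (plus (reg R1) (reg R2)) does not, because neither operand of the
   PLUS is constant.  */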

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int haifa_classify_insn (const_rtx);
static int haifa_luid_for_non_insn (rtx x);

/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

/* Mapping from instruction UID to its Logical UID.  */
vec<int> sched_luids;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
vec<haifa_insn_data_def> h_i_d;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);

/* Return the number of cycles until INSN is expected to be ready.
   Return zero if it already is.  */
static int
insn_delay (rtx_insn *insn)
{
  return MAX (INSN_TICK (insn) - clock_var, 0);
}
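
/* Worked example (added): if INSN_TICK (insn) == 14 while clock_var
   == 11, insn_delay returns 3, i.e. the insn could issue without a
   stall three cycles from now; once clock_var reaches 14 the delay
   clamps to zero.  */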

static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;

      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;

      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;

      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;

	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}

/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: a load that needs to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:
	  break;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

static int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}

/* After the scheduler initialization function has been called, this function
   can be called to enable modulo scheduling.  II is the initiation interval
   we should use, it affects the delays for delay_pairs that were recorded as
   separated by a given number of stages.

   MAX_STAGES provides us with a limit
   after which we give up scheduling; the caller must have unrolled at least
   as many copies of the loop body and recorded delay_pairs for them.

   INSNS is the number of real (non-debug) insns in one iteration of
   the loop.  MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
void
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
  modulo_n_insns = insns;
  modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = param_max_modulo_backtrack_attempts;
}
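
/* Hypothetical usage sketch (added; the values are illustrative only):
   a port doing modulo scheduling with an initiation interval of 4 that
   unrolled 8 copies of a 10-insn loop body might call

     set_modulo_params (4, 8, 10, max_uid_of_first_iteration);

   after which delay_pairs recorded with STAGES == k are kept
   k * 4 cycles apart.  */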

/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx_insn *i1, *i2;
  int cycles;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If that
     is the case, STAGES will be nonzero.  */
  int stages;
};

/* Helpers for delay hashing.  */

struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I1.  */

inline hashval_t
delay_i1_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i1);
}

/* Return true if I1 of pair X is the same as that of pair Y.  */

inline bool
delay_i1_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i1 == y;
}

struct delay_i2_hasher : free_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I2.  */

inline hashval_t
delay_i2_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i2);
}

/* Return true if I2 of pair X is the same as that of pair Y.  */

inline bool
delay_i2_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i2 == y;
}

/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
static hash_table <delay_i1_hasher> *delay_htab;
static hash_table <delay_i2_hasher> *delay_htab_i2;

/* Called through htab_traverse.  Walk the hashtable using I2 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i2_traverse (delay_pair **slot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p = *slot;
  if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
    delay_htab_i2->clear_slot (slot);
  return 1;
}

/* Called through htab_traverse.  Walk the hashtable using I1 as
   index, and delete all elements involving an UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i1_traverse (delay_pair **pslot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p, *first, **pprev;

  if (INSN_UID ((*pslot)->i1) >= maxuid)
    {
      delay_htab->clear_slot (pslot);
      return 1;
    }
  pprev = &first;
  for (p = *pslot; p; p = p->next_same_i1)
    if (INSN_UID (p->i2) < maxuid)
      {
	*pprev = p;
	pprev = &p->next_same_i1;
      }
  *pprev = NULL;
  if (first == NULL)
    delay_htab->clear_slot (pslot);
  else
    *pslot = first;
  return 1;
}

/* Discard all delay pairs which involve an insn with an UID higher
   than MAX_UID.  */
void
discard_delay_pairs_above (int max_uid)
{
  delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
  delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
}

/* This function can be called by a port just before it starts the final
   scheduling pass.  It records the fact that an instruction with delay
   slots has been split into two insns, I1 and I2.  The first one will be
   scheduled normally and initiates the operation.  The second one is a
   shadow which must follow a specific number of cycles after I1; its only
   purpose is to show the side effect that occurs at that cycle in the RTL.
   If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
   while I2 retains the original insn type.

   There are two ways in which the number of cycles can be specified,
   involving the CYCLES and STAGES arguments to this function.  If STAGES
   is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
   which is multiplied by MODULO_II to give the number of cycles.  This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */
void
record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;
  p->stages = stages;

  if (!delay_htab)
    {
      delay_htab = new hash_table<delay_i1_hasher> (10);
      delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
    }
  slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
  p->next_same_i1 = *slot;
  *slot = p;
  slot = delay_htab_i2->find_slot (p, INSERT);
  *slot = p;
}

/* Examine the delay pair hashtable to see if INSN is a shadow for another,
   and return the other insn if so.  Return NULL otherwise.  */
rtx_insn *
real_insn_for_shadow (rtx_insn *insn)
{
  struct delay_pair *pair;

  if (!delay_htab)
    return NULL;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair || pair->stages > 0)
    return NULL;
  return pair->i1;
}

/* For a pair P of insns, return the fixed distance in cycles from the first
   insn after which the second must be scheduled.  */
static int
pair_delay (struct delay_pair *p)
{
  if (p->stages == 0)
    return p->cycles;
  else
    return p->stages * modulo_ii;
}
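
/* Worked example (added): with modulo_ii == 4, a pair recorded with
   stages == 2 yields pair_delay == 8 cycles, whereas a non-modulo
   pair (stages == 0) simply uses its recorded CYCLES value.  */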

/* Given an insn INSN, add a dependence on its delayed shadow if it
   has one.  Also try to find situations where shadows depend on each other
   and add dependencies to the real insns to limit the amount of backtracking
   needed.  */
void
add_delay_dependencies (rtx_insn *insn)
{
  struct delay_pair *pair;
  sd_iterator_def sd_it;
  dep_t dep;

  if (!delay_htab)
    return;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair)
    return;
  add_dependence (insn, pair->i1, REG_DEP_ANTI);
  if (pair->stages)
    return;

  FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      struct delay_pair *other_pair
	= delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
      if (!other_pair || other_pair->stages)
	continue;
      if (pair_delay (other_pair) >= pair_delay (pair))
	{
	  if (sched_verbose >= 4)
	    {
	      fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (pair->i1));
	      fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
		       INSN_UID (pair->i1),
		       INSN_UID (pair->i2),
		       pair_delay (pair));
	      fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (other_pair->i2),
		       pair_delay (other_pair));
	    }
	  add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
	}
    }
}

/* Forward declarations.  */

static int priority (rtx_insn *, bool force_recompute = false);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
static void queue_insn (rtx_insn *, int, const char *);
static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx_insn *);
static void advance_one_cycle (void);
static void extend_h_i_d (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx_insn *, bool);
static rtx_insn *ready_remove_first (struct ready_list *);
static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx_insn *ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx_insn *);

static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void change_queue_index (rtx_insn *, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static bool haifa_change_pattern (rtx_insn *, rtx);
static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx_insn *);
static void move_block_after_check (rtx_insn *);
static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx_insn *);
static void clear_priorities (rtx_insn *, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx_insn *, rtx_insn *);

#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
enum sched_pressure_algorithm sched_pressure;

/* Map regno -> its pressure class.  The map is defined only when
   SCHED_PRESSURE != SCHED_PRESSURE_NONE.  */
enum reg_class *sched_regno_pressure_class;

/* The current register pressure.  Only elements corresponding to pressure
   classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers living at the given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous bitmap.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Temporary bitmap used for SCHED_PRESSURE_MODEL.  */
static bitmap tmp_bitmap;

/* Effective number of available registers of a given class (see comment
   in sched_pressure_start_bb).  */
static int sched_class_regs_num[N_REG_CLASSES];
/* The number of registers that the function would need to save before it
   uses them, and the number of fixed_regs.  Helpers for calculating
   sched_class_regs_num.  */
static int call_saved_regs_num[N_REG_CLASSES];
static int fixed_regs_num[N_REG_CLASSES];

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it only clears the registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}

/* PRESSURE[CL] describes the pressure on register class CL.  Update it
   for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
   LIVE tracks the set of live registers; if it is null, assume that
   every birth or death is genuine.  */
static void
mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
{
  enum reg_class pressure_class;

  pressure_class = sched_regno_pressure_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (pressure_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      if (!live || bitmap_set_bit (live, regno))
		pressure[pressure_class]
		  += (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	  else
	    {
	      if (!live || bitmap_clear_bit (live, regno))
		pressure[pressure_class]
		  -= (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	}
    }
  else if (pressure_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  if (!live || bitmap_set_bit (live, regno))
	    pressure[pressure_class]++;
	}
      else
	{
	  if (!live || bitmap_clear_bit (live, regno))
	    pressure[pressure_class]--;
	}
    }
}

/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (sched_pressure == SCHED_PRESSURE_MODEL
	|| current_nr_blocks == 1
	|| bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
}

/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	setup_ref_regs (XVECEXP (x, i, j));
}

/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx_insn *insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				     regno, true);
      }
}

/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    saved_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]]
      = saved_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}

/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;

  return true;
}

/* Print info about the current register pressure and its excess for
   each pressure class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, "  %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - sched_class_regs_num[cl]);
    }
  fprintf (sched_dump, "\n");
}

/* Determine if INSN has a condition that is clobbered if a register
   in SET_REGS is modified.  */
static bool
cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
{
  rtx pat = PATTERN (insn);
  gcc_assert (GET_CODE (pat) == COND_EXEC);
  if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
    {
      sd_iterator_def sd_it;
      dep_t dep;

      haifa_change_pattern (insn, ORIG_PAT (insn));
      FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	DEP_STATUS (dep) &= ~DEP_CANCELLED;
      TODO_SPEC (insn) = HARD_DEP;
      if (sched_verbose >= 2)
	fprintf (sched_dump,
		 ";;\t\tdequeue insn %s because of clobbered condition\n",
		 (*current_sched_info->print_insn) (insn, 0));
      return true;
    }

  return false;
}

/* This function should be called after modifying the pattern of INSN,
   to update scheduler data structures as needed.  */
static void
update_insn_after_change (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  dfa_clear_single_insn_cache (insn);

  sd_it = sd_iterator_start (insn,
			     SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      DEP_COST (dep) = UNKNOWN_DEP_COST;
      sd_iterator_next (&sd_it);
    }

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;

  /* Invalidate autoprefetch data entry.  */
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
}

/* Two VECs, one to hold dependencies for which pattern replacements
   need to be applied or restored at the start of the next cycle, and
   another to hold an integer that is either one, to apply the
   corresponding replacement, or zero to restore it.  */
static vec<dep_t> next_cycle_replace_deps;
static vec<int> next_cycle_apply;

static void apply_replacement (dep_t, bool);
static void restore_pattern (dep_t, bool);

/* Look at the remaining dependencies for insn NEXT, and compute and return
   the TODO_SPEC value we should use for it.  This is called after one of
   NEXT's dependencies has been resolved.
   We also perform pattern replacements for predication, and for broken
   replacement dependencies.  The latter is only done if FOR_BACKTRACK is
   false.  */

static ds_t
recompute_todo_spec (rtx_insn *next, bool for_backtrack)
{
  ds_t new_ds;
  sd_iterator_def sd_it;
  dep_t dep, modify_dep = NULL;
  int n_spec = 0;
  int n_control = 0;
  int n_replace = 0;
  bool first_p = true;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    return 0;

  if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    return HARD_DEP;

  /* If NEXT is intended to sit adjacent to this instruction, we don't
     want to try to break any dependencies.  Treat it as a HARD_DEP.  */
  if (SCHED_GROUP_P (next))
    return HARD_DEP;

  /* Now we've got NEXT with speculative deps only.
     1. Look at the deps to see what we have to do.
     2. Check if we can do 'todo'.  */
  new_ds = 0;

  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

      if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
	continue;

      if (ds)
	{
	  n_spec++;
	  if (first_p)
	    {
	      first_p = false;
	      new_ds = ds;
	    }
	  else
	    new_ds = ds_merge (new_ds, ds);
	}
      else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_control++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
      else if (DEP_REPLACE (dep) != NULL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_replace++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
    }

  if (n_replace > 0 && n_control == 0 && n_spec == 0)
    {
      if (!dbg_cnt (sched_breakdep))
	return HARD_DEP;
      FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
	{
	  struct dep_replacement *desc = DEP_REPLACE (dep);
	  if (desc != NULL)
	    {
	      if (desc->insn == next && !for_backtrack)
		{
		  gcc_assert (n_replace == 1);
		  apply_replacement (dep, true);
		}
	      DEP_STATUS (dep) |= DEP_CANCELLED;
	    }
	}
      return 0;
    }

  else if (n_control == 1 && n_replace == 0 && n_spec == 0)
    {
      rtx_insn *pro, *other;
      rtx new_pat;
      rtx cond = NULL_RTX;
      bool success;
      rtx_insn *prev = NULL;
      int i;
      unsigned regno;

      if ((current_sched_info->flags & DO_PREDICATION) == 0
	  || (ORIG_PAT (next) != NULL_RTX
	      && PREDICATED_PAT (next) == NULL_RTX))
	return HARD_DEP;

      pro = DEP_PRO (modify_dep);
      other = real_insn_for_shadow (pro);
      if (other != NULL_RTX)
	pro = other;

      cond = sched_get_reverse_condition_uncached (pro);
      regno = REGNO (XEXP (cond, 0));

      /* Find the last scheduled insn that modifies the condition register.
	 We can stop looking once we find the insn we depend on through the
	 REG_DEP_CONTROL; if the condition register isn't modified after it,
	 we know that it still has the right value.  */
      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
	FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
	  {
	    HARD_REG_SET t;

	    find_all_hard_reg_sets (prev, &t, true);
	    if (TEST_HARD_REG_BIT (t, regno))
	      return HARD_DEP;
	    if (prev == pro)
	      break;
	  }
      if (ORIG_PAT (next) == NULL_RTX)
	{
	  ORIG_PAT (next) = PATTERN (next);

	  new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
	  success = haifa_change_pattern (next, new_pat);
	  if (!success)
	    return HARD_DEP;
	  PREDICATED_PAT (next) = new_pat;
	}
      else if (PATTERN (next) != PREDICATED_PAT (next))
	{
	  bool success = haifa_change_pattern (next,
					       PREDICATED_PAT (next));
	  gcc_assert (success);
	}
      DEP_STATUS (modify_dep) |= DEP_CANCELLED;
      return DEP_CONTROL;
    }

  if (PREDICATED_PAT (next) != NULL_RTX)
    {
      int tick = INSN_TICK (next);
      bool success = haifa_change_pattern (next,
					   ORIG_PAT (next));
      INSN_TICK (next) = tick;
      gcc_assert (success);
    }

  /* We can't handle the case where there are both speculative and control
     dependencies, so we return HARD_DEP in such a case.  Also fail if
     we have speculative dependencies with not enough points, or more than
     one control dependency.  */
  if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
      || (n_spec > 0
	  /* Too few points?  */
	  && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
      || (n_control > 0 && n_replace > 0))
    return HARD_DEP;

  return new_ds;
}

/* Pointer to the last instruction scheduled.  */
static rtx_insn *last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx_insn *last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx_insn *nonscheduled_insns_begin;

/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_sched_cost (rtx_insn *insn)
{
  int cost;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx_insn *insn = DEP_PRO (link);
  rtx_insn *used = DEP_CON (link);
  int cost;

  if (DEP_COST (link) != UNKNOWN_DEP_COST)
    return DEP_COST (link);

  if (delay_htab)
    {
      struct delay_pair *delay_entry;
      delay_entry
	= delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
      if (delay_entry)
	{
	  if (delay_entry->i1 == insn)
	    {
	      DEP_COST (link) = pair_delay (delay_entry);
	      return DEP_COST (link);
	    }
	}
    }

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_sched_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost)
	cost = targetm.sched.adjust_cost (used, (int) dep_type, insn, cost,
					  dw);

      if (cost < 0)
	cost = 0;
    }

  DEP_COST (link) = cost;
  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}

/* Use this sel-sched.c friendly function in reorder2 instead of increasing
   INSN_PRIORITY explicitly.  */
void
increase_insn_priority (rtx_insn *insn, int amount)
{
  if (!sel_sched_p ())
    {
      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
      if (INSN_PRIORITY_KNOWN (insn))
	INSN_PRIORITY (insn) += amount;
    }
  else
    {
      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
	 Use EXPR_PRIORITY instead.  */
      sel_add_to_insn_priority (insn, amount);
    }
}

/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  if (DEBUG_INSN_P (DEP_CON (dep))
      || DEBUG_INSN_P (DEP_PRO (dep)))
    return false;

  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
						    DEP_PRO (dep)))
    return false;

  if (DEP_REPLACE (dep) != NULL)
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will less likely be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
  if (sched_deps_info->generate_spec_deps
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}

/* Compute the number of nondebug deps in list LIST for INSN.  */
static int
dep_list_size (rtx_insn *insn, sd_list_types_def list)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int dbgcount = 0, nodbgcount = 0;

  if (!MAY_HAVE_DEBUG_INSNS)
    return sd_lists_size (insn, list);

  FOR_EACH_DEP (insn, list, sd_it, dep)
    {
      if (DEBUG_INSN_P (DEP_CON (dep)))
	dbgcount++;
      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
	nodbgcount++;
    }

  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));

  return nodbgcount;
}

/* Compute the priority number for INSN.  */
static int
priority (rtx_insn *insn, bool force_recompute)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (force_recompute || !INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (sched_fusion)
	{
	  int this_fusion_priority;

	  targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
					 &this_fusion_priority, &this_priority);
	  INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
	}
      else if (dep_list_size (insn, SD_LIST_FORW) == 0)
	/* ??? We should set INSN_PRIORITY to insn_sched_cost when an insn
	   has some forward deps but all of them are ignored by
	   contributes_to_priority hook.  At the moment we set priority of
	   such insn to 0.  */
	this_priority = insn_sched_cost (insn);
      else
	{
	  rtx_insn *prev_first, *twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     different than that of normal instructions.  Instead of walking
	     through INSN_FORW_DEPS (check) list, we walk through
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */

	  /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      sd_iterator_def sd_it;
	      dep_t dep;

	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
		{
		  rtx_insn *next;
		  int next_priority;

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);

	  if (this_priority < 0)
	    {
	      gcc_assert (this_priority == -1);

	      this_priority = insn_sched_cost (insn);
	    }
	}

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}

/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

/* For each pressure class CL, set DEATH[CL] to the number of registers
   in that class that die in INSN.  */

static void
calculate_reg_deaths (rtx_insn *insn, int *death)
{
  int i;
  struct reg_use_data *use;

  for (i = 0; i < ira_pressure_classes_num; i++)
    death[ira_pressure_classes[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      mark_regno_birth_or_death (0, death, use->regno, true);
}

/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx_insn *insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  static int death[N_REG_CLASSES];

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  excess_cost_change = 0;
  calculate_reg_deaths (insn, death);
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
      after = MAX (0, max_reg_pressure[i] + change
		   - sched_class_regs_num[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
			     * (ira_memory_move_cost[mode][cl][0]
				+ ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}

/* This is the first page of code related to SCHED_PRESSURE_MODEL.
   It tries to make the scheduler take register pressure into account
   without introducing too many unnecessary stalls.  It hooks into the
   main scheduling algorithm at several points:

   - Before scheduling starts, model_start_schedule constructs a
     "model schedule" for the current block.  This model schedule is
     chosen solely to keep register pressure down.  It does not take the
     target's pipeline or the original instruction order into account,
     except as a tie-breaker.  It also doesn't work to a particular
     pressure limit.

     This model schedule gives us an idea of what pressure can be
     achieved for the block and gives us an example of a schedule that
     keeps to that pressure.  It also makes the final schedule less
     dependent on the original instruction order.  This is important
     because the original order can either be "wide" (many values live
     at once, such as in user-scheduled code) or "narrow" (few values
     live at once, such as after loop unrolling, where several
     iterations are executed sequentially).

     We do not apply this model schedule to the rtx stream.  We simply
     record it in model_schedule.  We also compute the maximum pressure,
     MP, that was seen during this schedule.

   - Instructions are added to the ready queue even if they require
     a stall.  The length of the stall is instead computed as:

	MAX (INSN_TICK (INSN) - clock_var, 0)

     (= insn_delay).  This allows rank_for_schedule to choose between
     introducing a deliberate stall or increasing pressure.

   - Before sorting the ready queue, model_set_excess_costs assigns
     a pressure-based cost to each ready instruction in the queue.
     This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
     (ECC for short) and is effectively measured in cycles.

   - rank_for_schedule ranks instructions based on:

	ECC (insn) + insn_delay (insn)

     with ties broken by the normal ranking heuristics.

     So, for example, an instruction X1 with an ECC of 1 that can issue
     now will win over an instruction X0 with an ECC of zero that would
     introduce a stall of one cycle.  However, an instruction X2 with an
     ECC of 2 that can issue now will lose to both X0 and X1.

   - When an instruction is scheduled, model_recompute updates the model
     schedule with the new pressures (some of which might now exceed the
     original maximum pressure MP).  model_update_limit_points then searches
     for the new point of maximum pressure, if not already known.  */

/* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
   from surrounding debug information.  */
#define MODEL_BAR \
  ";;\t\t+------------------------------------------------------\n"

/* Information about the pressure on a particular register class at a
   particular point of the model schedule.  */
struct model_pressure_data {
  /* The pressure at this point of the model schedule, or -1 if the
     point is associated with an instruction that has already been
     scheduled.  */
  int ref_pressure;

  /* The maximum pressure during or after this point of the model schedule.  */
  int max_pressure;
};

/* Per-instruction information that is used while building the model
   schedule.  Here, "schedule" refers to the model schedule rather
   than the main schedule.  */
struct model_insn_info {
  /* The instruction itself.  */
  rtx_insn *insn;

  /* If this instruction is in model_worklist, these fields link to the
     previous (higher-priority) and next (lower-priority) instructions
     in the list.  */
  struct model_insn_info *prev;
  struct model_insn_info *next;

  /* While constructing the schedule, QUEUE_INDEX describes whether an
     instruction has already been added to the schedule (QUEUE_SCHEDULED),
     is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
     old_queue records the value that QUEUE_INDEX had before scheduling
     started, so that we can restore it once the schedule is complete.  */
  int old_queue;

  /* The relative importance of an unscheduled instruction.  Higher
     values indicate greater importance.  */
  unsigned int model_priority;

  /* The length of the longest path of satisfied true dependencies
     that leads to this instruction.  */
  unsigned int depth;

  /* The length of the longest path of dependencies of any kind
     that leads from this instruction.  */
  unsigned int alap;

  /* The number of predecessor nodes that must still be scheduled.  */
  int unscheduled_preds;
};

/* Information about the pressure limit for a particular register class.
   This structure is used when applying a model schedule to the main
   schedule.  */
struct model_pressure_limit {
  /* The maximum register pressure seen in the original model schedule.  */
  int orig_pressure;

  /* The maximum register pressure seen in the current model schedule
     (which excludes instructions that have already been scheduled).  */
  int pressure;

  /* The point of the current model schedule at which PRESSURE is first
     reached.  It is set to -1 if the value needs to be recomputed.  */
  int point;
};

/* Describes a particular way of measuring register pressure.  */
struct model_pressure_group {
  /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI].  */
  struct model_pressure_limit limits[N_REG_CLASSES];

  /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
     on register class ira_pressure_classes[PCI] at point POINT of the
     current model schedule.  A POINT of model_num_insns describes the
     pressure at the end of the schedule.  */
  struct model_pressure_data *model;
};

/* Index POINT gives the instruction at point POINT of the model schedule.
   This array doesn't change during main scheduling.  */
static vec<rtx_insn *> model_schedule;

/* The list of instructions in the model worklist, sorted in order of
   decreasing priority.  */
static struct model_insn_info *model_worklist;

/* Index I describes the instruction with INSN_LUID I.  */
static struct model_insn_info *model_insns;

/* The number of instructions in the model schedule.  */
static int model_num_insns;

/* The index of the first instruction in model_schedule that hasn't yet been
   added to the main schedule, or model_num_insns if all of them have.  */
static int model_curr_point;

/* Describes the pressure before each instruction in the model schedule.  */
static struct model_pressure_group model_before_pressure;

/* The first unused model_priority value (as used in model_insn_info).  */
static unsigned int model_next_priority;
/* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
   at point POINT of the model schedule.  */
#define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
  (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])

/* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
   after point POINT of the model schedule.  */
#define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)

/* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
   of the model schedule.  */
#define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
  (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)

/* Information about INSN that is used when creating the model schedule.  */
#define MODEL_INSN_INFO(INSN) \
  (&model_insns[INSN_LUID (INSN)])

/* The instruction at point POINT of the model schedule.  */
#define MODEL_INSN(POINT) \
  (model_schedule[POINT])
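
/* As a worked example (illustrative numbers only): with two pressure
   classes, PCI 0 and PCI 1, GROUP->model is a flattened
   (model_num_insns + 1) x 2 matrix, so the entry for PCI 1 at point 3
   lives at GROUP->model[3 * 2 + 1].  The extra row at
   POINT == model_num_insns describes the end of the schedule.  */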
/* Return INSN's index in the model schedule, or model_num_insns if it
   doesn't belong to that schedule.  */

static int
model_index (rtx_insn *insn)
{
  if (INSN_MODEL_INDEX (insn) == 0)
    return model_num_insns;
  return INSN_MODEL_INDEX (insn) - 1;
}
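
/* Note that INSN_MODEL_INDEX uses 0 to mean "not in the model schedule";
   model_add_to_schedule below stores the real index plus 1.  So the insn
   at point 0 of the model schedule has INSN_MODEL_INDEX 1, and
   model_index maps it back to 0.  */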
/* Make sure that GROUP->limits is up-to-date for the current point
   of the model schedule.  */

static void
model_update_limit_points_in_group (struct model_pressure_group *group)
{
  int pci, max_pressure, point;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      /* We may have passed the final point at which the pressure in
	 group->limits[pci].pressure was reached.  Update the limit if so.  */
      max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
      group->limits[pci].pressure = max_pressure;

      /* Find the point at which MAX_PRESSURE is first reached.  We need
	 to search in three cases:

	 - We've already moved past the previous pressure point.
	   In this case we search forward from model_curr_point.

	 - We scheduled the previous point of maximum pressure ahead of
	   its position in the model schedule, but doing so didn't bring
	   the pressure point earlier.  In this case we search forward
	   from that previous pressure point.

	 - Scheduling an instruction early caused the maximum pressure
	   to decrease.  In this case we will have set the pressure
	   point to -1, and we search forward from model_curr_point.  */
      point = MAX (group->limits[pci].point, model_curr_point);
      while (point < model_num_insns
	     && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
	point++;
      group->limits[pci].point = point;

      gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
      gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
    }
}
/* Make sure that all register-pressure limits are up-to-date for the
   current position in the model schedule.  */

static void
model_update_limit_points (void)
{
  model_update_limit_points_in_group (&model_before_pressure);
}
/* Return the model_index of the last unscheduled use in chain USE
   outside of USE's instruction.  Return -1 if there are no other uses,
   or model_num_insns if the register is live at the end of the block.  */

static int
model_last_use_except (struct reg_use_data *use)
{
  struct reg_use_data *next;
  int last, index;

  last = -1;
  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      {
	index = model_index (next->insn);
	if (index == model_num_insns)
	  return model_num_insns;
	if (last < index)
	  last = index;
      }
  return last;
}
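
/* Worked example (illustrative): if the register is also used by an
   unscheduled instruction at model point 3 and by one at point 7, and
   neither lies outside the model schedule, the function returns 7.  If
   every other use has already been scheduled it returns -1, and if any
   use comes from outside the schedule (model_index == model_num_insns)
   the register is treated as live at the end of the block.  */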
/* An instruction with model_index POINT has just been scheduled, and it
   adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
   Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
   MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly.  */

static void
model_start_update_pressure (struct model_pressure_group *group,
			     int point, int pci, int delta)
{
  int next_max_pressure;

  if (point == model_num_insns)
    {
      /* The instruction wasn't part of the model schedule; it was moved
	 from a different block.  Update the pressure for the end of
	 the model schedule.  */
      MODEL_REF_PRESSURE (group, point, pci) += delta;
      MODEL_MAX_PRESSURE (group, point, pci) += delta;
    }
  else
    {
      /* Record that this instruction has been scheduled.  Nothing now
	 changes between POINT and POINT + 1, so get the maximum pressure
	 from the latter.  If the maximum pressure decreases, the new
	 pressure point may be before POINT.  */
      MODEL_REF_PRESSURE (group, point, pci) = -1;
      next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
      if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
	{
	  MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
	  if (group->limits[pci].point == point)
	    group->limits[pci].point = -1;
	}
    }
}
/* Record that scheduling a later instruction has changed the pressure
   at point POINT of the model schedule by DELTA (which might be 0).
   Update GROUP accordingly.  Return nonzero if these changes might
   trigger changes to previous points as well.  */

static int
model_update_pressure (struct model_pressure_group *group,
		       int point, int pci, int delta)
{
  int ref_pressure, max_pressure, next_max_pressure;

  /* If POINT hasn't yet been scheduled, update its pressure.  */
  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
  if (ref_pressure >= 0 && delta != 0)
    {
      ref_pressure += delta;
      MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;

      /* Check whether the maximum pressure in the overall schedule
	 has increased.  (This means that the MODEL_MAX_PRESSURE of
	 every point <= POINT will need to increase too; see below.)  */
      if (group->limits[pci].pressure < ref_pressure)
	group->limits[pci].pressure = ref_pressure;

      /* If we are at maximum pressure, and the maximum pressure
	 point was previously unknown or later than POINT,
	 bring it forward.  */
      if (group->limits[pci].pressure == ref_pressure
	  && !IN_RANGE (group->limits[pci].point, 0, point))
	group->limits[pci].point = point;

      /* If POINT used to be the point of maximum pressure, but isn't
	 any longer, we need to recalculate it using a forward walk.  */
      if (group->limits[pci].pressure > ref_pressure
	  && group->limits[pci].point == point)
	group->limits[pci].point = -1;
    }

  /* Update the maximum pressure at POINT.  Changes here might also
     affect the maximum pressure at POINT - 1.  */
  next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
  max_pressure = MAX (ref_pressure, next_max_pressure);
  if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
    {
      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
      return 1;
    }
  return 0;
}
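
/* Illustrative trace (made-up numbers): suppose the reference pressures
   at points 3..5 are {6, 8, 7} and the maximum from point 5 onwards is 7.
   If scheduling a later instruction changes point 4 by DELTA -2, its
   reference pressure becomes 6 and MODEL_MAX_PRESSURE at point 4 drops to
   MAX (6, 7) = 7; the function returns 1 so the caller also revisits
   point 3, whose maximum may fall from 8 to 7 as well.  */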
/* INSN has just been scheduled.  Update the model schedule accordingly.  */

static void
model_recompute (rtx_insn *insn)
{
  struct
  {
    int last_use;
    int regno;
  } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
  struct reg_use_data *use;
  struct reg_pressure_data *reg_pressure;
  int delta[N_REG_CLASSES];
  int pci, point, mix, new_last, cl, ref_pressure, queue;
  unsigned int i, num_uses, num_pending_births;
  bool print_p;

  /* The destinations of INSN were previously live from POINT onwards, but are
     now live from model_curr_point onwards.  Set up DELTA accordingly.  */
  point = model_index (insn);
  reg_pressure = INSN_REG_PRESSURE (insn);
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta[cl] = reg_pressure[pci].set_increase;
    }

  /* Record which registers previously died at POINT, but which now die
     before POINT.  Adjust DELTA so that it represents the effect of
     this change after POINT - 1.  Set NUM_PENDING_BIRTHS to the number of
     registers that will be born in the range [model_curr_point, POINT).  */
  num_uses = 0;
  num_pending_births = 0;
  bitmap_clear (tmp_bitmap);
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    {
      new_last = model_last_use_except (use);
      if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno))
	{
	  gcc_assert (num_uses < ARRAY_SIZE (uses));
	  uses[num_uses].last_use = new_last;
	  uses[num_uses].regno = use->regno;
	  /* This register is no longer live after POINT - 1.  */
	  mark_regno_birth_or_death (NULL, delta, use->regno, false);
	  num_uses++;
	  if (new_last >= 0)
	    num_pending_births++;
	}
    }

  /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
     Also set each group pressure limit for POINT.  */
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      model_start_update_pressure (&model_before_pressure,
				   point, pci, delta[cl]);
    }

  /* Walk the model schedule backwards, starting immediately before POINT.  */
  print_p = false;
  if (point != model_curr_point)
    do
      {
	point--;
	insn = MODEL_INSN (point);
	queue = QUEUE_INDEX (insn);

	if (queue != QUEUE_SCHEDULED)
	  {
	    /* DELTA describes the effect of the move on the register pressure
	       after POINT.  Make it describe the effect on the pressure
	       before POINT.  */
	    i = 0;
	    while (i < num_uses)
	      {
		if (uses[i].last_use == point)
		  {
		    /* This register is now live again.  */
		    mark_regno_birth_or_death (NULL, delta,
					       uses[i].regno, true);

		    /* Remove this use from the array.  */
		    uses[i] = uses[num_uses - 1];
		    num_uses--;
		    num_pending_births--;
		  }
		else
		  i++;
	      }

	    if (sched_verbose >= 5)
	      {
		if (!print_p)
		  {
		    fprintf (sched_dump, MODEL_BAR);
		    fprintf (sched_dump, ";;\t\t| New pressure for model"
			     " schedule\n");
		    fprintf (sched_dump, MODEL_BAR);
		    print_p = true;
		  }

		fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
			 point, INSN_UID (insn),
			 str_pattern_slim (PATTERN (insn)));
		for (pci = 0; pci < ira_pressure_classes_num; pci++)
		  {
		    cl = ira_pressure_classes[pci];
		    ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
						       point, pci);
		    fprintf (sched_dump, " %s:[%d->%d]",
			     reg_class_names[ira_pressure_classes[pci]],
			     ref_pressure, ref_pressure + delta[cl]);
		  }
		fprintf (sched_dump, "\n");
	      }
	  }

	/* Adjust the pressure at POINT.  Set MIX to nonzero if POINT - 1
	   might have changed as well.  */
	mix = num_pending_births;
	for (pci = 0; pci < ira_pressure_classes_num; pci++)
	  {
	    cl = ira_pressure_classes[pci];
	    mix |= delta[cl];
	    mix |= model_update_pressure (&model_before_pressure,
					  point, pci, delta[cl]);
	  }
      }
    while (mix && point > model_curr_point);

  if (print_p)
    fprintf (sched_dump, MODEL_BAR);
}
/* After DEP, which was cancelled, has been resolved for insn NEXT,
   check whether the insn's pattern needs restoring.  */

static bool
must_restore_pattern_p (rtx_insn *next, dep_t dep)
{
  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
    {
      gcc_assert (ORIG_PAT (next));
      return false;
    }

  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    {
      gcc_assert (ORIG_PAT (next) != NULL_RTX);
      gcc_assert (next == DEP_CON (dep));
    }
  else
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      if (desc->insn != next)
	{
	  gcc_assert (*desc->loc == desc->orig);
	  return false;
	}
    }
  return true;
}
/* model_spill_cost (CL, P, P') returns the cost of increasing the
   pressure on CL from P to P'.  We use this to calculate a "base ECC",
   baseECC (CL, X), for each pressure class CL and each instruction X.
   Supposing X changes the pressure on CL from P to P', and that the
   maximum pressure on CL in the current model schedule is MP', then:

   * if X occurs before or at the next point of maximum pressure in
     the model schedule and P' > MP', then:

       baseECC (CL, X) = model_spill_cost (CL, MP, P')

     The idea is that the pressure after scheduling a fixed set of
     instructions -- in this case, the set up to and including the
     next maximum pressure point -- is going to be the same regardless
     of the order; we simply want to keep the intermediate pressure
     under control.  Thus X has a cost of zero unless scheduling it
     now would exceed MP'.

     If all increases in the set are by the same amount, no zero-cost
     instruction will ever cause the pressure to exceed MP'.  However,
     if X is instead moved past an instruction X' with pressure in the
     range (MP' - (P' - P), MP'), the pressure at X' will increase
     beyond MP'.  Since baseECC is very much a heuristic anyway,
     it doesn't seem worth the overhead of tracking cases like these.

     The cost of exceeding MP' is always based on the original maximum
     pressure MP.  This is so that going 2 registers over the original
     limit has the same cost regardless of whether it comes from two
     separate +1 deltas or from a single +2 delta.

   * if X occurs after the next point of maximum pressure in the model
     schedule and P' > P, then:

       baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))

     That is, if we move X forward across a point of maximum pressure,
     and if X increases the pressure by P' - P, then we conservatively
     assume that scheduling X next would increase the maximum pressure
     by P' - P.  Again, the cost of doing this is based on the original
     maximum pressure MP, for the same reason as above.

   * if P' < P, P > MP, and X occurs at or after the next point of
     maximum pressure, then:

       baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)

     That is, if we have already exceeded the original maximum pressure MP,
     and if X might reduce the maximum pressure again -- or at least push
     it further back, and thus allow more scheduling freedom -- it is given
     a negative cost to reflect the improvement.

   * otherwise,

       baseECC (CL, X) = 0

     In this case, X is not expected to affect the maximum pressure MP',
     so it has zero cost.

   We then create a combined value baseECC (X) that is the sum of
   baseECC (CL, X) for each pressure class CL.

   baseECC (X) could itself be used as the ECC value described above.
   However, this is often too conservative, in the sense that it
   tends to make high-priority instructions that increase pressure
   wait too long in cases where introducing a spill would be better.
   For this reason the final ECC is a priority-adjusted form of
   baseECC (X).  Specifically, we calculate:

     P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
     baseP = MAX { P (X) | baseECC (X) <= 0 }

   Then:

     ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)

   Thus an instruction's effect on pressure is ignored if it has a high
   enough priority relative to the ones that don't increase pressure.
   Negative values of baseECC (X) do not increase the priority of X
   itself, but they do make it harder for other instructions to
   increase the pressure further.

   This pressure cost is deliberately timid.  The intention has been
   to choose a heuristic that rarely interferes with the normal list
   scheduler in cases where that scheduler would produce good code.
   We simply want to curb some of its worst excesses.  */
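
/* Worked example of the final calculation (illustrative numbers only):
   suppose the ready list holds X1 with INSN_PRIORITY 10, insn_delay 0
   and baseECC 0, and X2 with INSN_PRIORITY 14, insn_delay 0 and
   baseECC 3.  Then P (X1) = 10, P (X2) = 14 - 0 - 3 = 11 and baseP = 10.
   ECC (X2) = MAX (MIN (10 - 11, 3), 0) = 0, so X2's pressure cost is
   waived: its priority is high enough relative to the "cheap"
   instructions.  Had X2's priority been 8, P (X2) = 5 and
   ECC (X2) = MAX (MIN (10 - 5, 3), 0) = 3 would charge the full cost.  */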
/* Return the cost of increasing the pressure in class CL from FROM to TO.

   Here we use the very simplistic cost model that every register above
   sched_class_regs_num[CL] has a spill cost of 1.  We could use other
   measures instead, such as one based on MEMORY_MOVE_COST.  However:

   (1) In order for an instruction to be scheduled, the higher cost
       would need to be justified in a single saving of that many stalls.
       This is overly pessimistic, because the benefit of spilling is
       often to avoid a sequence of several short stalls rather than
       a single long one.

   (2) The cost is still arbitrary.  Because we are not allocating
       registers during scheduling, we have no way of knowing for
       sure how many memory accesses will be required by each spill,
       where the spills will be placed within the block, or even
       which block(s) will contain the spills.

   So a higher cost than 1 is often too conservative in practice,
   forcing blocks to contain unnecessary stalls instead of spill code.
   The simple cost below seems to be the best compromise.  It reduces
   the interference with the normal list scheduler, which helps make
   it more suitable for a default-on option.  */

static int
model_spill_cost (int cl, int from, int to)
{
  from = MAX (from, sched_class_regs_num[cl]);
  return MAX (to, from) - from;
}
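
/* For example (illustrative numbers): if sched_class_regs_num[CL] is 4,
   then model_spill_cost (CL, 3, 6) = MAX (6, 4) - 4 = 2: only the
   registers beyond the four the class can hold are charged.  A transition
   that stays within the limit, such as FROM = 2, TO = 4, costs 0.  */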
/* Return baseECC (ira_pressure_classes[PCI], POINT), given that
   P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
   P' = P + DELTA.  */

static int
model_excess_group_cost (struct model_pressure_group *group,
			 int point, int pci, int delta)
{
  int pressure, cl;

  cl = ira_pressure_classes[pci];
  if (delta < 0 && point >= group->limits[pci].point)
    {
      pressure = MAX (group->limits[pci].orig_pressure,
		      curr_reg_pressure[cl] + delta);
      return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
    }

  if (delta > 0)
    {
      if (point > group->limits[pci].point)
	pressure = group->limits[pci].pressure + delta;
      else
	pressure = curr_reg_pressure[cl] + delta;

      if (pressure > group->limits[pci].pressure)
	return model_spill_cost (cl, group->limits[pci].orig_pressure,
				 pressure);
    }

  return 0;
}
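
/* Illustrative example (made-up numbers): with orig_pressure 8, current
   maximum pressure 10 and the pressure point already behind us, DELTA +2
   after that point is costed as model_spill_cost (CL, 8, 12) -- relative
   to the original maximum -- while DELTA -2 at curr_reg_pressure 11
   yields -model_spill_cost (CL, MAX (8, 9), 11), a negative cost that
   rewards pushing the pressure back towards its original limit.  */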
/* Return baseECC (MODEL_INSN (INSN)).  Dump the costs to sched_dump
   if PRINT_P.  */

static int
model_excess_cost (rtx_insn *insn, bool print_p)
{
  int point, pci, cl, cost, this_cost, delta;
  struct reg_pressure_data *insn_reg_pressure;
  int insn_death[N_REG_CLASSES];

  calculate_reg_deaths (insn, insn_death);
  point = model_index (insn);
  insn_reg_pressure = INSN_REG_PRESSURE (insn);
  cost = 0;

  if (print_p)
    fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
	     INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));

  /* Sum up the individual costs for each register class.  */
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
      this_cost = model_excess_group_cost (&model_before_pressure,
					   point, pci, delta);
      cost += this_cost;
      if (print_p)
	fprintf (sched_dump, " %s:[%d base cost %d]",
		 reg_class_names[cl], delta, this_cost);
    }

  if (print_p)
    fprintf (sched_dump, "\n");

  return cost;
}
/* Dump the next points of maximum pressure for GROUP.  */

static void
model_dump_pressure_points (struct model_pressure_group *group)
{
  int pci, cl;

  fprintf (sched_dump, ";;\t\t|  pressure points");
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
	       curr_reg_pressure[cl], group->limits[pci].pressure);
      if (group->limits[pci].point < model_num_insns)
	fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
		 INSN_UID (MODEL_INSN (group->limits[pci].point)));
      else
	fprintf (sched_dump, "end]");
    }
  fprintf (sched_dump, "\n");
}
/* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1].  */

static void
model_set_excess_costs (rtx_insn **insns, int count)
{
  int i, cost, priority_base, priority;
  bool print_p;

  /* Record the baseECC value for each instruction in the model schedule,
     except that negative costs are converted to zero ones now rather than
     later.  Do not assign a cost to debug instructions, since they must
     not change code-generation decisions.  Experiments suggest we also
     get better results by not assigning a cost to instructions from
     a different block.

     Set PRIORITY_BASE to baseP in the block comment above.  This is the
     maximum priority of the "cheap" instructions, which should always
     include the next model instruction.  */
  priority_base = 0;
  print_p = false;
  for (i = 0; i < count; i++)
    if (INSN_MODEL_INDEX (insns[i]))
      {
	if (sched_verbose >= 6 && !print_p)
	  {
	    fprintf (sched_dump, MODEL_BAR);
	    fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
	    model_dump_pressure_points (&model_before_pressure);
	    fprintf (sched_dump, MODEL_BAR);
	    print_p = true;
	  }
	cost = model_excess_cost (insns[i], print_p);
	if (cost <= 0)
	  {
	    priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
	    priority_base = MAX (priority_base, priority);
	    cost = 0;
	  }
	INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
      }
  if (print_p)
    fprintf (sched_dump, MODEL_BAR);

  /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
     instruction.  */
  for (i = 0; i < count; i++)
    {
      cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
      priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
      if (cost > 0 && priority > priority_base)
	{
	  cost += priority_base - priority;
	  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
	}
    }
}
/* Enum of rank_for_schedule heuristic decisions.  */
enum rfs_decision {
  RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
  RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
  RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
  RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
  RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N };

/* Corresponding strings for print outs.  */
static const char *rfs_str[RFS_N] = {
  "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
  "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
  "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
  "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
  "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" };

/* Statistical breakdown of rank_for_schedule decisions.  */
struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
static rank_for_schedule_stats_t rank_for_schedule_stats;
/* Return the result of comparing insns TMP and TMP2 and update
   Rank_For_Schedule statistics.  */
static int
rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
{
  ++rank_for_schedule_stats.stats[decision];
  if (result < 0)
    INSN_LAST_RFS_WIN (tmp) = decision;
  else if (result > 0)
    INSN_LAST_RFS_WIN (tmp2) = decision;
  else
    gcc_unreachable ();
  return result;
}
/* Sorting predicate to move DEBUG_INSNs to the top of ready list, while
   keeping normal insns in original order.  */

static int
rank_for_schedule_debug (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;

  /* Schedule debug insns as early as possible.  */
  if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
    return -1;
  else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
    return 1;
  else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
    return INSN_LUID (tmp) - INSN_LUID (tmp2);
  else
    return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
}
/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;
  int tmp_class, tmp2_class;
  int val, priority_val, info_val, diff;

  if (live_range_shrinkage_p)
    {
      /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
	 code.  */
      gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
      if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
	   || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
	  && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		      - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
	return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
      /* Sort by INSN_LUID (original insn order), so that we make the
	 sort stable.  This minimizes instruction movement, thus
	 minimizing sched's effect on debugging and cross-jumping.  */
      return rfs_result (RFS_LIVE_RANGE_SHRINK2,
			 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
    }

  /* The insn in a schedule group should be issued first.  */
  if (flag_sched_group_heuristic
      && SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
		       tmp, tmp2);

  /* Make sure that priority of TMP and TMP2 are initialized.  */
  gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));

  if (sched_fusion)
    {
      /* The instruction that has the same fusion priority as the last
	 instruction is the instruction we picked next.  If that is not
	 the case, we sort the ready list first by fusion priority, then
	 by priority, and finally by INSN_LUID.  */
      int a = INSN_FUSION_PRIORITY (tmp);
      int b = INSN_FUSION_PRIORITY (tmp2);
      int last = -1;

      if (last_nondebug_scheduled_insn
	  && !NOTE_P (last_nondebug_scheduled_insn)
	  && BLOCK_FOR_INSN (tmp)
	     == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
	last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);

      if (a != last && b != last)
	{
	  if (a == b)
	    {
	      a = INSN_PRIORITY (tmp);
	      b = INSN_PRIORITY (tmp2);
	    }
	  if (a != b)
	    return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
	  else
	    return rfs_result (RFS_FUSION,
			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
	}
      else if (a == b)
	{
	  gcc_assert (last_nondebug_scheduled_insn
		      && !NOTE_P (last_nondebug_scheduled_insn));
	  last = INSN_PRIORITY (last_nondebug_scheduled_insn);

	  a = abs (INSN_PRIORITY (tmp) - last);
	  b = abs (INSN_PRIORITY (tmp2) - last);
	  if (a != b)
	    return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
	  else
	    return rfs_result (RFS_FUSION,
			       INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
	}
      else if (a == last)
	return rfs_result (RFS_FUSION, -1, tmp, tmp2);
      else
	return rfs_result (RFS_FUSION, 1, tmp, tmp2);
    }

  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      /* Prefer insn whose scheduling results in the smallest register
	 pressure excess.  */
      if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
		   + insn_delay (tmp)
		   - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
		   - insn_delay (tmp2))))
	return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
    }

  if (sched_pressure != SCHED_PRESSURE_NONE
      && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
      && INSN_TICK (tmp2) != INSN_TICK (tmp))
    {
      diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
      return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
    }

  /* If we are doing backtracking in this schedule, prefer insns that
     have forward dependencies with negative cost against an insn that
     was already scheduled.  */
  if (current_sched_info->flags & DO_BACKTRACKING)
    {
      priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
      if (priority_val)
	return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
    }

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (flag_sched_critical_path_heuristic && priority_val)
    return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);

  if (param_sched_autopref_queue_depth >= 0)
    {
      int autopref = autopref_rank_for_schedule (tmp, tmp2);
      if (autopref != 0)
	return autopref;
    }

  /* Prefer speculative insn with greater dependencies weakness.  */
  if (flag_sched_spec_insn_heuristic && spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = ds_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = ds_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
    }

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (flag_sched_rank_heuristic && info_val)
    return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);

  /* Compare insns based on their relation to the last scheduled
     non-debug insn.  */
  if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
    {
      dep_t dep1;
      dep_t dep2;
      rtx_insn *last = last_nondebug_scheduled_insn;

      /* Classify the instructions into three classes:
	 1) Data dependent on last scheduled insn.
	 2) Anti/Output dependent on last scheduled insn.
	 3) Independent of last scheduled insn, or has latency of one.
	 Choose the insn from the highest numbered class if different.  */
      dep1 = sd_find_dep_between (last, tmp, true);

      if (dep1 == NULL || dep_cost (dep1) == 1)
	tmp_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep1) == REG_DEP_TRUE)
	tmp_class = 1;
      else
	tmp_class = 2;

      dep2 = sd_find_dep_between (last, tmp2, true);

      if (dep2 == NULL || dep_cost (dep2) == 1)
	tmp2_class = 3;
      else if (/* Data dependence.  */
	       DEP_TYPE (dep2) == REG_DEP_TRUE)
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
    }

  /* Prefer instructions that occur earlier in the model schedule.  */
  if (sched_pressure == SCHED_PRESSURE_MODEL)
    {
      diff = model_index (tmp) - model_index (tmp2);
      if (diff != 0)
	return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */

  val = (dep_list_size (tmp2, SD_LIST_FORW)
	 - dep_list_size (tmp, SD_LIST_FORW));

  if (flag_sched_dep_count_heuristic && val != 0)
    return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);

  /* Sort by INSN_COST rather than INSN_LUID.  This means that instructions
     which take longer to execute are prioritised and it leads to more
     dual-issue opportunities on in-order cores which have this feature.  */
  if (INSN_COST (tmp) != INSN_COST (tmp2))
    return rfs_result (RFS_COST, INSN_COST (tmp2) - INSN_COST (tmp),
		       tmp, tmp2);

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
}
/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx_insn **a, int n)
{
  rtx_insn *insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}
/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  REASON will be printed in debugging
   output.  */

HAIFA_INLINE static void
queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
  int new_tick;

  gcc_assert (n_cycles <= max_insn_queue_index);
  gcc_assert (!DEBUG_INSN_P (insn));

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
    }

  QUEUE_INDEX (insn) = next_q;

  if (current_sched_info->flags & DO_BACKTRACKING)
    {
      new_tick = clock_var + n_cycles;
      if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
	INSN_TICK (insn) = new_tick;

      if (INSN_EXACT_TICK (insn) != INVALID_TICK
	  && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
	{
	  must_backtrack = true;
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
	}
    }
}
/* Remove INSN from queue.  */
static void
queue_remove (rtx_insn *insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

rtx_insn **
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}
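
/* Illustrative layout (made-up numbers): the ready list keeps the
   highest-priority insn at VEC[FIRST] and lower-priority insns at
   decreasing indices.  With veclen 8, first 6 and n_ready 3, the insns
   occupy indices 6 (best) down to 4 (worst), so ready_lastpos returns
   &vec[6 - 3 + 1] = &vec[4].  */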
/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;
  if (DEBUG_INSN_P (insn))
    ready->n_debug++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;

  if (INSN_EXACT_TICK (insn) != INVALID_TICK
      && INSN_EXACT_TICK (insn) < clock_var)
    {
      must_backtrack = true;
    }
}
/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx_insn *
ready_remove_first (struct ready_list *ready)
{
  rtx_insn *t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}
/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we will try to choose a ready insn which
   permits starting the maximum number of insns on the same cycle.  */

/* Return a pointer to the element INDEX from the ready list.  INDEX for
   the insn with the highest priority is 0, and the lowest priority has
   N_READY - 1.  */

rtx_insn *
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}
/* Remove the element INDEX from the ready list and return it.  INDEX
   for the insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx_insn *
ready_remove (struct ready_list *ready, int index)
{
  rtx_insn *t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  if (DEBUG_INSN_P (t))
    ready->n_debug--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}
/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx_insn *insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}
/* Calculate difference of two statistics set WAS and NOW.
   Result returned in WAS.  */
static void
rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
			      const rank_for_schedule_stats_t *now)
{
  for (int i = 0; i < RFS_N; ++i)
    was->stats[i] = now->stats[i] - was->stats[i];
}
/* Print rank_for_schedule statistics.  */
static void
print_rank_for_schedule_stats (const char *prefix,
			       const rank_for_schedule_stats_t *stats,
			       struct ready_list *ready)
{
  for (int i = 0; i < RFS_N; ++i)
    if (stats->stats[i])
      {
	fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i],
		 stats->stats[i]);

	if (ready != NULL)
	  {
	    /* Print out insns that won due to RFS_<I>.  */
	    rtx_insn **p = ready_lastpos (ready);

	    fprintf (sched_dump, ":");
	    /* Start with 1 since least-priority insn didn't have any wins.  */
	    for (int j = 1; j < ready->n_ready; ++j)
	      if (INSN_LAST_RFS_WIN (p[j]) == i)
		fprintf (sched_dump, " %s",
			 (*current_sched_info->print_insn) (p[j], 0));
	  }
	fprintf (sched_dump, "\n");
      }
}
/* Separate DEBUG_INSNS from normal insns.  DEBUG_INSNs go to the end
   of the array.  */
static void
ready_sort_debug (struct ready_list *ready)
{
  int i;
  rtx_insn **first = ready_lastpos (ready);

  for (i = 0; i < ready->n_ready; ++i)
    if (!DEBUG_INSN_P (first[i]))
      INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;

  qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
}
/* Sort non-debug insns in the ready list READY by ascending priority.
   Assumes that all debug insns are separated from the real insns.  */
static void
ready_sort_real (struct ready_list *ready)
{
  int i;
  rtx_insn **first = ready_lastpos (ready);
  int n_ready_real = ready->n_ready - ready->n_debug;

  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    for (i = 0; i < n_ready_real; ++i)
      setup_insn_reg_pressure_info (first[i]);
  else if (sched_pressure == SCHED_PRESSURE_MODEL
	   && model_curr_point < model_num_insns)
    model_set_excess_costs (first, n_ready_real);

  rank_for_schedule_stats_t stats1;
  if (sched_verbose >= 4)
    stats1 = rank_for_schedule_stats;

  if (n_ready_real == 2)
    swap_sort (first, n_ready_real);
  else if (n_ready_real > 2)
    qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule);

  if (sched_verbose >= 4)
    {
      rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
      print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
    }
}
/* Sort the ready list READY by ascending priority.  */
static void
ready_sort (struct ready_list *ready)
{
  if (ready->n_debug > 0)
    ready_sort_debug (ready);
  ready_sort_real (ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx_insn *prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}
/* Advance DFA state STATE on one cycle.  */
void
advance_state (state_t state)
{
  if (targetm.sched.dfa_pre_advance_cycle)
    targetm.sched.dfa_pre_advance_cycle ();

  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (state,
		      targetm.sched.dfa_post_cycle_insn ());

  if (targetm.sched.dfa_post_advance_cycle)
    targetm.sched.dfa_post_advance_cycle ();
}
/* Advance time on one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  advance_state (curr_state);
  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\tAdvance the current state.\n");
}
/* Update register pressure after scheduling INSN.  */
static void
update_register_pressure (rtx_insn *insn)
{
  struct reg_use_data *use;
  struct reg_set_data *set;

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				 use->regno, false);
  for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
    mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
			       set->regno, true);
}
/* Set up or update (if UPDATE_P) max register pressure (see its
   meaning in sched-int.h::_haifa_insn_data) for all current BB insns
   after insn AFTER.  */
static void
setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
{
  int i, p;
  bool eq_p;
  rtx_insn *insn;
  static int max_reg_pressure[N_REG_CLASSES];

  save_reg_pressure ();
  for (i = 0; i < ira_pressure_classes_num; i++)
    max_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  for (insn = NEXT_INSN (after);
       insn != NULL_RTX && ! BARRIER_P (insn)
	 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
       insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
	eq_p = true;
	for (i = 0; i < ira_pressure_classes_num; i++)
	  {
	    p = max_reg_pressure[ira_pressure_classes[i]];
	    if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
	      {
		eq_p = false;
		INSN_MAX_REG_PRESSURE (insn)[i]
		  = max_reg_pressure[ira_pressure_classes[i]];
	      }
	  }
	if (update_p && eq_p)
	  break;
	update_register_pressure (insn);
	for (i = 0; i < ira_pressure_classes_num; i++)
	  if (max_reg_pressure[ira_pressure_classes[i]]
	      < curr_reg_pressure[ira_pressure_classes[i]])
	    max_reg_pressure[ira_pressure_classes[i]]
	      = curr_reg_pressure[ira_pressure_classes[i]];
      }
  restore_reg_pressure ();
}
/* Update the current register pressure after scheduling INSN.  Also
   update the max register pressure for unscheduled insns of the current
   BB.  */
static void
update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
{
  int i;
  int before[N_REG_CLASSES];

  for (i = 0; i < ira_pressure_classes_num; i++)
    before[i] = curr_reg_pressure[ira_pressure_classes[i]];
  update_register_pressure (insn);
  for (i = 0; i < ira_pressure_classes_num; i++)
    if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
      break;
  if (i < ira_pressure_classes_num)
    setup_insn_max_reg_pressure (insn, true);
}
/* Set up register pressure at the beginning of basic block BB whose
   insns start after insn AFTER.  Also set up the max register pressure
   for all insns of the basic block.  */
void
sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
{
  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
  initiate_bb_reg_pressure_info (bb);
  setup_insn_max_reg_pressure (after, false);
}
/* If doing predication while scheduling, verify whether INSN, which
   has just been scheduled, clobbers the conditions of any
   instructions that must be predicated in order to break their
   dependencies.  If so, remove them from the queues so that they will
   only be scheduled once their control dependency is resolved.  */

static void
check_clobbered_conditions (rtx_insn *insn)
{
  HARD_REG_SET t;
  int i;

  if ((current_sched_info->flags & DO_PREDICATION) == 0)
    return;

  find_all_hard_reg_sets (insn, &t, true);

 restart:
  for (i = 0; i < ready.n_ready; i++)
    {
      rtx_insn *x = ready_element (&ready, i);
      if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
	{
	  ready_remove_insn (x);
	  goto restart;
	}
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      rtx_insn_list *link;
      int q = NEXT_Q_AFTER (q_ptr, i);

    restart_queue:
      for (link = insn_queue[q]; link; link = link->next ())
	{
	  rtx_insn *x = link->insn ();
	  if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
	    {
	      queue_remove (x);
	      goto restart_queue;
	    }
	}
    }
}
/* Return (in order):

   - positive if INSN adversely affects the pressure on one
     register class

   - negative if INSN reduces the pressure on one register class

   - 0 if INSN doesn't affect the pressure on any register class.  */

static int
model_classify_pressure (struct model_insn_info *insn)
{
  struct reg_pressure_data *reg_pressure;
  int death[N_REG_CLASSES];
  int pci, cl, sum;

  calculate_reg_deaths (insn->insn, death);
  reg_pressure = INSN_REG_PRESSURE (insn->insn);
  sum = 0;
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      if (death[cl] < reg_pressure[pci].set_increase)
	return 1;
      sum += reg_pressure[pci].set_increase - death[cl];
    }
  return sum;
}
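
/* Worked example (illustrative): an instruction that sets one new value
   in a class and kills two has set_increase 1 and death 2, contributing
   1 - 2 = -1 to the sum; if no other class is affected the function
   returns -1, meaning the insn reduces pressure.  Any class whose
   set_increase exceeds its death count makes the function return 1
   immediately.  */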
/* Return true if INSN1 should come before INSN2 in the model schedule.  */

static int
model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
{
  unsigned int height1, height2;
  unsigned int priority1, priority2;

  /* Prefer instructions with a higher model priority.  */
  if (insn1->model_priority != insn2->model_priority)
    return insn1->model_priority > insn2->model_priority;

  /* Combine the length of the longest path of satisfied true dependencies
     that leads to each instruction (depth) with the length of the longest
     path of any dependencies that leads from the instruction (alap).
     Prefer instructions with the greatest combined length.  If the combined
     lengths are equal, prefer instructions with the greatest depth.

     The idea is that, if we have a set S of "equal" instructions that each
     have ALAP value X, and we pick one such instruction I, any true-dependent
     successors of I that have ALAP value X - 1 should be preferred over S.
     This encourages the schedule to be "narrow" rather than "wide".
     However, if I is a low-priority instruction that we decided to
     schedule because of its model_classify_pressure, and if there
     is a set of higher-priority instructions T, the aforementioned
     successors of I should not have the edge over T.  */
  height1 = insn1->depth + insn1->alap;
  height2 = insn2->depth + insn2->alap;
  if (height1 != height2)
    return height1 > height2;
  if (insn1->depth != insn2->depth)
    return insn1->depth > insn2->depth;

  /* We have no real preference between INSN1 and INSN2 as far as attempts
     to reduce pressure go.  Prefer instructions with higher priorities.  */
  priority1 = INSN_PRIORITY (insn1->insn);
  priority2 = INSN_PRIORITY (insn2->insn);
  if (priority1 != priority2)
    return priority1 > priority2;

  /* Use the original rtl sequence as a tie-breaker.  */
  return insn1 < insn2;
}
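
/* Illustrative tie-break: two insns with equal model_priority, one with
   depth 3 and alap 2 and the other with depth 1 and alap 4, both have
   combined height 5, so the first wins on its greater depth, keeping
   the schedule "narrow" in the sense described above.  */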
/* Add INSN to the model worklist immediately after PREV.  Add it to the
   beginning of the list if PREV is null.  */

static void
model_add_to_worklist_at (struct model_insn_info *insn,
			  struct model_insn_info *prev)
{
  gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn->insn) = QUEUE_READY;

  insn->prev = prev;
  if (prev)
    {
      insn->next = prev->next;
      prev->next = insn;
    }
  else
    {
      insn->next = model_worklist;
      model_worklist = insn;
    }
  if (insn->next)
    insn->next->prev = insn;
}
/* Remove INSN from the model worklist.  */

static void
model_remove_from_worklist (struct model_insn_info *insn)
{
  gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
  QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;

  if (insn->prev)
    insn->prev->next = insn->next;
  else
    model_worklist = insn->next;
  if (insn->next)
    insn->next->prev = insn->prev;
}
/* Add INSN to the model worklist.  Start looking for a suitable position
   between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
   insns either side.  A null PREV indicates the beginning of the list and
   a null NEXT indicates the end.  */

static void
model_add_to_worklist (struct model_insn_info *insn,
		       struct model_insn_info *prev,
		       struct model_insn_info *next)
{
  int count;

  count = param_max_sched_ready_insns;
  if (count > 0 && prev && model_order_p (insn, prev))
    do
      {
	count--;
	prev = prev->prev;
      }
    while (count > 0 && prev && model_order_p (insn, prev));
  else
    while (count > 0 && next && model_order_p (next, insn))
      {
	count--;
	prev = next;
	next = next->next;
      }
  model_add_to_worklist_at (insn, prev);
}
/* INSN may now have a higher priority (in the model_order_p sense)
   than before.  Move it up the worklist if necessary.  */

static void
model_promote_insn (struct model_insn_info *insn)
{
  struct model_insn_info *prev;
  int count;

  prev = insn->prev;
  count = param_max_sched_ready_insns;
  while (count > 0 && prev && model_order_p (insn, prev))
    {
      count--;
      prev = prev->prev;
    }
  if (prev != insn->prev)
    {
      model_remove_from_worklist (insn);
      model_add_to_worklist_at (insn, prev);
    }
}
/* Add INSN to the end of the model schedule.  */

static void
model_add_to_schedule (rtx_insn *insn)
{
  unsigned int point;

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  point = model_schedule.length ();
  model_schedule.quick_push (insn);
  INSN_MODEL_INDEX (insn) = point + 1;
}
/* Analyze the instructions that are to be scheduled, setting up
   MODEL_INSN_INFO (...) and model_num_insns accordingly.  Add ready
   instructions to model_worklist.  */

static void
model_analyze_insns (void)
{
  rtx_insn *start, *end, *iter;
  sd_iterator_def sd_it;
  dep_t dep;
  struct model_insn_info *insn, *con;

  model_num_insns = 0;
  start = PREV_INSN (current_sched_info->next_tail);
  end = current_sched_info->prev_head;
  for (iter = start; iter != end; iter = PREV_INSN (iter))
    if (NONDEBUG_INSN_P (iter))
      {
	insn = MODEL_INSN_INFO (iter);
	insn->insn = iter;
	FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
	  {
	    con = MODEL_INSN_INFO (DEP_CON (dep));
	    if (con->insn && insn->alap < con->alap + 1)
	      insn->alap = con->alap + 1;
	  }

	insn->old_queue = QUEUE_INDEX (iter);
	QUEUE_INDEX (iter) = QUEUE_NOWHERE;

	insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
	if (insn->unscheduled_preds == 0)
	  model_add_to_worklist (insn, NULL, model_worklist);

	model_num_insns++;
      }
}
/* The global state describes the register pressure at the start of the
   model schedule.  Initialize GROUP accordingly.  */

static void
model_init_pressure_group (struct model_pressure_group *group)
{
  int pci, cl;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      group->limits[pci].pressure = curr_reg_pressure[cl];
      group->limits[pci].point = 0;
    }
  /* Use index model_num_insns to record the state after the last
     instruction in the model schedule.  */
  group->model = XNEWVEC (struct model_pressure_data,
			  (model_num_insns + 1) * ira_pressure_classes_num);
}
/* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
   Update the maximum pressure for the whole schedule.  */

static void
model_record_pressure (struct model_pressure_group *group,
		       int point, int pci, int pressure)
{
  MODEL_REF_PRESSURE (group, point, pci) = pressure;
  if (group->limits[pci].pressure < pressure)
    {
      group->limits[pci].pressure = pressure;
      group->limits[pci].point = point;
    }
}
/* INSN has just been added to the end of the model schedule.  Record its
   register-pressure information.  */

static void
model_record_pressures (struct model_insn_info *insn)
{
  struct reg_pressure_data *reg_pressure;
  int point, pci, cl, delta;
  int death[N_REG_CLASSES];

  point = model_index (insn->insn);
  if (sched_verbose >= 2)
    {
      if (point == 0)
	{
	  fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
	  fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
	}
      fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
	       point, INSN_UID (insn->insn), insn->model_priority,
	       insn->depth + insn->alap, insn->depth,
	       INSN_PRIORITY (insn->insn),
	       str_pattern_slim (PATTERN (insn->insn)));
    }
  calculate_reg_deaths (insn->insn, death);
  reg_pressure = INSN_REG_PRESSURE (insn->insn);
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      delta = reg_pressure[pci].set_increase - death[cl];
      if (sched_verbose >= 2)
	fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
		 curr_reg_pressure[cl], delta);
      model_record_pressure (&model_before_pressure, point, pci,
			     curr_reg_pressure[cl]);
    }
  if (sched_verbose >= 2)
    fprintf (sched_dump, "\n");
}
/* All instructions have been added to the model schedule.  Record the
   final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs.  */

static void
model_record_final_pressures (struct model_pressure_group *group)
{
  int point, pci, max_pressure, ref_pressure, cl;

  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      /* Record the final pressure for this class.  */
      cl = ira_pressure_classes[pci];
      point = model_num_insns;
      ref_pressure = curr_reg_pressure[cl];
      model_record_pressure (group, point, pci, ref_pressure);

      /* Record the original maximum pressure.  */
      group->limits[pci].orig_pressure = group->limits[pci].pressure;

      /* Update the MODEL_MAX_PRESSURE for every point of the schedule.  */
      max_pressure = ref_pressure;
      MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
      while (point > 0)
	{
	  point--;
	  ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
	  max_pressure = MAX (max_pressure, ref_pressure);
	  MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
	}
    }
}
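
/* Illustrative trace (made-up numbers): if the recorded reference
   pressures at points 0..2 are {2, 5, 4} and the end-of-schedule pressure
   (point 3) is 3, the backward walk sets MODEL_MAX_PRESSURE at points
   0..3 to {5, 5, 4, 3}: each point records the maximum reference pressure
   at or after it.  */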
/* Update all successors of INSN, given that INSN has just been scheduled.  */

static void
model_add_successors_to_worklist (struct model_insn_info *insn)
{
  sd_iterator_def sd_it;
  struct model_insn_info *con;
  dep_t dep;

  FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
    {
      con = MODEL_INSN_INFO (DEP_CON (dep));
      /* Ignore debug instructions, and instructions from other blocks.  */
      if (con->insn)
	{
	  con->unscheduled_preds--;

	  /* Update the depth field of each true-dependent successor.
	     Increasing the depth gives them a higher priority than
	     before.  */
	  if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
	    {
	      con->depth = insn->depth + 1;
	      if (QUEUE_INDEX (con->insn) == QUEUE_READY)
		model_promote_insn (con);
	    }

	  /* If this is a true dependency, or if there are no remaining
	     dependencies for CON (meaning that CON only had non-true
	     dependencies), make sure that CON is on the worklist.
	     We don't bother otherwise because it would tend to fill the
	     worklist with a lot of low-priority instructions that are not
	     yet ready to issue.  */
	  if ((con->depth > 0 || con->unscheduled_preds == 0)
	      && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
	    model_add_to_worklist (con, insn, insn->next);
	}
    }
}
/* Give INSN a higher priority than any current instruction, then give
   unscheduled predecessors of INSN a higher priority still.  If any of
   those predecessors are not on the model worklist, do the same for its
   predecessors, and so on.  */

static void
model_promote_predecessors (struct model_insn_info *insn)
{
  struct model_insn_info *pro, *first;
  sd_iterator_def sd_it;
  dep_t dep;

  if (sched_verbose >= 7)
    fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
	     INSN_UID (insn->insn), model_next_priority);
  insn->model_priority = model_next_priority++;
  model_remove_from_worklist (insn);
  model_add_to_worklist_at (insn, NULL);

  first = NULL;
  for (;;)
    {
      FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
	{
	  pro = MODEL_INSN_INFO (DEP_PRO (dep));
	  /* The first test is to ignore debug instructions, and instructions
	     from other blocks.  */
	  if (pro->insn
	      && pro->model_priority != model_next_priority
	      && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
	    {
	      pro->model_priority = model_next_priority;
	      if (sched_verbose >= 7)
		fprintf (sched_dump, " %d", INSN_UID (pro->insn));
	      if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
		{
		  /* PRO is already in the worklist, but it now has
		     a higher priority than before.  Move it at the
		     appropriate place.  */
		  model_remove_from_worklist (pro);
		  model_add_to_worklist (pro, NULL, model_worklist);
		}
	      else
		{
		  /* PRO isn't in the worklist.  Recursively process
		     its predecessors until we find one that is.  */
		  pro->next = first;
		  first = pro;
		}
	    }
	}
      if (!first)
	break;
      insn = first;
      first = insn->next;
    }
  if (sched_verbose >= 7)
    fprintf (sched_dump, " = %d\n", model_next_priority);
  model_next_priority++;
}
/* Pick one instruction from model_worklist and process it.  */

static void
model_choose_insn (void)
{
  struct model_insn_info *insn, *fallback;
  int count;

  if (sched_verbose >= 7)
    {
      fprintf (sched_dump, ";;\t+--- worklist:\n");
      insn = model_worklist;
      count = param_max_sched_ready_insns;
      while (count > 0 && insn)
        {
          fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
                   INSN_UID (insn->insn), insn->model_priority,
                   insn->depth + insn->alap, insn->depth,
                   INSN_PRIORITY (insn->insn));
          count--;
          insn = insn->next;
        }
    }

  /* Look for a ready instruction whose model_classify_priority is zero
     or negative, picking the highest-priority one.  Adding such an
     instruction to the schedule now should do no harm, and may actually
     do some good.

     Failing that, see whether there is an instruction with the highest
     extant model_priority that is not yet ready, but which would reduce
     pressure if it became ready.  This is designed to catch cases like:

       (set (mem (reg R1)) (reg R2))

     where the instruction is the last remaining use of R1 and where the
     value of R2 is not yet available (or vice versa).  The death of R1
     means that this instruction already reduces pressure.  It is of
     course possible that the computation of R2 involves other registers
     that are hard to kill, but such cases are rare enough for this
     heuristic to be a win in general.

     Failing that, just pick the highest-priority instruction in the
     worklist.  */
  count = param_max_sched_ready_insns;
  insn = model_worklist;
  fallback = 0;
  for (;;)
    {
      if (count == 0 || !insn)
        {
          insn = fallback ? fallback : model_worklist;
          break;
        }
      if (insn->unscheduled_preds)
        {
          if (model_worklist->model_priority == insn->model_priority
              && !fallback
              && model_classify_pressure (insn) < 0)
            fallback = insn;
        }
      else
        {
          if (model_classify_pressure (insn) <= 0)
            break;
        }
      count--;
      insn = insn->next;
    }

  if (sched_verbose >= 7 && insn != model_worklist)
    {
      if (insn->unscheduled_preds)
        fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
                 INSN_UID (insn->insn));
      else
        fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
                 INSN_UID (insn->insn));
    }

  if (insn->unscheduled_preds)
    /* INSN isn't yet ready to issue.  Give all its predecessors the
       highest priority.  */
    model_promote_predecessors (insn);
  else
    {
      /* INSN is ready.  Add it to the end of model_schedule and
         process its successors.  */
      model_add_successors_to_worklist (insn);
      model_remove_from_worklist (insn);
      model_add_to_schedule (insn->insn);
      model_record_pressures (insn);
      update_register_pressure (insn->insn);
    }
}
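/* Exposition-only sketch (not part of the original pass): the selection
   loop above, reduced to a toy worklist.  The toy_* names are hypothetical;
   the real code uses model_classify_pressure and unscheduled_preds.  Guarded
   by "#if 0" so it is never compiled.  */
#if 0
struct toy_insn
{
  int priority;             /* model_priority analogue.  */
  int pressure_delta;       /* model_classify_pressure analogue.  */
  int unscheduled_preds;    /* nonzero if not yet ready to issue.  */
  struct toy_insn *next;
};

/* Scan at most COUNT worklist entries.  Prefer a ready insn that does not
   increase pressure; failing that, fall back to a top-priority insn that
   would reduce pressure once ready; otherwise take the worklist head.  */
static struct toy_insn *
toy_choose (struct toy_insn *worklist, int count)
{
  struct toy_insn *insn = worklist, *fallback = 0;

  while (count > 0 && insn)
    {
      if (insn->unscheduled_preds)
        {
          if (worklist->priority == insn->priority
              && !fallback
              && insn->pressure_delta < 0)
            fallback = insn;
        }
      else if (insn->pressure_delta <= 0)
        return insn;
      count--;
      insn = insn->next;
    }
  return fallback ? fallback : worklist;
}
#endif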
/* Restore all QUEUE_INDEXs to the values that they had before
   model_start_schedule was called.  */

static void
model_reset_queue_indices (void)
{
  unsigned int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (model_schedule, i, insn)
    QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
}
/* We have calculated the model schedule and spill costs.  Print a summary
   to sched_dump.  */

static void
model_dump_pressure_summary (void)
{
  int pci, cl;

  fprintf (sched_dump, ";; Pressure summary:");
  for (pci = 0; pci < ira_pressure_classes_num; pci++)
    {
      cl = ira_pressure_classes[pci];
      fprintf (sched_dump, " %s:%d", reg_class_names[cl],
               model_before_pressure.limits[pci].pressure);
    }
  fprintf (sched_dump, "\n\n");
}
/* Initialize the SCHED_PRESSURE_MODEL information for the current
   scheduling region.  */

static void
model_start_schedule (basic_block bb)
{
  model_next_priority = 1;
  model_schedule.create (sched_max_luid);
  model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);

  gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
  initiate_reg_pressure_info (df_get_live_in (bb));

  model_analyze_insns ();
  model_init_pressure_group (&model_before_pressure);
  while (model_worklist)
    model_choose_insn ();
  gcc_assert (model_num_insns == (int) model_schedule.length ());
  if (sched_verbose >= 2)
    fprintf (sched_dump, "\n");

  model_record_final_pressures (&model_before_pressure);
  model_reset_queue_indices ();

  XDELETEVEC (model_insns);

  model_curr_point = 0;
  initiate_reg_pressure_info (df_get_live_in (bb));
  if (sched_verbose >= 1)
    model_dump_pressure_summary ();
}
/* Free the information associated with GROUP.  */

static void
model_finalize_pressure_group (struct model_pressure_group *group)
{
  XDELETEVEC (group->model);
}
/* Free the information created by model_start_schedule.  */

static void
model_end_schedule (void)
{
  model_finalize_pressure_group (&model_before_pressure);
  model_schedule.release ();
}
/* Prepare reg pressure scheduling for basic block BB.  */

static void
sched_pressure_start_bb (basic_block bb)
{
  /* Set the number of available registers for each class taking into account
     relative probability of current basic block versus function prologue and
     epilogue.
     * If the basic block executes much more often than the prologue/epilogue
     (e.g., inside a hot loop), then cost of spill in the prologue is close to
     nil, so the effective number of available registers is
     (ira_class_hard_regs_num[cl] - fixed_regs_num[cl] - 0).
     * If the basic block executes as often as the prologue/epilogue,
     then spill in the block is as costly as in the prologue, so the effective
     number of available registers is
     (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
      - call_saved_regs_num[cl]).
     Note that all-else-equal, we prefer to spill in the prologue, since that
     allows "extra" registers for other basic blocks of the function.
     * If the basic block is on the cold path of the function and executes
     rarely, then we should always prefer to spill in the block, rather than
     in the prologue/epilogue.  The effective number of available register is
     (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
      - call_saved_regs_num[cl]).  */
  int i;
  int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun);
  int bb_freq = bb->count.to_frequency (cfun);

  if (bb_freq == 0)
    {
      if (entry_freq == 0)
        entry_freq = bb_freq = 1;
    }
  if (bb_freq < entry_freq)
    bb_freq = entry_freq;

  for (i = 0; i < ira_pressure_classes_num; ++i)
    {
      enum reg_class cl = ira_pressure_classes[i];
      sched_class_regs_num[cl] = ira_class_hard_regs_num[cl]
                                 - fixed_regs_num[cl];
      sched_class_regs_num[cl]
        -= (call_saved_regs_num[cl] * entry_freq) / bb_freq;
    }

  if (sched_pressure == SCHED_PRESSURE_MODEL)
    model_start_schedule (bb);
}
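/* Worked example for the computation above (hypothetical numbers, not from
   any real target): with ira_class_hard_regs_num[cl] = 16,
   fixed_regs_num[cl] = 2, call_saved_regs_num[cl] = 8 and entry_freq = 100:
     - hot block, bb_freq = 1000:  16 - 2 - (8 * 100) / 1000 = 14 - 0 = 14
     - bb_freq = entry_freq = 100: 16 - 2 - (8 * 100) / 100  = 14 - 8 = 6
   A minimal standalone sketch of the same integer arithmetic (kept under
   "#if 0" so it is never compiled):  */
#if 0
static int
toy_effective_regs (int hard_regs, int fixed_regs, int call_saved_regs,
                    int entry_freq, int bb_freq)
{
  int n = hard_regs - fixed_regs;
  /* The hotter the block relative to the prologue/epilogue, the smaller
     the deduction for call-saved registers.  */
  n -= (call_saved_regs * entry_freq) / bb_freq;
  return n;
}
#endif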
/* A structure that holds local state for the loop in schedule_block.  */
struct sched_block_state
{
  /* True if no real insns have been scheduled in the current cycle.  */
  bool first_cycle_insn_p;
  /* True if a shadow insn has been scheduled in the current cycle, which
     means that no more normal insns can be issued.  */
  bool shadows_only_p;
  /* True if we're winding down a modulo schedule, which means that we only
     issue insns with INSN_EXACT_TICK set.  */
  bool modulo_epilogue;
  /* Initialized with the machine's issue rate every cycle, and updated
     by calls to the variable_issue hook.  */
  int can_issue_more;
};
/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  READY is the ready list which contains the insns
   that are ready to fire.  CLOCK is the current cycle.  The function
   returns necessary cycle advance after issuing the insn (it is not
   zero for insns in a schedule group).  */

static int
schedule_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int i;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      struct reg_pressure_data *pressure_info;
      fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
               clock_var, (*current_sched_info->print_insn) (insn, 1),
               str_pattern_slim (PATTERN (insn)));

      if (recog_memoized (insn) < 0)
        fprintf (sched_dump, "nothing");
      else
        print_reservation (sched_dump, insn);
      pressure_info = INSN_REG_PRESSURE (insn);
      if (pressure_info != NULL)
        {
          fputc (':', sched_dump);
          for (i = 0; i < ira_pressure_classes_num; i++)
            fprintf (sched_dump, "%s%s%+d(%d)",
                     scheduled_insns.length () > 1
                     && INSN_LUID (insn)
                     < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
                     reg_class_names[ira_pressure_classes[i]],
                     pressure_info[i].set_increase, pressure_info[i].change);
        }
      if (sched_pressure == SCHED_PRESSURE_MODEL
          && model_curr_point < model_num_insns
          && model_index (insn) == model_curr_point)
        fprintf (sched_dump, ":model %d", model_curr_point);
      fputc ('\n', sched_dump);
    }

  if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
    update_reg_and_insn_max_reg_pressure (insn);

  /* Scheduling instruction should have all its dependencies resolved and
     should have been removed from the ready list.  */
  gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));

  /* Reset debug insns invalidated by moving this insn.  */
  if (MAY_HAVE_DEBUG_BIND_INSNS && !DEBUG_INSN_P (insn))
    for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
         sd_iterator_cond (&sd_it, &dep);)
      {
        rtx_insn *dbg = DEP_PRO (dep);
        struct reg_use_data *use, *next;

        if (DEP_STATUS (dep) & DEP_CANCELLED)
          {
            sd_iterator_next (&sd_it);
            continue;
          }

        gcc_assert (DEBUG_BIND_INSN_P (dbg));

        if (sched_verbose >= 6)
          fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
                   INSN_UID (dbg));

        /* ??? Rather than resetting the debug insn, we might be able
           to emit a debug temp before the just-scheduled insn, but
           this would involve checking that the expression at the
           point of the debug insn is equivalent to the expression
           before the just-scheduled insn.  They might not be: the
           expression in the debug insn may depend on other insns not
           yet scheduled that set MEMs, REGs or even other debug
           insns.  It's not clear that attempting to preserve debug
           information in these cases is worth the effort, given how
           uncommon these resets are and the likelihood that the debug
           temps introduced won't survive the schedule change.  */
        INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
        df_insn_rescan (dbg);

        /* Unknown location doesn't use any registers.  */
        for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
          {
            struct reg_use_data *prev = use;

            /* Remove use from the cyclic next_regno_use chain first.  */
            while (prev->next_regno_use != use)
              prev = prev->next_regno_use;
            prev->next_regno_use = use->next_regno_use;
            next = use->next_insn_use;
            free (use);
          }
        INSN_REG_USE_LIST (dbg) = NULL;

        /* We delete rather than resolve these deps, otherwise we
           crash in sched_free_deps(), because forward deps are
           expected to be released before backward deps.  */
        sd_delete_dep (sd_it);
      }

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  if (sched_pressure == SCHED_PRESSURE_MODEL
      && model_curr_point < model_num_insns
      && NONDEBUG_INSN_P (insn))
    {
      if (model_index (insn) == model_curr_point)
        do
          model_curr_point++;
        while (model_curr_point < model_num_insns
               && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
                   == QUEUE_SCHEDULED));
      else
        model_recompute (insn);
      model_update_limit_points ();
      update_register_pressure (insn);
      if (sched_verbose >= 2)
        print_curr_reg_pressure ();
    }

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flags are set.  */
    gcc_assert (flag_sched_stalled_insns || sched_fusion);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  check_clobbered_conditions (insn);

  /* Update dependent instructions.  First, see if by scheduling this insn
     now we broke a dependence in a way that requires us to change another
     insn.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      rtx_insn *pro = DEP_PRO (dep);
      if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
          && desc != NULL && desc->insn == pro)
        apply_replacement (dep, false);
    }

  /* Go through and resolve forward dependencies.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx_insn *next = DEP_CON (dep);
      bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;

      /* Resolve the dependence between INSN and NEXT.
         sd_resolve_dep () moves current dep to another list thus
         advancing the iterator.  */
      sd_resolve_dep (sd_it);

      if (cancelled)
        {
          if (must_restore_pattern_p (next, dep))
            restore_pattern (dep, false);
          continue;
        }

      /* Don't bother trying to mark next as ready if insn is a debug
         insn.  If insn is the last hard dependency, it will have
         already been discounted.  */
      if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
        continue;

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
        {
          int effective_cost;

          effective_cost = try_ready (next);

          if (effective_cost >= 0
              && SCHED_GROUP_P (next)
              && advance < effective_cost)
            advance = effective_cost;
        }
      else
        /* Check always has only one forward dependence (to the first insn in
           the recovery block), therefore, this will be executed only once.  */
        {
          gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
          fix_recovery_deps (RECOVERY_BLOCK (insn));
        }
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER
      && !DEBUG_INSN_P (insn))
    {
      if (reload_completed)
        PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  if (nonscheduled_insns_begin != NULL_RTX)
    /* Indicate to debug counters that INSN is scheduled.  */
    nonscheduled_insns_begin = insn;

  return advance;
}
/* Functions for handling of notes.  */

/* Add note list that ends on FROM_END to the end of TO_ENDP.  */

void
concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
{
  rtx_insn *from_start;

  /* It's easy when we have nothing to concat.  */
  if (from_end == NULL)
    return;

  /* It's also easy when the destination is empty.  */
  if (*to_endp == NULL)
    {
      *to_endp = from_end;
      return;
    }

  from_start = from_end;
  while (PREV_INSN (from_start) != NULL)
    from_start = PREV_INSN (from_start);

  SET_PREV_INSN (from_start) = *to_endp;
  SET_NEXT_INSN (*to_endp) = from_start;
  *to_endp = from_end;
}
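/* Exposition-only sketch of the splice above on a toy doubly-linked list
   (hypothetical toy_* names; prev/next stand in for SET_PREV_INSN and
   SET_NEXT_INSN).  Guarded by "#if 0" so it is never compiled.  */
#if 0
struct toy_note { struct toy_note *prev, *next; };

static void
toy_concat (struct toy_note *from_end, struct toy_note **to_endp)
{
  struct toy_note *from_start;

  if (from_end == 0)
    return;
  if (*to_endp == 0)
    {
      *to_endp = from_end;
      return;
    }
  /* Rewind to the head of the FROM list, then hook that head after the
     current end of the destination list.  */
  for (from_start = from_end; from_start->prev; )
    from_start = from_start->prev;
  from_start->prev = *to_endp;
  (*to_endp)->next = from_start;
  *to_endp = from_end;
}
#endif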
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */
void
remove_notes (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *next_tail, *insn, *next;

  note_list = NULL;
  if (head == tail && !INSN_P (head))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = next)
    {
      next = NEXT_INSN (insn);
      if (!NOTE_P (insn))
        continue;

      switch (NOTE_KIND (insn))
        {
        case NOTE_INSN_BASIC_BLOCK:
          continue;

        case NOTE_INSN_EPILOGUE_BEG:
          if (insn != tail)
            {
              remove_insn (insn);
              add_reg_note (next, REG_SAVE_NOTE,
                            GEN_INT (NOTE_INSN_EPILOGUE_BEG));
              break;
            }
          /* FALLTHRU */

        default:
          remove_insn (insn);

          /* Add the note to list that ends at NOTE_LIST.  */
          SET_PREV_INSN (insn) = note_list;
          SET_NEXT_INSN (insn) = NULL_RTX;
          if (note_list)
            SET_NEXT_INSN (note_list) = insn;
          note_list = insn;
          break;
        }

      gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
    }
}
/* A structure to record enough data to allow us to backtrack the scheduler to
   a previous state.  */
struct haifa_saved_data
{
  /* Next entry on the list.  */
  struct haifa_saved_data *next;

  /* Backtracking is associated with scheduling insns that have delay slots.
     DELAY_PAIR points to the structure that contains the insns involved, and
     the number of cycles between them.  */
  struct delay_pair *delay_pair;

  /* Data used by the frontend (e.g. sched-ebb or sched-rgn).  */
  void *fe_saved_data;
  /* Data used by the backend.  */
  void *be_saved_data;

  /* Copies of global state.  */
  int clock_var, last_clock_var;
  struct ready_list ready;
  state_t curr_state;

  rtx_insn *last_scheduled_insn;
  rtx_insn *last_nondebug_scheduled_insn;
  rtx_insn *nonscheduled_insns_begin;
  int cycle_issued_insns;

  /* Copies of state used in the inner loop of schedule_block.  */
  struct sched_block_state sched_block;

  /* We don't need to save q_ptr, as its value is arbitrary and we can set it
     to 0 when restoring.  */
  int q_size;
  rtx_insn_list **insn_queue;

  /* Describe pattern replacements that occurred since this backtrack point
     was queued.  */
  vec<dep_t> replacement_deps;
  vec<int> replace_apply;

  /* A copy of the next-cycle replacement vectors at the time of the backtrack
     point.  */
  vec<dep_t> next_cycle_deps;
  vec<int> next_cycle_apply;
};

/* A record, in reverse order, of all scheduled insns which have delay slots
   and may require backtracking.  */
static struct haifa_saved_data *backtrack_queue;
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
   to SET_P.  */
static void
mark_backtrack_feeds (rtx_insn *insn, int set_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
    {
      FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
    }
}
/* Save the current scheduler state so that we can backtrack to it
   later if necessary.  PAIR gives the insns that make it necessary to
   save this point.  SCHED_BLOCK is the local state of schedule_block
   that needs to be saved.  */
static void
save_backtrack_point (struct delay_pair *pair,
                      struct sched_block_state sched_block)
{
  int i;
  struct haifa_saved_data *save = XNEW (struct haifa_saved_data);

  save->curr_state = xmalloc (dfa_state_size);
  memcpy (save->curr_state, curr_state, dfa_state_size);

  save->ready.first = ready.first;
  save->ready.n_ready = ready.n_ready;
  save->ready.n_debug = ready.n_debug;
  save->ready.veclen = ready.veclen;
  save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
  memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));

  save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
  save->q_size = q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);
      save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
    }

  save->clock_var = clock_var;
  save->last_clock_var = last_clock_var;
  save->cycle_issued_insns = cycle_issued_insns;
  save->last_scheduled_insn = last_scheduled_insn;
  save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
  save->nonscheduled_insns_begin = nonscheduled_insns_begin;

  save->sched_block = sched_block;

  save->replacement_deps.create (0);
  save->replace_apply.create (0);
  save->next_cycle_deps = next_cycle_replace_deps.copy ();
  save->next_cycle_apply = next_cycle_apply.copy ();

  if (current_sched_info->save_state)
    save->fe_saved_data = (*current_sched_info->save_state) ();

  if (targetm.sched.alloc_sched_context)
    {
      save->be_saved_data = targetm.sched.alloc_sched_context ();
      targetm.sched.init_sched_context (save->be_saved_data, false);
    }
  else
    save->be_saved_data = NULL;

  save->delay_pair = pair;

  save->next = backtrack_queue;
  backtrack_queue = save;

  while (pair)
    {
      mark_backtrack_feeds (pair->i2, 1);
      INSN_TICK (pair->i2) = INVALID_TICK;
      INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
      SHADOW_P (pair->i2) = pair->stages == 0;
      pair = pair->next_same_i1;
    }
}
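/* Exposition-only sketch (hypothetical toy_* names) of the LIFO discipline
   used here: each backtrack point snapshots opaque state and is pushed on a
   singly-linked stack, mirroring backtrack_queue above.  Guarded by
   "#if 0" so it is never compiled.  */
#if 0
#include <stdlib.h>
#include <string.h>

struct toy_save
{
  struct toy_save *next;        /* stack link */
  int clock_var;                /* copied scalar state */
  char *dfa_state;              /* opaque blob of dfa_state_size bytes */
};

static struct toy_save *toy_backtrack_queue;

static void
toy_save_point (const char *curr_state, size_t dfa_state_size, int clock_var)
{
  struct toy_save *save = (struct toy_save *) malloc (sizeof *save);

  save->dfa_state = (char *) malloc (dfa_state_size);
  memcpy (save->dfa_state, curr_state, dfa_state_size);
  save->clock_var = clock_var;
  save->next = toy_backtrack_queue;     /* push; restore pops LIFO */
  toy_backtrack_queue = save;
}
#endif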
/* Walk the ready list and all queues.  If any insns have unresolved backwards
   dependencies, these must be cancelled deps, broken by predication.  Set or
   clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS.  */

static void
toggle_cancelled_flags (bool set)
{
  int i;
  sd_iterator_def sd_it;
  dep_t dep;

  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
          if (!DEBUG_INSN_P (DEP_PRO (dep)))
            {
              if (set)
                DEP_STATUS (dep) |= DEP_CANCELLED;
              else
                DEP_STATUS (dep) &= ~DEP_CANCELLED;
            }
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);
      rtx_insn_list *link;
      for (link = insn_queue[q]; link; link = link->next ())
        {
          rtx_insn *insn = link->insn ();
          FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
            if (!DEBUG_INSN_P (DEP_PRO (dep)))
              {
                if (set)
                  DEP_STATUS (dep) |= DEP_CANCELLED;
                else
                  DEP_STATUS (dep) &= ~DEP_CANCELLED;
              }
        }
    }
}
/* Undo the replacements that have occurred after backtrack point SAVE
   was placed.  */
static void
undo_replacements_for_backtrack (struct haifa_saved_data *save)
{
  while (!save->replacement_deps.is_empty ())
    {
      dep_t dep = save->replacement_deps.pop ();
      int apply_p = save->replace_apply.pop ();

      if (apply_p)
        restore_pattern (dep, true);
      else
        apply_replacement (dep, true);
    }
  save->replacement_deps.release ();
  save->replace_apply.release ();
}
/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
   Restore their dependencies to an unresolved state, and mark them as
   queued nowhere.  */

static void
unschedule_insns_until (rtx_insn *insn)
{
  auto_vec<rtx_insn *> recompute_vec;

  /* Make two passes over the insns to be unscheduled.  First, we clear out
     dependencies and other trivial bookkeeping.  */
  for (;;)
    {
      rtx_insn *last;
      sd_iterator_def sd_it;
      dep_t dep;

      last = scheduled_insns.pop ();

      /* This will be changed by restore_backtrack_point if the insn is in
         any queue.  */
      QUEUE_INDEX (last) = QUEUE_NOWHERE;
      if (last != insn)
        INSN_TICK (last) = INVALID_TICK;

      if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
        modulo_insns_scheduled--;

      for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
           sd_iterator_cond (&sd_it, &dep);)
        {
          rtx_insn *con = DEP_CON (dep);
          sd_unresolve_dep (sd_it);
          if (!MUST_RECOMPUTE_SPEC_P (con))
            {
              MUST_RECOMPUTE_SPEC_P (con) = 1;
              recompute_vec.safe_push (con);
            }
        }

      if (last == insn)
        break;
    }

  /* A second pass, to update ready and speculation status for insns
     depending on the unscheduled ones.  The first pass must have
     popped the scheduled_insns vector up to the point where we
     restart scheduling, as recompute_todo_spec requires it to be
     up-to-date.  */
  while (!recompute_vec.is_empty ())
    {
      rtx_insn *con;

      con = recompute_vec.pop ();
      MUST_RECOMPUTE_SPEC_P (con) = 0;
      if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
        {
          TODO_SPEC (con) = HARD_DEP;
          INSN_TICK (con) = INVALID_TICK;
          if (PREDICATED_PAT (con) != NULL_RTX)
            haifa_change_pattern (con, ORIG_PAT (con));
        }
      else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
        TODO_SPEC (con) = recompute_todo_spec (con, true);
    }
}
/* Restore scheduler state from the topmost entry on the backtracking queue.
   PSCHED_BLOCK_P points to the local data of schedule_block that we must
   overwrite with the saved data.
   The caller must already have called unschedule_insns_until.  */

static void
restore_last_backtrack_point (struct sched_block_state *psched_block)
{
  int i;
  struct haifa_saved_data *save = backtrack_queue;

  backtrack_queue = save->next;

  if (current_sched_info->restore_state)
    (*current_sched_info->restore_state) (save->fe_saved_data);

  if (targetm.sched.alloc_sched_context)
    {
      targetm.sched.set_sched_context (save->be_saved_data);
      targetm.sched.free_sched_context (save->be_saved_data);
    }

  /* Do this first since it clobbers INSN_TICK of the involved
     instructions.  */
  undo_replacements_for_backtrack (save);

  /* Clear the QUEUE_INDEX of everything in the ready list or one
     of the queues.  */
  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        {
          rtx_insn *insn = first[i];
          QUEUE_INDEX (insn) = QUEUE_NOWHERE;
          INSN_TICK (insn) = INVALID_TICK;
        }
    }
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
        {
          rtx_insn *x = link->insn ();
          QUEUE_INDEX (x) = QUEUE_NOWHERE;
          INSN_TICK (x) = INVALID_TICK;
        }
      free_INSN_LIST_list (&insn_queue[q]);
    }

  free (ready.vec);
  ready = save->ready;

  if (ready.n_ready > 0)
    {
      rtx_insn **first = ready_lastpos (&ready);
      for (i = 0; i < ready.n_ready; i++)
        {
          rtx_insn *insn = first[i];
          QUEUE_INDEX (insn) = QUEUE_READY;
          TODO_SPEC (insn) = recompute_todo_spec (insn, true);
          INSN_TICK (insn) = save->clock_var;
        }
    }

  q_ptr = 0;
  q_size = save->q_size;
  for (i = 0; i <= max_insn_queue_index; i++)
    {
      int q = NEXT_Q_AFTER (q_ptr, i);

      insn_queue[q] = save->insn_queue[q];

      for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
        {
          rtx_insn *x = link->insn ();
          QUEUE_INDEX (x) = i;
          TODO_SPEC (x) = recompute_todo_spec (x, true);
          INSN_TICK (x) = save->clock_var + i;
        }
    }
  free (save->insn_queue);

  toggle_cancelled_flags (true);

  clock_var = save->clock_var;
  last_clock_var = save->last_clock_var;
  cycle_issued_insns = save->cycle_issued_insns;
  last_scheduled_insn = save->last_scheduled_insn;
  last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
  nonscheduled_insns_begin = save->nonscheduled_insns_begin;

  *psched_block = save->sched_block;

  memcpy (curr_state, save->curr_state, dfa_state_size);
  free (save->curr_state);

  mark_backtrack_feeds (save->delay_pair->i2, 0);

  gcc_assert (next_cycle_replace_deps.is_empty ());
  next_cycle_replace_deps = save->next_cycle_deps.copy ();
  next_cycle_apply = save->next_cycle_apply.copy ();

  free (save);

  for (save = backtrack_queue; save; save = save->next)
    {
      mark_backtrack_feeds (save->delay_pair->i2, 1);
    }
}
/* Discard all data associated with the topmost entry in the backtrack
   queue.  If RESET_TICK is false, we just want to free the data.  If true,
   we are doing this because we discovered a reason to backtrack.  In the
   latter case, also reset the INSN_TICK for the shadow insn.  */
static void
free_topmost_backtrack_point (bool reset_tick)
{
  struct haifa_saved_data *save = backtrack_queue;
  int i;

  backtrack_queue = save->next;

  if (reset_tick)
    {
      struct delay_pair *pair = save->delay_pair;
      while (pair)
        {
          INSN_TICK (pair->i2) = INVALID_TICK;
          INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
          pair = pair->next_same_i1;
        }
      undo_replacements_for_backtrack (save);
    }
  else
    {
      save->replacement_deps.release ();
      save->replace_apply.release ();
    }

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (save->be_saved_data);
  if (current_sched_info->restore_state)
    free (save->fe_saved_data);
  for (i = 0; i <= max_insn_queue_index; i++)
    free_INSN_LIST_list (&save->insn_queue[i]);
  free (save->insn_queue);
  free (save->curr_state);
  free (save->ready.vec);
  free (save);
}
/* Free the entire backtrack queue.  */
static void
free_backtrack_queue (void)
{
  while (backtrack_queue)
    free_topmost_backtrack_point (false);
}
/* Apply a replacement described by DESC.  If IMMEDIATELY is false, we
   may have to postpone the replacement until the start of the next cycle,
   at which point we will be called again with IMMEDIATELY true.  This is
   only done for machines which have instruction packets with explicit
   parallelism however.  */
static void
apply_replacement (dep_t dep, bool immediately)
{
  struct dep_replacement *desc = DEP_REPLACE (dep);
  if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
    {
      next_cycle_replace_deps.safe_push (dep);
      next_cycle_apply.safe_push (1);
    }
  else
    {
      bool success;

      if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
        return;

      if (sched_verbose >= 5)
        fprintf (sched_dump, "applying replacement for insn %d\n",
                 INSN_UID (desc->insn));

      success = validate_change (desc->insn, desc->loc, desc->newval, 0);
      gcc_assert (success);

      rtx_insn *insn = DEP_PRO (dep);

      /* Recompute priority since dependent priorities may have changed.  */
      priority (insn, true);
      update_insn_after_change (desc->insn);

      if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
        fix_tick_ready (desc->insn);

      if (backtrack_queue != NULL)
        {
          backtrack_queue->replacement_deps.safe_push (dep);
          backtrack_queue->replace_apply.safe_push (1);
        }
    }
}
/* We have determined that a pattern involved in DEP must be restored.
   If IMMEDIATELY is false, we may have to postpone the replacement
   until the start of the next cycle, at which point we will be called
   again with IMMEDIATELY true.  */
static void
restore_pattern (dep_t dep, bool immediately)
{
  rtx_insn *next = DEP_CON (dep);
  int tick = INSN_TICK (next);

  /* If we already scheduled the insn, the modified version is
     correct.  */
  if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
    return;

  if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
    {
      next_cycle_replace_deps.safe_push (dep);
      next_cycle_apply.safe_push (0);
      return;
    }

  if (DEP_TYPE (dep) == REG_DEP_CONTROL)
    {
      if (sched_verbose >= 5)
        fprintf (sched_dump, "restoring pattern for insn %d\n",
                 INSN_UID (next));
      haifa_change_pattern (next, ORIG_PAT (next));
    }
  else
    {
      struct dep_replacement *desc = DEP_REPLACE (dep);
      bool success;

      if (sched_verbose >= 5)
        fprintf (sched_dump, "restoring pattern for insn %d\n",
                 INSN_UID (desc->insn));
      tick = INSN_TICK (desc->insn);

      success = validate_change (desc->insn, desc->loc, desc->orig, 0);
      gcc_assert (success);

      rtx_insn *insn = DEP_PRO (dep);

      if (QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
        {
          /* Recompute priority since dependent priorities may have
             changed.  */
          priority (insn, true);
        }

      update_insn_after_change (desc->insn);

      if (backtrack_queue != NULL)
        {
          backtrack_queue->replacement_deps.safe_push (dep);
          backtrack_queue->replace_apply.safe_push (0);
        }
    }
  INSN_TICK (next) = tick;
  if (TODO_SPEC (next) == DEP_POSTPONED)
    return;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    TODO_SPEC (next) = 0;
  else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    TODO_SPEC (next) = HARD_DEP;
}
/* Perform pattern replacements that were queued up until the next
   cycle.  */
static void
perform_replacements_new_cycle (void)
{
  int i;
  dep_t dep;
  FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
    {
      int apply_p = next_cycle_apply[i];
      if (apply_p)
        apply_replacement (dep, true);
      else
        restore_pattern (dep, true);
    }
  next_cycle_replace_deps.truncate (0);
  next_cycle_apply.truncate (0);
}
/* Compute INSN_TICK_ESTIMATE for INSN.  PROCESSED is a bitmap of
   instructions we've previously encountered, a set bit prevents
   recursion.  BUDGET is a limit on how far ahead we look, it is
   reduced on recursive calls.  Return true if we produced a good
   estimate, or false if we exceeded the budget.  */
static bool
estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int earliest = INSN_TICK (insn);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      int t;

      if (DEP_STATUS (dep) & DEP_CANCELLED)
        continue;

      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
        gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
      else
        {
          int cost = dep_cost (dep);
          if (cost >= budget)
            return false;
          if (!bitmap_bit_p (processed, INSN_LUID (pro)))
            {
              if (!estimate_insn_tick (processed, pro, budget - cost))
                return false;
            }
          gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
          t = INSN_TICK_ESTIMATE (pro) + cost;
          if (earliest == INVALID_TICK || t > earliest)
            earliest = t;
        }
    }
  bitmap_set_bit (processed, INSN_LUID (insn));
  INSN_TICK_ESTIMATE (insn) = earliest;
  return true;
}
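/* Exposition-only sketch of the same bottom-up estimate on a toy DAG
   (hypothetical toy_* types; leaves are assumed ready at cycle 0, and
   BUDGET caps the lookahead exactly as above).  Guarded by "#if 0" so it
   is never compiled.  */
#if 0
struct toy_node;
struct toy_dep { struct toy_node *pro; int cost; };
struct toy_node
{
  int n_deps;
  struct toy_dep *deps;
  int tick_estimate;
  int processed;
};

/* Returns 1 on success, 0 if the budget was exceeded.  */
static int
toy_estimate_tick (struct toy_node *insn, int budget)
{
  int earliest = 0;

  for (int i = 0; i < insn->n_deps; i++)
    {
      struct toy_dep *d = &insn->deps[i];

      if (d->cost >= budget)
        return 0;
      if (!d->pro->processed
          && !toy_estimate_tick (d->pro, budget - d->cost))
        return 0;
      /* An insn can issue only after each producer's estimated tick plus
         the dependence cost.  */
      if (d->pro->tick_estimate + d->cost > earliest)
        earliest = d->pro->tick_estimate + d->cost;
    }
  insn->processed = 1;
  insn->tick_estimate = earliest;
  return 1;
}
#endif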
/* Examine the pair of insns in P, and estimate (optimistically, assuming
   infinite resources) the cycle in which the delayed shadow can be issued.
   Return the number of cycles that must pass before the real insn can be
   issued in order to meet this constraint.  */
static int
estimate_shadow_tick (struct delay_pair *p)
{
  auto_bitmap processed;
  int t;
  bool cutoff;

  cutoff = !estimate_insn_tick (processed, p->i2,
                                max_insn_queue_index + pair_delay (p));
  if (cutoff)
    return max_insn_queue_index;
  t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
  if (t > 0)
    return t;
  return 0;
}
/* If INSN has no unresolved backwards dependencies, add it to the schedule and
   recursively resolve all its forward dependencies.  */
static void
resolve_dependencies (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Don't use sd_lists_empty_p; it ignores debug insns.  */
  if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
      || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
    return;

  if (sched_verbose >= 4)
    fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));

  if (QUEUE_INDEX (insn) >= 0)
    queue_remove (insn);

  scheduled_insns.safe_push (insn);

  /* Update dependent instructions.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
       sd_iterator_cond (&sd_it, &dep);)
    {
      rtx_insn *next = DEP_CON (dep);

      if (sched_verbose >= 4)
        fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
                 INSN_UID (next));

      /* Resolve the dependence between INSN and NEXT.
         sd_resolve_dep () moves current dep to another list thus
         advancing the iterator.  */
      sd_resolve_dep (sd_it);

      if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
        {
          resolve_dependencies (next);
        }
      else
        /* Check always has only one forward dependence (to the first insn in
           the recovery block), therefore, this will be executed only once.  */
        {
          gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
        }
    }
}
/* Return the head and tail pointers of the ebb starting at BEG and ending
   at END.  */
void
get_ebb_head_tail (basic_block beg, basic_block end,
                   rtx_insn **headp, rtx_insn **tailp)
{
  rtx_insn *beg_head = BB_HEAD (beg);
  rtx_insn *beg_tail = BB_END (beg);
  rtx_insn *end_head = BB_HEAD (end);
  rtx_insn *end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic block.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else if (DEBUG_INSN_P (beg_head))
      {
        rtx_insn *note, *next;

        for (note = NEXT_INSN (beg_head);
             note != beg_tail;
             note = next)
          {
            next = NEXT_INSN (note);
            if (NOTE_P (note))
              {
                if (sched_verbose >= 9)
                  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));

                reorder_insns_nobb (note, note, PREV_INSN (beg_head));

                if (BLOCK_FOR_INSN (note) != beg)
                  df_insn_change_bb (note, beg);
              }
            else if (!DEBUG_INSN_P (note))
              break;
          }

        break;
      }
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else if (DEBUG_INSN_P (end_tail))
      {
        rtx_insn *note, *prev;

        for (note = PREV_INSN (end_tail);
             note != end_head;
             note = prev)
          {
            prev = PREV_INSN (note);
            if (NOTE_P (note))
              {
                if (sched_verbose >= 9)
                  fprintf (sched_dump, "reorder %i\n", INSN_UID (note));

                reorder_insns_nobb (note, note, end_tail);

                if (end_tail == BB_END (end))
                  BB_END (end) = note;

                if (BLOCK_FOR_INSN (note) != end)
                  df_insn_change_bb (note, end);
              }
            else if (!DEBUG_INSN_P (note))
              break;
          }

        break;
      }
    else
      break;

  *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
        return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
   previously found among the insns.  Insert them just before HEAD.  */
rtx_insn *
restore_other_notes (rtx_insn *head, basic_block head_bb)
{
  if (note_list != 0)
    {
      rtx_insn *note_head = note_list;

      if (head)
        head_bb = BLOCK_FOR_INSN (head);
      else
        head = NEXT_INSN (bb_note (head_bb));

      while (PREV_INSN (note_head))
        {
          set_block_for_insn (note_head, head_bb);
          note_head = PREV_INSN (note_head);
        }
      /* In the above cycle we've missed this note.  */
      set_block_for_insn (note_head, head_bb);

      SET_PREV_INSN (note_head) = PREV_INSN (head);
      SET_NEXT_INSN (PREV_INSN (head)) = note_head;
      SET_PREV_INSN (head) = note_list;
      SET_NEXT_INSN (note_list) = head;

      if (BLOCK_FOR_INSN (head) != head_bb)
        BB_END (head_bb) = note_list;

      head = note_head;
    }

  return head;
}
/* When we know we are going to discard the schedule due to a failed attempt
   at modulo scheduling, undo all replacements.  */
static void
undo_all_replacements (void)
{
  rtx_insn *insn;
  int i;

  FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
    {
      sd_iterator_def sd_it;
      dep_t dep;

      /* See if we must undo a replacement.  */
      for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
           sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
        {
          struct dep_replacement *desc = DEP_REPLACE (dep);
          if (desc != NULL)
            validate_change (desc->insn, desc->loc, desc->orig, 0);
        }
    }
}
/* Return first non-scheduled insn in the current scheduling block.
   This is mostly used for debug-counter purposes.  */
static rtx_insn *
first_nonscheduled_insn (void)
{
  rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
                    ? nonscheduled_insns_begin
                    : current_sched_info->prev_head);

  do
    {
      insn = next_nonnote_nondebug_insn (insn);
    }
  while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);

  return insn;
}
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx_insn *insn;
  rtx_insn_list *link;
  rtx_insn *skip_insn;

  q_ptr = NEXT_Q (q_ptr);

  if (dbg_cnt (sched_insn) == false)
    /* If debug counter is activated do not requeue the first
       nonscheduled insn.  */
    skip_insn = first_nonscheduled_insn ();
  else
    skip_insn = NULL;

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = link->next ())
    {
      insn = link->insn ();
      q_size -= 1;

      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
         See the comment in schedule_block for the rationale.  */
      if (!reload_completed
          && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
              || (sched_pressure == SCHED_PRESSURE_MODEL
                  /* Limit pressure recalculations to
                     param_max_sched_ready_insns instructions too.  */
                  && model_index (insn) > (model_curr_point
                                           + param_max_sched_ready_insns)))
          && !(sched_pressure == SCHED_PRESSURE_MODEL
               && model_curr_point < model_num_insns
               /* Always allow the next model instruction to issue.  */
               && model_index (insn) == model_curr_point)
          && !SCHED_GROUP_P (insn)
          && insn != skip_insn)
        {
          if (sched_verbose >= 2)
            fprintf (sched_dump, "keeping in queue, ready full\n");
          queue_insn (insn, 1, "ready full");
        }
      else
        {
          ready_add (ready, insn, false);
          if (sched_verbose >= 2)
            fprintf (sched_dump, "moving to ready without stalls\n");
        }
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
        {
          if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
            {
              for (; link; link = link->next ())
                {
                  insn = link->insn ();
                  q_size -= 1;

                  if (sched_verbose >= 2)
                    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
                             (*current_sched_info->print_insn) (insn, 0));

                  ready_add (ready, insn, false);
                  if (sched_verbose >= 2)
                    fprintf (sched_dump, "moving to ready with %d stalls\n",
                             stalls);
                }
              free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

              advance_one_cycle ();

              break;
            }

          advance_one_cycle ();
        }

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
      if (sched_verbose >= 2)
        fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
                 stalls, clock_var);
    }
}
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx_insn *insn)
{
  if (targetm.sched.is_costly_dependence)
    {
      int n_cycles;
      int i = scheduled_insns.length ();
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
        {
          while (i-- > 0)
            {
              int cost;

              rtx_insn *prev_insn = scheduled_insns[i];

              if (!NOTE_P (prev_insn))
                {
                  dep_t dep;

                  dep = sd_find_dep_between (prev_insn, insn, true);

                  if (dep != NULL)
                    {
                      cost = dep_cost (dep);

                      if (targetm.sched.is_costly_dependence (dep, cost,
                                flag_sched_stalled_insns_dep - n_cycles))
                        return false;
                    }
                }

              if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
                break;
            }

          if (i == 0)
            break;
        }
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx_insn *insn;
  rtx_insn_list *link;
  rtx_insn_list *next_link;
  rtx_insn_list *prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
             invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
        {
          if (sched_verbose > 6)
            fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

          prev_link = 0;
          while (link)
            {
              next_link = link->next ();
              insn = link->insn ();
              if (insn && sched_verbose > 6)
                print_rtl_single (sched_dump, insn);

              memcpy (temp_state, state, dfa_state_size);
              if (recog_memoized (insn) < 0)
                /* non-negative to indicate that it's not ready
                   to avoid infinite Q->R->Q->R... */
                cost = 0;
              else
                cost = state_transition (temp_state, insn);

              if (sched_verbose >= 6)
                fprintf (sched_dump, "transition cost = %d\n", cost);

              move_to_ready = false;
              if (cost < 0)
                {
                  move_to_ready = ok_for_early_queue_removal (insn);
                  if (move_to_ready == true)
                    {
                      /* move from Q to R */
                      q_size -= 1;
                      ready_add (ready, insn, false);

                      if (prev_link)
                        XEXP (prev_link, 1) = next_link;
                      else
                        insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

                      free_INSN_LIST_node (link);

                      if (sched_verbose >= 2)
                        fprintf (sched_dump,
                                 ";;\t\tEarly Q-->Ready: insn %s\n",
                                 (*current_sched_info->print_insn) (insn, 0));

                      insns_removed++;
                      if (insns_removed == flag_sched_stalled_insns)
                        /* Remove no more than flag_sched_stalled_insns insns
                           from Q at a time.  */
                        return insns_removed;
                    }
                }

              if (move_to_ready == false)
                prev_link = link;

              link = next_link;
            } /* while link */
        } /* if link */
    } /* for stalls.. */

  return insns_removed;
}
/* Print the ready list for debugging purposes.
   If READY_TRY is non-zero then only print insns that max_issue
   will consider.  */
static void
debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
{
  rtx_insn **p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    {
      if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
        continue;

      fprintf (sched_dump, " %s:%d",
               (*current_sched_info->print_insn) (p[i], 0),
               INSN_LUID (p[i]));
      if (sched_pressure != SCHED_PRESSURE_NONE)
        fprintf (sched_dump, "(cost=%d",
                 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
      fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
      if (INSN_TICK (p[i]) > clock_var)
        fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
      if (sched_pressure == SCHED_PRESSURE_MODEL)
        fprintf (sched_dump, ":idx=%d",
                 model_index (p[i]));
      if (sched_pressure != SCHED_PRESSURE_NONE)
        fprintf (sched_dump, ")");
    }
  fprintf (sched_dump, "\n");
}
/* Print the ready list.  Callable from debugger.  */
void
debug_ready_list (struct ready_list *ready)
{
  debug_ready_list_1 (ready, NULL);
}
/* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
   NOTEs.  This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
   replaces the epilogue note in the correct basic block.  */
static void
reemit_notes (rtx_insn *insn)
{
  rtx note;
  rtx_insn *last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
        {
          enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));

          last = emit_note_before (note_type, last);
          remove_note (insn, note);
        }
    }
}
/* Move INSN.  Reemit notes if needed.  Update CFG, if needed.  */
static void
move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
{
  if (PREV_INSN (insn) != last)
    {
      basic_block bb;
      rtx_insn *note;
      int jump_p = 0;

      bb = BLOCK_FOR_INSN (insn);

      /* BB_HEAD is either LABEL or NOTE.  */
      gcc_assert (BB_HEAD (bb) != insn);

      if (BB_END (bb) == insn)
        /* If this is last instruction in BB, move end marker one
           instruction up.  */
        {
          /* Jumps are always placed at the end of basic block.  */
          jump_p = control_flow_insn_p (insn);

          gcc_assert (!jump_p
                      || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
                          && IS_SPECULATION_BRANCHY_CHECK_P (insn))
                      || (common_sched_info->sched_pass_id
                          == SCHED_EBB_PASS));

          gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);

          BB_END (bb) = PREV_INSN (insn);
        }

      gcc_assert (BB_END (bb) != last);

      if (jump_p)
        /* We move the block note along with jump.  */
        {
          gcc_assert (nt);

          note = NEXT_INSN (insn);
          while (NOTE_NOT_BB_P (note) && note != nt)
            note = NEXT_INSN (note);

          if (note != nt
              && (LABEL_P (note)
                  || BARRIER_P (note)))
            note = NEXT_INSN (note);

          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
        }
      else
        note = insn;

      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
      SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);

      SET_NEXT_INSN (note) = NEXT_INSN (last);
      SET_PREV_INSN (NEXT_INSN (last)) = note;

      SET_NEXT_INSN (last) = insn;
      SET_PREV_INSN (insn) = last;

      bb = BLOCK_FOR_INSN (last);

      if (jump_p)
        {
          fix_jump_move (insn);

          if (BLOCK_FOR_INSN (insn) != bb)
            move_block_after_check (insn);

          gcc_assert (BB_END (bb) == last);
        }

      df_insn_change_bb (insn, bb);

      /* Update BB_END, if needed.  */
      if (BB_END (bb) == last)
        BB_END (bb) = insn;
    }

  SCHED_GROUP_P (insn) = 0;
}
/* Return true if scheduling INSN will finish current clock cycle.  */
static bool
insn_finishes_cycle_p (rtx_insn *insn)
{
  if (SCHED_GROUP_P (insn))
    /* After issuing INSN, rest of the sched_group will be forced to issue
       in order.  Don't make any plans for the rest of cycle.  */
    return true;

  /* Finishing the block will, apparently, finish the cycle.  */
  if (current_sched_info->insn_finishes_block_p
      && current_sched_info->insn_finishes_block_p (insn))
    return true;

  return false;
}
/* Helper for autopref_multipass_init.  Given a SET in PAT and whether
   we're expecting a memory WRITE or not, check that the insn is relevant to
   the autoprefetcher modelling code.  Return true iff that is the case.
   If it is relevant, record the base register of the memory op in BASE and
   the offset in OFFSET.  */

static bool
analyze_set_insn_for_autopref (rtx pat, bool write, rtx *base, int *offset)
{
  if (GET_CODE (pat) != SET)
    return false;

  rtx mem = write ? SET_DEST (pat) : SET_SRC (pat);
  if (!MEM_P (mem))
    return false;

  struct address_info info;
  decompose_mem_address (&info, mem);

  /* TODO: Currently only (base+const) addressing is supported.  */
  if (info.base == NULL || !REG_P (*info.base)
      || (info.disp != NULL && !CONST_INT_P (*info.disp)))
    return false;

  *base = *info.base;
  *offset = info.disp ? INTVAL (*info.disp) : 0;
  return true;
}
/* Functions to model cache auto-prefetcher.

   Some of the CPUs have cache auto-prefetcher, which /seems/ to initiate
   memory prefetches if it sees instructions with consecutive memory accesses
   in the instruction stream.  Details of such hardware units are not
   published, so we can only guess what exactly is going on there.
   In the scheduler, we model abstract auto-prefetcher.  If there are memory
   insns in the ready list (or the queue) that have same memory base, but
   different offsets, then we delay the insns with larger offsets until insns
   with smaller offsets get scheduled.  If PARAM_SCHED_AUTOPREF_QUEUE_DEPTH
   is "1", then we look at the ready list; if it is N>1, then we also look
   through N-1 queue entries.
   If the param is N>=0, then rank_for_schedule will consider auto-prefetching
   among its heuristics.
   Param value of "-1" disables modelling of the auto-prefetcher.  */

/* Initialize autoprefetcher model data for INSN.  */
static void
autopref_multipass_init (const rtx_insn *insn, int write)
{
  autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];

  gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
  data->base = NULL_RTX;
  data->offset = 0;
  /* Set insn entry initialized, but not relevant for auto-prefetcher.  */
  data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;

  rtx pat = PATTERN (insn);

  /* We have a multi-set insn like a load-multiple or store-multiple.
     We care about these as long as all the memory ops inside the PARALLEL
     have the same base register.  We care about the minimum and maximum
     offsets from that base but don't check for the order of those offsets
     within the PARALLEL insn itself.  */
  if (GET_CODE (pat) == PARALLEL)
    {
      int n_elems = XVECLEN (pat, 0);

      int i, offset;
      rtx base, prev_base = NULL_RTX;
      int min_offset = INT_MAX;

      for (i = 0; i < n_elems; i++)
        {
          rtx set = XVECEXP (pat, 0, i);
          if (GET_CODE (set) != SET)
            return;

          if (!analyze_set_insn_for_autopref (set, write, &base, &offset))
            return;

          /* Ensure that all memory operations in the PARALLEL use the same
             base register.  */
          if (i > 0 && REGNO (base) != REGNO (prev_base))
            return;
          prev_base = base;
          min_offset = MIN (min_offset, offset);
        }

      /* If we reached here then we have a valid PARALLEL of multiple memory
         ops with prev_base as the base and min_offset containing the
         offset.  */
      gcc_assert (prev_base);
      data->base = prev_base;
      data->offset = min_offset;
      data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
      return;
    }

  /* Otherwise this is a single set memory operation.  */
  rtx set = single_set (insn);
  if (set == NULL_RTX)
    return;

  if (!analyze_set_insn_for_autopref (set, write, &data->base,
                                      &data->offset))
    return;

  /* This insn is relevant for the auto-prefetcher.
     The base and offset fields will have been filled in the
     analyze_set_insn_for_autopref call above.  */
  data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
}
/* Helper function for rank_for_schedule sorting.  */
static int
autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
{
  int r = 0;
  for (int write = 0; write < 2 && !r; ++write)
    {
      autopref_multipass_data_t data1
        = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
      autopref_multipass_data_t data2
        = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];

      if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
        autopref_multipass_init (insn1, write);

      if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
        autopref_multipass_init (insn2, write);

      int irrel1 = data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
      int irrel2 = data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;

      if (!irrel1 && !irrel2)
        r = data1->offset - data2->offset;
      else
        r = irrel2 - irrel1;
    }

  return r;
}
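/* Usage sketch (exposition only, hypothetical toy_* names): the comparison
   above orders same-base memory accesses by ascending offset, e.g. [r1+0],
   [r1+4], [r1+8], the stream the modelled auto-prefetcher likes.  A qsort
   comparator mirroring the two cases, ignoring the real heuristic's other
   tie-breakers; guarded by "#if 0".  */
#if 0
#include <stdlib.h>

struct toy_mem { int relevant; int offset; };

static int
toy_autopref_cmp (const void *a, const void *b)
{
  const struct toy_mem *m1 = (const struct toy_mem *) a;
  const struct toy_mem *m2 = (const struct toy_mem *) b;

  if (m1->relevant && m2->relevant)
    return m1->offset - m2->offset;     /* smaller offsets first */
  return m2->relevant - m1->relevant;   /* irrelevant entries last */
}
#endif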
/* True if header of debug dump was printed.  */
static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;

/* Helper for autopref_multipass_dfa_lookahead_guard.
   Return "1" if INSN1 should be delayed in favor of INSN2.  */
static int
autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
                                          const rtx_insn *insn2, int write)
{
  autopref_multipass_data_t data1
    = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
  autopref_multipass_data_t data2
    = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];

  if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
    autopref_multipass_init (insn2, write);
  if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
    return 0;

  if (rtx_equal_p (data1->base, data2->base)
      && data1->offset > data2->offset)
    {
      if (sched_verbose >= 2)
        {
          if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
            {
              fprintf (sched_dump,
                       ";;\t\tnot trying in max_issue due to autoprefetch "
                       "model: ");
              autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
            }

          fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
        }

      return 1;
    }

  return 0;
}
/* General note:

   We could have also hooked autoprefetcher model into
   first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
   to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
   (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
   unblocked).  We don't bother about this yet because target of interest
   (ARM Cortex-A15) can issue only 1 memory operation per cycle.  */

/* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
   Return "1" if INSN1 should not be considered in max_issue due to
   auto-prefetcher considerations.  */
int
autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
{
  int r = 0;

  /* Exit early if the param forbids this or if we're not entering here through
     normal haifa scheduling.  This can happen if selective scheduling is
     explicitly enabled.  */
  if (!insn_queue || param_sched_autopref_queue_depth <= 0)
    return 0;

  if (sched_verbose >= 2 && ready_index == 0)
    autopref_multipass_dfa_lookahead_guard_started_dump_p = false;

  for (int write = 0; write < 2; ++write)
    {
      autopref_multipass_data_t data1
        = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];

      if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
        autopref_multipass_init (insn1, write);
      if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
        continue;

      if (ready_index == 0
          && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
        /* We allow only a single delay on privileged instructions.
           Doing otherwise would cause an infinite loop.  */
        {
          if (sched_verbose >= 2)
            {
              if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
                {
                  fprintf (sched_dump,
                           ";;\t\tnot trying in max_issue due to autoprefetch "
                           "model: ");
                  autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
                }

              fprintf (sched_dump, " *%d*", INSN_UID (insn1));
            }
          continue;
        }

      for (int i2 = 0; i2 < ready.n_ready; ++i2)
        {
          rtx_insn *insn2 = get_ready_element (i2);
          if (insn1 == insn2)
            continue;
          r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
          if (r)
            {
              if (ready_index == 0)
                {
                  r = -1;
                  data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
                }
              goto finish;
            }
        }

      if (param_sched_autopref_queue_depth == 1)
        continue;

      /* Everything from the current queue slot should have been moved to
         the ready list.  */
      gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);

      int n_stalls = param_sched_autopref_queue_depth - 1;
      if (n_stalls > max_insn_queue_index)
        n_stalls = max_insn_queue_index;

      for (int stalls = 1; stalls <= n_stalls; ++stalls)
        {
          for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
               link != NULL_RTX;
               link = link->next ())
            {
              rtx_insn *insn2 = link->insn ();
              r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
                                                            write);
              if (r)
                {
                  /* Queue INSN1 until INSN2 can issue.  */
                  r = -stalls;
                  if (ready_index == 0)
                    data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
                  goto finish;
                }
            }
        }
    }

 finish:
  if (sched_verbose >= 2
      && autopref_multipass_dfa_lookahead_guard_started_dump_p
      && (ready_index == ready.n_ready - 1 || r < 0))
    /* This does not /always/ trigger.  We don't output EOL if the last
       insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
       called.  We can live with this.  */
    fprintf (sched_dump, "\n");

  return r;
}
/* Define type for target data used in multipass scheduling.  */
#ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
# define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
#endif
typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;

/* The following structure describes an entry of the stack of choices.  */
struct choice_entry
{
  /* Ordinal number of the issued insn in the ready queue.  */
  int index;
  /* The number of the rest insns whose issues we should try.  */
  int rest;
  /* The number of issued essential insns.  */
  int n;
  /* State after issuing the insn.  */
  state_t state;
  /* Target-specific data.  */
  first_cycle_multipass_data_t target_data;
};

/* The following array is used to implement a stack of choices used in
   function max_issue.  */
static struct choice_entry *choice_stack;

/* This holds the value of the target dfa_lookahead hook.  */
int dfa_lookahead;

/* The following variable value is maximal number of tries of issuing
   insns for the first cycle multipass insn scheduling.  We define
   this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE).  We would not
   need this constraint if all real insns (with non-negative codes)
   had reservations because in this case the algorithm complexity is
   O(DFA_LOOKAHEAD**ISSUE_RATE).  Unfortunately, the dfa descriptions
   might be incomplete and such insn might occur.  For such
   descriptions, the complexity of algorithm (without the constraint)
   could achieve DFA_LOOKAHEAD ** N, where N is the queue length.  */
static int max_lookahead_tries;
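/* Worked example for the bound above (hypothetical machine parameters):
   with issue_rate == 2 and dfa_lookahead == 4, max_issue initializes
   max_lookahead_tries = 100 * 4 * 4 = 1600, i.e. the multipass search
   visits at most 1600 states before settling for the best packing found
   so far.  */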
/* The following function returns the maximal (or close to maximal) number
   of insns which can be issued on the same cycle, one of which is the
   insn with the best rank (the first insn in READY).  To do this, the
   function tries different samples of ready insns.  READY is the current
   queue `ready'.  The global array READY_TRY reflects which insns are
   already issued in this try.  The function stops immediately if it
   reaches a solution in which all instructions can be issued.  INDEX
   will contain the index of the best insn in READY.  The following
   function is used only for first cycle multipass scheduling.

   This function expects recognized insns only.  All USEs,
   CLOBBERs, etc must be filtered elsewhere.  */
max_issue (struct ready_list *ready, int privileged_n, state_t state,
	   bool first_cycle_insn_p, int *index)
  int n, i, all, n_ready, best, delay, tries_num;
  struct choice_entry *top;

  n_ready = ready->n_ready;
  gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
	      && privileged_n <= n_ready);

  /* Init MAX_LOOKAHEAD_TRIES.  */
  if (max_lookahead_tries == 0)
      max_lookahead_tries = 100;
      for (i = 0; i < issue_rate; i++)
	max_lookahead_tries *= dfa_lookahead;

  /* Init max_points.  */
  more_issue = issue_rate - cycle_issued_insns;
  gcc_assert (more_issue >= 0);

  /* The number of the issued insns in the best solution.  */

  /* Set initial state of the search.  */
  memcpy (top->state, state, dfa_state_size);
  top->rest = dfa_lookahead;

  if (targetm.sched.first_cycle_multipass_begin)
    targetm.sched.first_cycle_multipass_begin (&top->target_data,
					       first_cycle_insn_p);

  /* Count the number of the insns to search among.  */
  for (all = i = 0; i < n_ready; i++)

  if (sched_verbose >= 2)
      fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
      debug_ready_list_1 (ready, ready_try);
  /* I is the index of the insn to try next.  */

      if (/* If we've reached a dead end or searched enough of what we
	     have.  */
	  top->rest == 0
	  /* or have nothing else to try...  */
	  || i >= n_ready
	  /* or should not issue more.  */
	  || top->n >= more_issue)
	  /* ??? (... || i == n_ready).  */
	  gcc_assert (i <= n_ready);

	  /* We should not issue more than issue_rate instructions.  */
	  gcc_assert (top->n <= more_issue);

	  if (top == choice_stack)

	  if (best < top - choice_stack)
	      /* Try to find issued privileged insn.  */
	      while (n && !ready_try[--n])

	      if (/* If all insns are equally good...  */
		  privileged_n == 0
		  /* Or a privileged insn will be issued.  */
		  || ready_try[n])
		  /* Then we have a solution.  */
		  best = top - choice_stack;
		  /* This is the index of the insn issued first in this
		     solution.  */
		  *index = choice_stack[1].index;
		  if (top->n == more_issue || best == all)

	  /* Set ready-list index to point to the last insn
	     ('i++' below will advance it to the next insn).  */

	  if (targetm.sched.first_cycle_multipass_backtrack)
	    targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
							   ready_try, n_ready);

	  memcpy (state, top->state, dfa_state_size);
      else if (!ready_try[i])
	  if (tries_num > max_lookahead_tries)

	  insn = ready_element (ready, i);
	  delay = state_transition (state, insn);

	      if (state_dead_lock_p (state)
		  || insn_finishes_cycle_p (insn))
		/* We won't issue any more instructions in the next
		   choice_state.  */
		top->rest = 0;

	      if (memcmp (top->state, state, dfa_state_size) != 0)

	      /* Advance to the next choice_entry.  */
	      /* Initialize it.  */
	      top->rest = dfa_lookahead;
	      memcpy (top->state, state, dfa_state_size);

	      if (targetm.sched.first_cycle_multipass_issue)
		targetm.sched.first_cycle_multipass_issue (&top->target_data,

      /* Increase ready-list index.  */

  if (targetm.sched.first_cycle_multipass_end)
    targetm.sched.first_cycle_multipass_end (best != 0
					     ? &choice_stack[1].target_data
					     : NULL);

  /* Restore the original state of the DFA.  */
  memcpy (state, choice_stack->state, dfa_state_size);
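
/* An illustrative sketch of the calling contract of max_issue above
   (hypothetical caller, not part of the pass): the caller passes the
   current DFA state and receives in *INDEX the ready-list position of
   the insn to issue first.

     int index;
     if (max_issue (&ready, 1, curr_state, true, &index) > 0)
       ...issue ready_element (&ready, index) first...

   PRIVILEGED_N == 1 means only solutions that issue the highest-priority
   ready insn are accepted.  */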
/* The following function chooses an insn from READY and modifies
   READY.  It is used only for first cycle multipass scheduling.
   Return:
   -1 if the cycle should be advanced,
   0 if INSN_PTR is set to point to the desirable insn,
   1 if choose_ready () should be restarted without advancing the cycle.  */
choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
	      rtx_insn **insn_ptr)
  if (dbg_cnt (sched_insn) == false)
      if (nonscheduled_insns_begin == NULL_RTX)
	nonscheduled_insns_begin = current_sched_info->prev_head;

      rtx_insn *insn = first_nonscheduled_insn ();

      if (QUEUE_INDEX (insn) == QUEUE_READY)
	/* INSN is in the ready_list.  */
	  ready_remove_insn (insn);

      /* INSN is in the queue.  Advance cycle to move it to the ready list.  */
      gcc_assert (QUEUE_INDEX (insn) >= 0);

  if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
      || DEBUG_INSN_P (ready_element (ready, 0)))
      if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
	*insn_ptr = ready_remove_first_dispatch (ready);
      else
	*insn_ptr = ready_remove_first (ready);

      /* Try to choose the best insn.  */

      insn = ready_element (ready, 0);
      if (INSN_CODE (insn) < 0)
	  *insn_ptr = ready_remove_first (ready);

      /* Filter the search space.  */
      for (i = 0; i < ready->n_ready; i++)
	  insn = ready_element (ready, i);

	  /* If this insn is recognizable we should have already
	     recognized it earlier.
	     ??? Not very clear where this is supposed to be done.  */
	  gcc_checking_assert (INSN_CODE (insn) >= 0
			       || recog_memoized (insn) < 0);
	  if (INSN_CODE (insn) < 0)
	      /* Non-recognized insns at position 0 are handled above.  */

	  if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
	      ready_try[i]
		= (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
		   (insn, i));

	      if (ready_try[i] < 0)
		/* Queue instruction for several cycles.
		   We need to restart choose_ready as we have changed
		   the ready list.  */
		  change_queue_index (insn, -ready_try[i]);

	      /* Make sure that we didn't end up with 0'th insn filtered out.
		 Don't be tempted to make life easier for backends and just
		 requeue 0'th insn if (ready_try[0] == 0) and restart
		 choose_ready.  Backends should be very considerate about
		 requeueing instructions -- especially the highest priority
		 one at position 0.  */
	      gcc_assert (ready_try[i] == 0 || i > 0);

	  gcc_assert (ready_try[i] == 0);
	  /* INSN made it through the scrutiny of filters!  */

      if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
	  *insn_ptr = ready_remove_first (ready);
	  if (sched_verbose >= 4)
	    fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
		     (*current_sched_info->print_insn) (*insn_ptr, 0));

	  if (sched_verbose >= 4)
	    fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
		     (*current_sched_info->print_insn)
		     (ready_element (ready, index), 0));

	  *insn_ptr = ready_remove (ready, index);
/* This function is called when we have successfully scheduled a
   block.  It uses the schedule stored in the scheduled_insns vector
   to rearrange the RTL.  PREV_HEAD is used as the anchor to which we
   append the scheduled insns; TAIL is the insn after the scheduled
   block.  TARGET_BB is the argument passed to schedule_block.  */
commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
  last_scheduled_insn = prev_head;
       scheduled_insns.iterate (i, &insn);

      if (control_flow_insn_p (last_scheduled_insn)
	  || current_sched_info->advance_target_bb (*target_bb, insn))
	  *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);

	      x = next_real_insn (last_scheduled_insn);

	      dump_new_block_header (1, *target_bb, x, tail);

	  last_scheduled_insn = bb_note (*target_bb);

      if (current_sched_info->begin_move_insn)
	(*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
      move_insn (insn, last_scheduled_insn,
		 current_sched_info->next_tail);
      if (!DEBUG_INSN_P (insn))
	reemit_notes (insn);
      last_scheduled_insn = insn;

  scheduled_insns.truncate (0);
/* Examine all insns on the ready list and queue those which can't be
   issued in this cycle.  TEMP_STATE is temporary scheduler state we
   can use as scratch space.  If FIRST_CYCLE_INSN_P is true, no insns
   have been issued for the current cycle, which means it is valid to
   issue an asm statement.

   If SHADOWS_ONLY_P is true, we eliminate all real insns and only
   leave those for which SHADOW_P is true.  If MODULO_EPILOGUE is true,
   we only leave insns which have an INSN_EXACT_TICK.  */
prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
		  bool shadows_only_p, bool modulo_epilogue_p)
  bool sched_group_found = false;
  int min_cost_group = 0;

  for (i = 0; i < ready.n_ready; i++)
      rtx_insn *insn = ready_element (&ready, i);
      if (SCHED_GROUP_P (insn))
	  sched_group_found = true;

  /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
     such an insn first and note its cost.  If at least one SCHED_GROUP_P insn
     gets queued, then all other insns get queued for one cycle later.  */
  for (pass = sched_group_found ? 0 : 1; pass < 2; )
      int n = ready.n_ready;
      for (i = 0; i < n; i++)
	  rtx_insn *insn = ready_element (&ready, i);
	  const char *reason = "resource conflict";

	  if (DEBUG_INSN_P (insn))

	  if (sched_group_found && !SCHED_GROUP_P (insn)
	      && ((pass == 0) || (min_cost_group >= 1)))
	      cost = min_cost_group;
	      reason = "not in sched group";
	  else if (modulo_epilogue_p
		   && INSN_EXACT_TICK (insn) == INVALID_TICK)
	      cost = max_insn_queue_index;
	      reason = "not an epilogue insn";
	  else if (shadows_only_p && !SHADOW_P (insn))
	      reason = "not a shadow";
	  else if (recog_memoized (insn) < 0)
	      if (!first_cycle_insn_p
		  && (GET_CODE (PATTERN (insn)) == ASM_INPUT
		      || asm_noperands (PATTERN (insn)) >= 0))
	  else if (sched_pressure != SCHED_PRESSURE_NONE)
	      if (sched_pressure == SCHED_PRESSURE_MODEL
		  && INSN_TICK (insn) <= clock_var)
		  memcpy (temp_state, curr_state, dfa_state_size);
		  if (state_transition (temp_state, insn) >= 0)
		    INSN_TICK (insn) = clock_var + 1;

	      struct delay_pair *delay_entry;
	      delay_entry
		= delay_htab->find_with_hash (insn,
					      htab_hash_pointer (insn));
	      while (delay_entry && delay_cost == 0)
		  delay_cost = estimate_shadow_tick (delay_entry);
		  if (delay_cost > max_insn_queue_index)
		    delay_cost = max_insn_queue_index;
		  delay_entry = delay_entry->next_same_i1;

	      memcpy (temp_state, curr_state, dfa_state_size);
	      cost = state_transition (temp_state, insn);

	      if (cost < delay_cost)
		  reason = "shadow tick";

	  if (SCHED_GROUP_P (insn) && cost > min_cost_group)
	    min_cost_group = cost;
	  ready_remove (&ready, i);
	  /* Normally we'd want to queue INSN for COST cycles.  However,
	     if SCHED_GROUP_P is set, then we must ensure that nothing
	     else comes between INSN and its predecessor.  If there is
	     some other insn ready to fire on the next cycle, then that
	     invariant would be broken.

	     So when SCHED_GROUP_P is set, just queue this insn for a
	     single cycle.  */
	  queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
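
/* A concrete scenario for the SCHED_GROUP_P special case above
   (illustrative numbers): suppose INSN heads a sched group and its cost
   is 3 cycles, while an unrelated insn is ready to fire on the next
   cycle.  Queueing INSN for 3 cycles would let the unrelated insn issue
   between INSN and its predecessor, breaking the group; queueing for a
   single cycle and re-checking every cycle keeps the group contiguous.  */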
/* Called when we detect that the schedule is impossible.  We examine the
   backtrack queue to find the earliest insn that caused this condition.  */

static struct haifa_saved_data *
verify_shadows (void)
  struct haifa_saved_data *save, *earliest_fail = NULL;
  for (save = backtrack_queue; save; save = save->next)
      struct delay_pair *pair = save->delay_pair;
      rtx_insn *i1 = pair->i1;

      for (; pair; pair = pair->next_same_i1)
	  rtx_insn *i2 = pair->i2;

	  if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)

	      t = INSN_TICK (i1) + pair_delay (pair);

		  if (sched_verbose >= 2)
		    fprintf (sched_dump,
			     ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
			     INSN_UID (pair->i1), INSN_UID (pair->i2),
			     INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
		  earliest_fail = save;

	  if (QUEUE_INDEX (i2) >= 0)
	      int queued_for = INSN_TICK (i2);

		  if (sched_verbose >= 2)
		    fprintf (sched_dump,
			     ";;\t\tfailed delay requirements for %d/%d"
			     " (%d->%d), queued too late\n",
			     INSN_UID (pair->i1), INSN_UID (pair->i2),
			     INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
		  earliest_fail = save;

  return earliest_fail;
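
/* Worked example of the check above (illustrative ticks): for a delay
   pair (I1, I2) with pair_delay == 2, if I1 was scheduled at tick 3 then
   I2 may not issue before tick 5.  If I2 is already scheduled, or queued
   so that it would issue, earlier than that, the delay requirement fails
   and the corresponding save point becomes a backtracking candidate.  */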
/* Print instructions together with useful scheduling information between
   HEAD and TAIL (inclusive).  */

dump_insn_stream (rtx_insn *head, rtx_insn *tail)
  fprintf (sched_dump, ";;\t| insn | prio |\n");

  rtx_insn *next_tail = NEXT_INSN (tail);
  for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
      int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
      const char *pattern = (NOTE_P (insn)
			     ? "note"
			     : str_pattern_slim (PATTERN (insn)));

      fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
	       INSN_UID (insn), priority, pattern);

      if (sched_verbose >= 4)
	  if (NOTE_P (insn) || LABEL_P (insn) || recog_memoized (insn) < 0)
	    fprintf (sched_dump, "nothing");
	  else
	    print_reservation (sched_dump, insn);

      fprintf (sched_dump, "\n");
/* Use forward list scheduling to rearrange insns of block pointed to by
   TARGET_BB, possibly bringing insns from subsequent blocks in the same
   region.  */
schedule_block (basic_block *target_bb, state_t init_state)
  bool success = modulo_ii == 0;
  struct sched_block_state ls;
  state_t temp_state = NULL;  /* It is used for multipass scheduling.  */
  int sort_p, advance, start_clock_var;

  /* Head/tail info for this block.  */
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *head = NEXT_INSN (prev_head);
  rtx_insn *tail = PREV_INSN (next_tail);

  if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
      && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
    find_modifiable_mems (head, tail);

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */

  gcc_assert (head != tail || INSN_P (head));

  haifa_recovery_bb_recently_added_p = false;

  backtrack_queue = NULL;

    dump_new_block_header (0, *target_bb, head, tail);
  if (sched_verbose >= 2)
      dump_insn_stream (head, tail);
      memset (&rank_for_schedule_stats, 0,
	      sizeof (rank_for_schedule_stats));

  if (init_state == NULL)
    state_reset (curr_state);
  else
    memcpy (curr_state, init_state, dfa_state_size);

  /* Clear the ready list.  */
  ready.first = ready.veclen - 1;

  /* It is used for first cycle multipass scheduling.  */
  temp_state = alloca (dfa_state_size);

  if (targetm.sched.init)
    targetm.sched.init (sched_dump, sched_verbose, ready.veclen);

  /* We start inserting insns after PREV_HEAD.  */
  last_scheduled_insn = prev_head;
  last_nondebug_scheduled_insn = NULL;
  nonscheduled_insns_begin = NULL;

  gcc_assert ((NOTE_P (last_scheduled_insn)
	       || DEBUG_INSN_P (last_scheduled_insn))
	      && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);

  /* Initialize INSN_QUEUE.  Q_SIZE is the total number of insns in the
     queue.  */
  insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
  memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));

  /* Start just before the beginning of time.  */

  /* We need queue and ready lists and clock_var be initialized
     in try_ready () (which is called through init_ready_list ()).  */
  (*current_sched_info->init_ready_list) ();

    sched_pressure_start_bb (*target_bb);

  /* The algorithm is O(n^2) in the number of ready insns at any given
     time in the worst case.  Before reload we are more likely to have
     big lists so truncate them to a reasonable size.  */
  if (!reload_completed
      && ready.n_ready - ready.n_debug > param_max_sched_ready_insns)
      ready_sort_debug (&ready);
      ready_sort_real (&ready);

      /* Find first free-standing insn past param_max_sched_ready_insns.
	 If there are debug insns, we know they're first.  */
      for (i = param_max_sched_ready_insns + ready.n_debug; i < ready.n_ready;
	   i++)
	if (!SCHED_GROUP_P (ready_element (&ready, i)))

      if (sched_verbose >= 2)
	  fprintf (sched_dump,
		   ";;\t\tReady list on entry: %d insns:  ", ready.n_ready);
	  debug_ready_list (&ready);
	  fprintf (sched_dump,
		   ";;\t\t before reload => truncated to %d insns\n", i);

      /* Delay all insns past it for 1 cycle.  If debug counter is
	 activated make an exception for the insn right after
	 nonscheduled_insns_begin.  */
	rtx_insn *skip_insn;

	if (dbg_cnt (sched_insn) == false)
	  skip_insn = first_nonscheduled_insn ();

	while (i < ready.n_ready)
	    insn = ready_remove (&ready, i);

	    if (insn != skip_insn)
	      queue_insn (insn, 1, "list truncated");

	  ready_add (&ready, skip_insn, true);
  /* Now we can restore basic block notes and maintain precise cfg.  */
  restore_bb_notes (*target_bb);

  last_clock_var = -1;

  gcc_assert (scheduled_insns.length () == 0);

  must_backtrack = false;
  modulo_insns_scheduled = 0;

  ls.modulo_epilogue = false;
  ls.first_cycle_insn_p = true;

  /* Loop until all the insns in BB are scheduled.  */
  while ((*current_sched_info->schedule_more_p) ())
      perform_replacements_new_cycle ();

	  start_clock_var = clock_var;

	  advance_one_cycle ();

	  /* Add to the ready list all pending insns that can be issued now.
	     If there are no ready insns, increment clock until one
	     is ready and add all pending insns at that point to the ready
	     list.  */
	  queue_to_ready (&ready);

	  gcc_assert (ready.n_ready);

	  if (sched_verbose >= 2)
	      fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
	      debug_ready_list (&ready);

	  advance -= clock_var - start_clock_var;
      while (advance > 0);

      if (ls.modulo_epilogue)
	  int stage = clock_var / modulo_ii;
	  if (stage > modulo_last_stage * 2 + 2)
	      if (sched_verbose >= 2)
		fprintf (sched_dump,
			 ";;\t\tmodulo scheduled succeeded at II %d\n",
			 modulo_ii);
      else if (modulo_ii > 0)
	  int stage = clock_var / modulo_ii;
	  if (stage > modulo_max_stages)
	      if (sched_verbose >= 2)
		fprintf (sched_dump,
			 ";;\t\tfailing schedule due to excessive stages\n");
	  if (modulo_n_insns == modulo_insns_scheduled
	      && stage > modulo_last_stage)
	      if (sched_verbose >= 2)
		fprintf (sched_dump,
			 ";;\t\tfound kernel after %d stages, II %d\n",
			 stage, modulo_ii);
	      ls.modulo_epilogue = true;

      prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
      if (ready.n_ready == 0)

      ls.shadows_only_p = false;
      cycle_issued_insns = 0;
      ls.can_issue_more = issue_rate;
	  if (sort_p && ready.n_ready > 0)
	      /* Sort the ready list based on priority.  This must be
		 done every iteration through the loop, as schedule_insn
		 may have readied additional insns that will not be
		 sorted correctly.  */
	      ready_sort (&ready);

	      if (sched_verbose >= 2)
		  fprintf (sched_dump,
			   ";;\t\tReady list after ready_sort:  ");
		  debug_ready_list (&ready);

	  /* We don't want md sched reorder to even see debug insns, so put
	     them out right away.  */
	  if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
	      && (*current_sched_info->schedule_more_p) ())
	      while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
		  rtx_insn *insn = ready_remove_first (&ready);
		  gcc_assert (DEBUG_INSN_P (insn));
		  (*current_sched_info->begin_schedule_ready) (insn);
		  scheduled_insns.safe_push (insn);
		  last_scheduled_insn = insn;
		  advance = schedule_insn (insn);
		  gcc_assert (advance == 0);
		  if (ready.n_ready > 0)
		    ready_sort (&ready);

	  if (ls.first_cycle_insn_p && !ready.n_ready)

	resume_after_backtrack:
	  /* Allow the target to reorder the list, typically for
	     better instruction bundling.  */
	      && (ready.n_ready == 0
		  || !SCHED_GROUP_P (ready_element (&ready, 0))))
	      if (ls.first_cycle_insn_p && targetm.sched.reorder)
		ls.can_issue_more
		  = targetm.sched.reorder (sched_dump, sched_verbose,
					   ready_lastpos (&ready),
					   &ready.n_ready, clock_var);
	      else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
		ls.can_issue_more
		  = targetm.sched.reorder2 (sched_dump, sched_verbose,
					    ready.n_ready
					    ? ready_lastpos (&ready) : NULL,
					    &ready.n_ready, clock_var);
	restart_choose_ready:
	  if (sched_verbose >= 2)
	      fprintf (sched_dump, ";;\tReady list (t = %3d):  ",
		       clock_var);
	      debug_ready_list (&ready);
	      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
		print_curr_reg_pressure ();

	  if (ready.n_ready == 0
	      && ls.can_issue_more
	      && reload_completed)
	      /* Allow scheduling insns directly from the queue in case
		 there's nothing better to do (ready list is empty) but
		 there are still vacant dispatch slots in the current cycle.  */
	      if (sched_verbose >= 6)
		fprintf (sched_dump, ";;\t\tSecond chance\n");
	      memcpy (temp_state, curr_state, dfa_state_size);
	      if (early_queue_to_ready (temp_state, &ready))
		ready_sort (&ready);

	  if (ready.n_ready == 0
	      || !ls.can_issue_more
	      || state_dead_lock_p (curr_state)
	      || !(*current_sched_info->schedule_more_p) ())

	      /* Select and remove the insn from the ready list.  */
	      res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);

		goto restart_choose_ready;

	      gcc_assert (insn != NULL_RTX);

	      insn = ready_remove_first (&ready);

	  if (sched_pressure != SCHED_PRESSURE_NONE
	      && INSN_TICK (insn) > clock_var)
	      ready_add (&ready, insn, true);

	  if (targetm.sched.dfa_new_cycle
	      && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
					      insn, last_clock_var,
					      clock_var, &sort_p))
	    /* SORT_P is used by the target to override sorting
	       of the ready list.  This is needed when the target
	       has modified its internal structures expecting that
	       the insn will be issued next.  As we need the insn
	       to have the highest priority (so it will be returned by
	       the ready_remove_first call above), we invoke
	       ready_add (&ready, insn, true).
	       But, still, there is one issue: INSN can be later
	       discarded by scheduler's front end through
	       current_sched_info->can_schedule_ready_p, hence, won't
	       be issued next.  */
	      ready_add (&ready, insn, true);

	  if (current_sched_info->can_schedule_ready_p
	      && ! (*current_sched_info->can_schedule_ready_p) (insn))
	    /* We normally get here only if we don't want to move
	       insn from the split block.  */
	      TODO_SPEC (insn) = DEP_POSTPONED;
	      goto restart_choose_ready;
	  /* If this insn is the first part of a delay-slot pair, record a
	     backtrack point.  */
	      struct delay_pair *delay_entry;
	      delay_entry
		= delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
		  save_backtrack_point (delay_entry, ls);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tsaving backtrack point\n");

	  /* DECISION is made.  */

	  if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
	      modulo_insns_scheduled++;
	      modulo_last_stage = clock_var / modulo_ii;
	  if (TODO_SPEC (insn) & SPECULATIVE)
	    generate_recovery_code (insn);

	  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
	    targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);

	  /* Update counters, etc in the scheduler's front end.  */
	  (*current_sched_info->begin_schedule_ready) (insn);
	  scheduled_insns.safe_push (insn);
	  gcc_assert (NONDEBUG_INSN_P (insn));
	  last_nondebug_scheduled_insn = last_scheduled_insn = insn;

	  if (recog_memoized (insn) >= 0)
	      memcpy (temp_state, curr_state, dfa_state_size);
	      cost = state_transition (curr_state, insn);
	      if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
		gcc_assert (cost < 0);
	      if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
		cycle_issued_insns++;

	    asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
		     || asm_noperands (PATTERN (insn)) >= 0);

	  if (targetm.sched.variable_issue)
	    ls.can_issue_more =
	      targetm.sched.variable_issue (sched_dump, sched_verbose,
					    insn, ls.can_issue_more);
	  /* A naked CLOBBER or USE generates no instruction, so do
	     not count them against the issue rate.  */
	  else if (GET_CODE (PATTERN (insn)) != USE
		   && GET_CODE (PATTERN (insn)) != CLOBBER)
	    ls.can_issue_more--;
	  advance = schedule_insn (insn);

	  if (SHADOW_P (insn))
	    ls.shadows_only_p = true;

	  /* After issuing an asm insn we should start a new cycle.  */
	  if (advance == 0 && asm_p)

	  ls.first_cycle_insn_p = false;
	  if (ready.n_ready > 0)
	    prune_ready_list (temp_state, false, ls.shadows_only_p,
			      ls.modulo_epilogue);
      if (!must_backtrack)
	for (i = 0; i < ready.n_ready; i++)
	    rtx_insn *insn = ready_element (&ready, i);
	    if (INSN_EXACT_TICK (insn) == clock_var)
		must_backtrack = true;

      if (must_backtrack && modulo_ii > 0)
	  if (modulo_backtracks_left == 0)
	  modulo_backtracks_left--;

      while (must_backtrack)
	  struct haifa_saved_data *failed;
	  rtx_insn *failed_insn;

	  must_backtrack = false;
	  failed = verify_shadows ();
	  gcc_assert (failed);

	  failed_insn = failed->delay_pair->i1;
	  /* Clear these queues.  */
	  perform_replacements_new_cycle ();
	  toggle_cancelled_flags (false);
	  unschedule_insns_until (failed_insn);
	  while (failed != backtrack_queue)
	    free_topmost_backtrack_point (true);
	  restore_last_backtrack_point (&ls);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
	  /* Delay by at least a cycle.  This could cause additional
	     backtracking.  */
	  queue_insn (failed_insn, 1, "backtracked");

	  if (ready.n_ready > 0)
	    goto resume_after_backtrack;

	      if (clock_var == 0 && ls.first_cycle_insn_p)

      ls.first_cycle_insn_p = true;
  if (ls.modulo_epilogue)

  if (!ls.first_cycle_insn_p || advance)
    advance_one_cycle ();
  perform_replacements_new_cycle ();
  /* Once again, debug insn suckiness: they can be on the ready list
     even if they have unresolved dependencies.  To make our view
     of the world consistent, remove such "ready" insns.  */
restart_debug_insn_loop:
  for (i = ready.n_ready - 1; i >= 0; i--)
      x = ready_element (&ready, i);
      if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
	  || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
	  ready_remove (&ready, i);
	  goto restart_debug_insn_loop;

  for (i = ready.n_ready - 1; i >= 0; i--)
      x = ready_element (&ready, i);
      resolve_dependencies (x);
  for (i = 0; i <= max_insn_queue_index; i++)
      rtx_insn_list *link;
      while ((link = insn_queue[i]) != NULL)
	  rtx_insn *x = link->insn ();
	  insn_queue[i] = link->next ();
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  free_INSN_LIST_node (link);
	  resolve_dependencies (x);

  undo_all_replacements ();

      fprintf (sched_dump, ";;\tReady list (final):  ");
      debug_ready_list (&ready);

  if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
    /* Sanity check -- queue must be empty now.  Meaningless if region has
       multiple bbs.  */
    gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
  else if (modulo_ii == 0)
      /* We must maintain QUEUE_INDEX between blocks in region.  */
      for (i = ready.n_ready - 1; i >= 0; i--)
	  x = ready_element (&ready, i);
	  QUEUE_INDEX (x) = QUEUE_NOWHERE;
	  TODO_SPEC (x) = HARD_DEP;

      for (i = 0; i <= max_insn_queue_index; i++)
	  rtx_insn_list *link;
	  for (link = insn_queue[i]; link; link = link->next ())
	      QUEUE_INDEX (x) = QUEUE_NOWHERE;
	      TODO_SPEC (x) = HARD_DEP;
	  free_INSN_LIST_list (&insn_queue[i]);

  if (sched_pressure == SCHED_PRESSURE_MODEL)
    model_end_schedule ();
      commit_schedule (prev_head, tail, target_bb);
	fprintf (sched_dump, ";;   total time = %d\n", clock_var);
      last_scheduled_insn = tail;
  scheduled_insns.truncate (0);

  if (!current_sched_info->queue_must_finish_empty
      || haifa_recovery_bb_recently_added_p)
      /* INSN_TICK (minimum clock tick at which the insn becomes
	 ready) may be not correct for the insn in the subsequent
	 blocks of the region.  We should use a correct value of
	 `clock_var' or modify INSN_TICK.  It is better to keep
	 clock_var value equal to 0 at the start of a basic block.
	 Therefore we modify INSN_TICK here.  */
      fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);

  if (targetm.sched.finish)
      targetm.sched.finish (sched_dump, sched_verbose);
      /* Target might have added some instructions to the scheduled block
	 in its md_finish () hook.  These new insns don't have any data
	 initialized and to identify them we extend h_i_d so that they'll
	 get zero luids.  */
      sched_extend_luids ();

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last_scheduled_insn;

      fprintf (sched_dump, ";;   new head = %d\n;;   new tail = %d\n",
	       INSN_UID (head), INSN_UID (tail));

      if (sched_verbose >= 2)
	  dump_insn_stream (head, tail);
	  print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
      fprintf (sched_dump, "\n");

  head = restore_other_notes (head, NULL);

  current_sched_info->head = head;
  current_sched_info->tail = tail;

  free_backtrack_queue ();
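
/* A reduced skeleton of the main loop of schedule_block above, with
   bookkeeping, modulo scheduling and backtracking stripped away (an
   illustrative sketch only, not the actual code):

     while (schedule_more_p ())
       {
	 do
	   {
	     advance_one_cycle ();
	     queue_to_ready (&ready);	   (pending insns -> ready list)
	   }
	 while (advance > 0);

	 for (;;)			   (issue insns for this cycle)
	   {
	     ready_sort (&ready);
	     res = choose_ready (&ready, first_cycle_insn_p, &insn);
	     if (res < 0)
	       break;			   (finish the cycle)
	     if (res > 0)
	       continue;		   (ready list changed; retry)
	     begin_schedule_ready (insn);
	     scheduled_insns.safe_push (insn);
	     advance = schedule_insn (insn);
	     first_cycle_insn_p = false;
	   }
       }
     commit_schedule (prev_head, tail, target_bb);  */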
/* Set_priorities: compute priority of each insn in the block.  */

set_priorities (rtx_insn *head, rtx_insn *tail)
  int sched_max_insns_priority =
    current_sched_info->sched_max_insns_priority;
  rtx_insn *prev_head;

  if (head == tail && ! INSN_P (head))

  prev_head = PREV_INSN (head);
  for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
      (void) priority (insn);

      gcc_assert (INSN_PRIORITY_KNOWN (insn));

      sched_max_insns_priority = MAX (sched_max_insns_priority,
				      INSN_PRIORITY (insn));

  current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
/* Set sched_dump and sched_verbose for the desired debugging output.  */

setup_sched_dump (void)
  sched_verbose = sched_verbose_param;
  sched_dump = dump_file;
/* Allocate data for register pressure sensitive scheduling.  */

alloc_global_sched_pressure_data (void)
  if (sched_pressure != SCHED_PRESSURE_NONE)
      int i, max_regno = max_reg_num ();

      if (sched_dump != NULL)
	/* We need info about pseudos for rtl dumps about pseudo
	   classes and costs.  */
	regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
      sched_regno_pressure_class
	= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
      for (i = 0; i < max_regno; i++)
	sched_regno_pressure_class[i]
	  = (i < FIRST_PSEUDO_REGISTER
	     ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
	     : ira_pressure_class_translate[reg_allocno_class (i)]);
      curr_reg_live = BITMAP_ALLOC (NULL);
      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
	  saved_reg_live = BITMAP_ALLOC (NULL);
	  region_ref_regs = BITMAP_ALLOC (NULL);
      if (sched_pressure == SCHED_PRESSURE_MODEL)
	tmp_bitmap = BITMAP_ALLOC (NULL);

      /* Calculate number of CALL_SAVED_REGS and FIXED_REGS in register classes
	 that we calculate register pressure for.  */
      for (int c = 0; c < ira_pressure_classes_num; ++c)
	  enum reg_class cl = ira_pressure_classes[c];

	  call_saved_regs_num[cl] = 0;
	  fixed_regs_num[cl] = 0;

	  for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
	      unsigned int regno = ira_class_hard_regs[cl][i];
	      if (fixed_regs[regno])
		++fixed_regs_num[cl];
	      else if (!crtl->abi->clobbers_full_reg_p (regno))
		++call_saved_regs_num[cl];
/* Free data for register pressure sensitive scheduling.  Also called
   from schedule_region when stopping sched-pressure early.  */

free_global_sched_pressure_data (void)
  if (sched_pressure != SCHED_PRESSURE_NONE)
      if (regstat_n_sets_and_refs != NULL)
	regstat_free_n_sets_and_refs ();
      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
	  BITMAP_FREE (region_ref_regs);
	  BITMAP_FREE (saved_reg_live);
      if (sched_pressure == SCHED_PRESSURE_MODEL)
	BITMAP_FREE (tmp_bitmap);
      BITMAP_FREE (curr_reg_live);
      free (sched_regno_pressure_class);
/* Initialize some global state for the scheduler.  This function works
   with the common data shared between all the schedulers.  It is called
   from the scheduler specific initialization routine.  */

  /* Disable speculative loads in their presence if cc0 defined.  */
    flag_schedule_speculative_load = 0;

  if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
    targetm.sched.dispatch_do (NULL, DISPATCH_INIT);

  if (live_range_shrinkage_p)
    sched_pressure = SCHED_PRESSURE_WEIGHTED;
  else if (flag_sched_pressure
	   && !reload_completed
	   && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
    sched_pressure = ((enum sched_pressure_algorithm)
		      param_sched_pressure_algorithm);
  else
    sched_pressure = SCHED_PRESSURE_NONE;

  if (sched_pressure != SCHED_PRESSURE_NONE)
    ira_setup_eliminable_regset ();

  /* Initialize SPEC_INFO.  */
  if (targetm.sched.set_sched_flags)
      spec_info = &spec_info_var;
      targetm.sched.set_sched_flags (spec_info);

      if (spec_info->mask != 0)
	  spec_info->data_weakness_cutoff
	    = (param_sched_spec_prob_cutoff * MAX_DEP_WEAK) / 100;
	  spec_info->control_weakness_cutoff
	    = (param_sched_spec_prob_cutoff * REG_BR_PROB_BASE) / 100;
	/* So we won't read anything accidentally.  */
    /* So we won't read anything accidentally.  */

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    issue_rate = targetm.sched.issue_rate ();

  if (targetm.sched.first_cycle_multipass_dfa_lookahead
      /* Don't use max_issue with reg_pressure scheduling.  Multipass
	 scheduling and reg_pressure scheduling undo each other's decisions.  */
      && sched_pressure == SCHED_PRESSURE_NONE)
    dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();

  /* Set to "0" so that we recalculate.  */
  max_lookahead_tries = 0;

  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  dfa_state_size = state_size ();

  init_alias_analysis ();

      df_set_flags (DF_LR_RUN_DCE);
      df_note_add_problem ();

      /* More problems needed for interloop dep calculation in SMS.  */
      if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
	  df_rd_add_problem ();
	  df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);

  /* Do not run DCE after reload, as this can kill nops inserted
     by bundling.  */
  if (reload_completed)
    df_clear_flags (DF_LR_RUN_DCE);

  regstat_compute_calls_crossed ();

  if (targetm.sched.init_global)
    targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);

  alloc_global_sched_pressure_data ();

  curr_state = xmalloc (dfa_state_size);
static void haifa_init_only_bb (basic_block, basic_block);

/* Initialize data structures specific to the Haifa scheduler.  */

haifa_sched_init (void)
  setup_sched_dump ();

  scheduled_insns.create (0);

  if (spec_info != NULL)
      sched_deps_info->use_deps_list = 1;
      sched_deps_info->generate_spec_deps = 1;

  /* Initialize luids, dependency caches, target and h_i_d for the
     whole function.  */
    auto_vec<basic_block> bbs (n_basic_blocks_for_fn (cfun));

    FOR_EACH_BB_FN (bb, cfun)
      bbs.quick_push (bb);
    sched_init_luids (bbs);
    sched_deps_init (true);
    sched_extend_target ();
    haifa_init_h_i_d (bbs);

  sched_init_only_bb = haifa_init_only_bb;
  sched_split_block = sched_split_block_1;
  sched_create_empty_bb = sched_create_empty_bb_1;
  haifa_recovery_bb_ever_added_p = false;

  nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
  before_recovery = 0;
/* Finish work with the data specific to the Haifa scheduler.  */

haifa_sched_finish (void)
  sched_create_empty_bb = NULL;
  sched_split_block = NULL;
  sched_init_only_bb = NULL;

  if (spec_info && spec_info->dump)
      char c = reload_completed ? 'a' : 'b';

      fprintf (spec_info->dump,
	       ";; %s:\n", current_function_name ());

      fprintf (spec_info->dump,
	       ";; Procedure %cr-begin-data-spec motions == %d\n",
	       c, nr_begin_data);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-be-in-data-spec motions == %d\n",
	       c, nr_be_in_data);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-begin-control-spec motions == %d\n",
	       c, nr_begin_control);
      fprintf (spec_info->dump,
	       ";; Procedure %cr-be-in-control-spec motions == %d\n",
	       c, nr_be_in_control);

  scheduled_insns.release ();

  /* Finalize h_i_d, dependency caches, and luids for the whole
     function.  Target will be finalized in md_global_finish ().  */
  sched_deps_finish ();
  sched_finish_luids ();
  current_sched_info = NULL;
/* Free global data used during insn scheduling.  This function works with
   the common data shared between the schedulers.  */

  haifa_finish_h_i_d ();
  free_global_sched_pressure_data ();

  if (targetm.sched.finish_global)
    targetm.sched.finish_global (sched_dump, sched_verbose);

  end_alias_analysis ();

  regstat_free_calls_crossed ();

/* Free all delay_pair structures that were recorded.  */

free_delay_pairs (void)
      delay_htab->empty ();
      delay_htab_i2->empty ();

/* Fix INSN_TICKs of the instructions in the current block as well as
   INSN_TICKs of their dependents.
   HEAD and TAIL are the begin and the end of the current scheduled block.  */
fix_inter_tick (rtx_insn *head, rtx_insn *tail)
  /* Set of instructions with corrected INSN_TICK.  */
  auto_bitmap processed;
  /* ??? It is doubtful if we should assume that cycle advance happens on
     basic block boundaries.  Basically insns that are unconditionally ready
     on the start of the block are more preferable than those which have
     a one cycle dependency over insn from the previous block.  */
  int next_clock = clock_var + 1;

  /* Iterates over scheduled instructions and fix their INSN_TICKs and
     INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
     across different blocks.  */
  for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
      sd_iterator_def sd_it;

      tick = INSN_TICK (head);
      gcc_assert (tick >= MIN_TICK);

      /* Fix INSN_TICK of instruction from just scheduled block.  */
      if (bitmap_set_bit (processed, INSN_LUID (head)))
	  if (tick < MIN_TICK)

	  INSN_TICK (head) = tick;

      if (DEBUG_INSN_P (head))

      FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
	  next = DEP_CON (dep);
	  tick = INSN_TICK (next);

	  if (tick != INVALID_TICK
	      /* If NEXT has its INSN_TICK calculated, fix it.
		 If not - it will be properly calculated from
		 scratch later in fix_tick_ready.  */
	      && bitmap_set_bit (processed, INSN_LUID (next)))
	      if (tick < MIN_TICK)

	      if (tick > INTER_TICK (next))
		INTER_TICK (next) = tick;
	      else
		tick = INTER_TICK (next);

	      INSN_TICK (next) = tick;
/* Check if NEXT is ready to be added to the ready or queue list.
   If "yes", add it to the proper list.
   Returns:
      -1 - is not ready yet,
       0 - added to the ready list,
   0 < N - queued for N cycles.  */

try_ready (rtx_insn *next)
  ds_t old_ts, new_ts;

  old_ts = TODO_SPEC (next);

  gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
	      && (old_ts == HARD_DEP
		  || old_ts == DEP_POSTPONED
		  || (old_ts & SPECULATIVE)
		  || old_ts == DEP_CONTROL));

  new_ts = recompute_todo_spec (next, false);

  if (new_ts & (HARD_DEP | DEP_POSTPONED))
    gcc_assert (new_ts == old_ts
		&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
  else if (current_sched_info->new_ready)
    new_ts = current_sched_info->new_ready (next, new_ts);

  /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
     have its original pattern or changed (speculative) one.  This is due
     to changing ebb in region scheduling.
     * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
     has speculative pattern.

     We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
     control-speculative NEXT could have been discarded by sched-rgn.c
     (the same case as when discarded by can_schedule_ready_p ()).  */

  if ((new_ts & SPECULATIVE)
      /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
	 need to change anything.  */
      && new_ts != old_ts)
      gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));

      res = haifa_speculate_insn (next, new_ts, &new_pat);

	  /* It would be nice to change DEP_STATUS of all dependences,
	     which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
	     so we won't reanalyze anything.  */

	  /* We follow the rule, that every speculative insn
	     has non-null ORIG_PAT.  */
	  if (!ORIG_PAT (next))
	    ORIG_PAT (next) = PATTERN (next);

	  if (!ORIG_PAT (next))
	    /* If we are going to overwrite the original pattern of insn,
	       save it.  */
	    ORIG_PAT (next) = PATTERN (next);

	  res = haifa_change_pattern (next, new_pat);

  /* We need to restore pattern only if (new_ts == 0), because otherwise it is
     either correct (new_ts & SPECULATIVE),
     or we simply don't care (new_ts & HARD_DEP).  */

  gcc_assert (!ORIG_PAT (next)
	      || !IS_SPECULATION_BRANCHY_CHECK_P (next));

  TODO_SPEC (next) = new_ts;

  if (new_ts & (HARD_DEP | DEP_POSTPONED))
      /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
	 control-speculative NEXT could have been discarded by sched-rgn.c
	 (the same case as when discarded by can_schedule_ready_p ()).  */
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/

      change_queue_index (next, QUEUE_NOWHERE);

  else if (!(new_ts & BEGIN_SPEC)
	   && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
	   && !IS_SPECULATION_CHECK_P (next))
    /* We should change pattern of every previously speculative
       instruction - and we determine if NEXT was speculative by using
       ORIG_PAT field.  Except one case - speculation checks have ORIG_PAT
       pat too, so skip them.  */
      bool success = haifa_change_pattern (next, ORIG_PAT (next));
      gcc_assert (success);
      ORIG_PAT (next) = 0;

  if (sched_verbose >= 2)
      fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
	       (*current_sched_info->print_insn) (next, 0));

      if (spec_info && spec_info->dump)
	  if (new_ts & BEGIN_DATA)
	    fprintf (spec_info->dump, "; data-spec;");
	  if (new_ts & BEGIN_CONTROL)
	    fprintf (spec_info->dump, "; control-spec;");
	  if (new_ts & BE_IN_CONTROL)
	    fprintf (spec_info->dump, "; in-control-spec;");
      if (TODO_SPEC (next) & DEP_CONTROL)
	fprintf (sched_dump, " predicated");
      fprintf (sched_dump, "\n");

  adjust_priority (next);

  return fix_tick_ready (next);
/* Calculate INSN_TICK of NEXT and add it to either ready or queue list.  */

fix_tick_ready (rtx_insn *next)
  if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
      sd_iterator_def sd_it;

      tick = INSN_TICK (next);
      /* If tick is not equal to INVALID_TICK, then update
	 INSN_TICK of NEXT with the most recent resolved dependence
	 cost.  Otherwise, recalculate from scratch.  */
      full_p = (tick == INVALID_TICK);

      FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
	  rtx_insn *pro = DEP_PRO (dep);

	  gcc_assert (INSN_TICK (pro) >= MIN_TICK);

	  tick1 = INSN_TICK (pro) + dep_cost (dep);

      INSN_TICK (next) = tick;

  delay = tick - clock_var;
  if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
    delay = QUEUE_READY;

  change_queue_index (next, delay);
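
/* Worked example for the computation above (illustrative ticks): if NEXT
   has two resolved back dependencies, one on a producer scheduled at
   tick 4 with dep_cost 2 and one on a producer at tick 5 with dep_cost 3,
   then TICK == MAX (4 + 2, 5 + 3) == 8.  With clock_var == 6 that yields
   DELAY == 2, so NEXT is queued for two cycles; with clock_var >= 8 it
   would go straight to the ready list.  */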
/* Move NEXT to the proper queue list with (DELAY >= 1),
   or add it to the ready list (DELAY == QUEUE_READY),
   or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE).  */

change_queue_index (rtx_insn *next, int delay)
  int i = QUEUE_INDEX (next);

  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
  gcc_assert (i != QUEUE_SCHEDULED);

  if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
      || (delay < 0 && delay == i))
    /* We have nothing to do.  */

  /* Remove NEXT from wherever it is now.  */
  if (i == QUEUE_READY)
    ready_remove_insn (next);
  else if (i >= 0)
    queue_remove (next);

  /* Add it to the proper place.  */
  if (delay == QUEUE_READY)
    ready_add (readyp, next, false);
  else if (delay >= 1)
    queue_insn (next, delay, "change queue index");

  if (sched_verbose >= 2)
      fprintf (sched_dump, ";;\t\ttick updated: insn %s",
	       (*current_sched_info->print_insn) (next, 0));

      if (delay == QUEUE_READY)
	fprintf (sched_dump, " into ready\n");
      else if (delay >= 1)
	fprintf (sched_dump, " into queue with cost=%d\n", delay);
      else
	fprintf (sched_dump, " removed from ready or queue lists\n");
static int sched_ready_n_insns = -1;

/* Initialize per region data structures.  */

sched_extend_ready_list (int new_sched_ready_n_insns)
  if (sched_ready_n_insns == -1)
    /* At the first call we need to initialize one more choice_stack
       entry.  */
      sched_ready_n_insns = 0;
      scheduled_insns.reserve (new_sched_ready_n_insns);
  else
    i = sched_ready_n_insns + 1;

  ready.veclen = new_sched_ready_n_insns + issue_rate;
  ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);

  gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);

  ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
					 sched_ready_n_insns,
					 sizeof (*ready_try));

  /* We allocate +1 element to save initial state in the choice_stack[0]
     entry.  */
  choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
			     new_sched_ready_n_insns + 1);

  for (; i <= new_sched_ready_n_insns; i++)
      choice_stack[i].state = xmalloc (dfa_state_size);

      if (targetm.sched.first_cycle_multipass_init)
	targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
						    .target_data));

  sched_ready_n_insns = new_sched_ready_n_insns;
/* Free per region data structures.  */

sched_finish_ready_list (void)
  for (i = 0; i <= sched_ready_n_insns; i++)
      if (targetm.sched.first_cycle_multipass_fini)
	targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
						    .target_data));

      free (choice_stack[i].state);
  free (choice_stack);
  choice_stack = NULL;

  sched_ready_n_insns = -1;
haifa_luid_for_non_insn (rtx x)
  gcc_assert (NOTE_P (x) || LABEL_P (x));

/* Generates recovery code for INSN.  */

generate_recovery_code (rtx_insn *insn)
  if (TODO_SPEC (insn) & BEGIN_SPEC)
    begin_speculative_block (insn);

  /* Here we have insn with no dependencies to
     instructions other than CHECK_SPEC ones.  */

  if (TODO_SPEC (insn) & BE_IN_SPEC)
    add_to_speculative_block (insn);
/* Helper function.
   Tries to add speculative dependencies of type FS between instructions
   in deps_list L and TWIN.  */

process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
  sd_iterator_def sd_it;

  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
      consumer = DEP_CON (dep);

      ds = DEP_STATUS (dep);

      if (/* If we want to create speculative dep.  */
	  /* And we can do that because this is a true dep.  */
	  && (ds & DEP_TYPES) == DEP_TRUE)
	  gcc_assert (!(ds & BE_IN_SPEC));

	  if (/* If this dep can be overcome with 'begin speculation'.  */
	      /* Then we have a choice: keep the dep 'begin speculative'
		 or transform it into 'be in speculative'.  */
	      if (/* In try_ready we assert that if insn once became ready
		     it can be removed from the ready (or queue) list only
		     due to backend decision.  Hence we can't let the
		     probability of the speculative dep decrease.  */
		  ds_weak (ds) <= ds_weak (fs))
		  new_ds = (ds & ~BEGIN_SPEC) | fs;

		  if (/* consumer can 'be in speculative'.  */
		      sched_insn_is_legitimate_for_speculation_p (consumer,
								  new_ds))
		    /* Transform it to be in speculative.  */

	  /* Mark the dep as 'be in speculative'.  */

	  dep_def _new_dep, *new_dep = &_new_dep;

	  init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
	  sd_add_dep (new_dep, false);
7918 begin_speculative_block (rtx_insn
*insn
)
7920 if (TODO_SPEC (insn
) & BEGIN_DATA
)
7922 if (TODO_SPEC (insn
) & BEGIN_CONTROL
)
7925 create_check_block_twin (insn
, false);
7927 TODO_SPEC (insn
) &= ~BEGIN_SPEC
;
static void haifa_init_insn (rtx_insn *);

/* Generates recovery code for BE_IN speculative INSN.  */

add_to_speculative_block (rtx_insn *insn)
  sd_iterator_def sd_it;

  auto_vec<rtx_insn *, 10> twins;

  ts = TODO_SPEC (insn);
  gcc_assert (!(ts & ~BE_IN_SPEC));

  if (ts & BE_IN_DATA)
  if (ts & BE_IN_CONTROL)

  TODO_SPEC (insn) &= ~BE_IN_SPEC;
  gcc_assert (!TODO_SPEC (insn));

  DONE_SPEC (insn) |= ts;

  /* First we convert all simple checks to branchy.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
      rtx_insn *check = DEP_PRO (dep);

      if (IS_SPECULATION_SIMPLE_CHECK_P (check))
	  create_check_block_twin (check, true);

	  /* Restart search.  */
	  sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);

	/* Continue search.  */
	sd_iterator_next (&sd_it);

  auto_vec<rtx_insn *> priorities_roots;
  clear_priorities (insn, &priorities_roots);

      rtx_insn *check, *twin;

      /* Get the first backward dependency of INSN.  */
      sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
      if (!sd_iterator_cond (&sd_it, &dep))
	/* INSN has no backward dependencies left.  */

      gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
		  && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
		  && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

      check = DEP_PRO (dep);

      gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
		  && QUEUE_INDEX (check) == QUEUE_NOWHERE);

      rec = BLOCK_FOR_INSN (check);

      twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
      haifa_init_insn (twin);

      sd_copy_back_deps (twin, insn, true);

      if (sched_verbose && spec_info->dump)
	/* INSN_BB (insn) isn't determined for twin insns yet.
	   So we can't use current_sched_info->print_insn.  */
	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
		 INSN_UID (twin), rec->index);

      twins.safe_push (twin);

      /* Add dependences between TWIN and all appropriate
	 instructions from REC.  */
      FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
	  rtx_insn *pro = DEP_PRO (dep);

	  gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);

	  /* INSN might have dependencies from the instructions from
	     several recovery blocks.  At this iteration we process those
	     producers that reside in REC.  */
	  if (BLOCK_FOR_INSN (pro) == rec)
	      dep_def _new_dep, *new_dep = &_new_dep;

	      init_dep (new_dep, pro, twin, REG_DEP_TRUE);
	      sd_add_dep (new_dep, false);

      process_insn_forw_deps_be_in_spec (insn, twin, ts);

      /* Remove all dependencies between INSN and insns in REC.  */
      for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
	   sd_iterator_cond (&sd_it, &dep);)
	  rtx_insn *pro = DEP_PRO (dep);

	  if (BLOCK_FOR_INSN (pro) == rec)
	    sd_delete_dep (sd_it);
	    sd_iterator_next (&sd_it);

  /* We couldn't have added the dependencies between INSN and TWINS earlier
     because that would make TWINS appear in the INSN_BACK_DEPS (INSN).  */

  FOR_EACH_VEC_ELT_REVERSE (twins, i, twin)
      dep_def _new_dep, *new_dep = &_new_dep;

      init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
      sd_add_dep (new_dep, false);

  calc_priorities (priorities_roots);
/* Extends and fills with zeros (only the new part) array pointed to by P.  */

xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
  gcc_assert (new_nmemb >= old_nmemb);
  p = XRESIZEVAR (void, p, new_nmemb * size);
  memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
/* Helper function.
   Find fallthru edge from PRED.  */

find_fallthru_edge_from (basic_block pred)
  succ = pred->next_bb;
  gcc_assert (succ->prev_bb == pred);

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
      e = find_fallthru_edge (pred->succs);

	  gcc_assert (e->dest == succ || e->dest->index == EXIT_BLOCK);

      e = find_fallthru_edge (succ->preds);

	  gcc_assert (e->src == pred);
/* Extend per basic block data structures.  */

sched_extend_bb (void)
  /* The following is done to keep current_sched_info->next_tail non null.  */
  rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
  rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
  if (NEXT_INSN (end) == 0
	  /* Don't emit a NOTE if it would end up before a BARRIER.  */
	  && !BARRIER_P (next_nondebug_insn (end))))
      rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
      /* Make note appear outside BB.  */
      set_block_for_insn (note, NULL);
      BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;

/* Init per basic block data structures.  */

sched_init_bbs (void)
/* Initialize BEFORE_RECOVERY variable.  */

init_before_recovery (basic_block *before_recovery_ptr)
  last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
  e = find_fallthru_edge_from (last);

      /* We create two basic blocks:
	 1. Single instruction block is inserted right after E->SRC
	 and before EXIT_BLOCK.
	 2. Empty block right before EXIT_BLOCK.
	 Between these two blocks recovery blocks will be emitted.  */

      basic_block single, empty;

      /* If the fallthrough edge to exit we've found is from the block we've
	 created before, don't do anything more.  */
      if (last == after_recovery)

      adding_bb_to_current_region_p = false;

      single = sched_create_empty_bb (last);
      empty = sched_create_empty_bb (single);

      /* Add new blocks to the root loop.  */
      if (current_loops != NULL)
	  add_bb_to_loop (single, (*current_loops->larray)[0]);
	  add_bb_to_loop (empty, (*current_loops->larray)[0]);

      single->count = last->count;
      empty->count = last->count;
      BB_COPY_PARTITION (single, last);
      BB_COPY_PARTITION (empty, last);

      redirect_edge_succ (e, single);
      make_single_succ_edge (single, empty, 0);
      make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
			     EDGE_FALLTHRU);

      rtx_code_label *label = block_label (empty);
      rtx_jump_insn *x = emit_jump_insn_after (targetm.gen_jump (label),
					       BB_END (single));
      JUMP_LABEL (x) = label;
      LABEL_NUSES (label)++;
      haifa_init_insn (x);

      emit_barrier_after (x);

      sched_init_only_bb (empty, NULL);
      sched_init_only_bb (single, NULL);

      adding_bb_to_current_region_p = true;
      before_recovery = single;
      after_recovery = empty;

      if (before_recovery_ptr)
	*before_recovery_ptr = before_recovery;

      if (sched_verbose >= 2 && spec_info->dump)
	fprintf (spec_info->dump,
		 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
		 last->index, single->index, empty->index);
  else
    before_recovery = last;
/* Returns new recovery block.  */

sched_create_recovery_block (basic_block *before_recovery_ptr)
  haifa_recovery_bb_recently_added_p = true;
  haifa_recovery_bb_ever_added_p = true;

  init_before_recovery (before_recovery_ptr);

  barrier = get_last_bb_insn (before_recovery);
  gcc_assert (BARRIER_P (barrier));

  rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);

  rec = create_basic_block (label, label, before_recovery);

  /* A recovery block always ends with an unconditional jump.  */
  emit_barrier_after (BB_END (rec));

  if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
    BB_SET_PARTITION (rec, BB_COLD_PARTITION);

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
	     rec->index);
/* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
   and emit necessary jumps.  */
void
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
			     basic_block second_bb)
{
  int edge_flags;

  /* This is fixing of incoming edge.  */
  /* ??? Which other flags should be specified?  */
  if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    edge_flags = EDGE_CROSSING;
  else
    edge_flags = 0;

  edge e2 = single_succ_edge (first_bb);
  edge e = make_edge (first_bb, rec, edge_flags);

  /* TODO: The actual probability can be determined and is computed as
     'todo_spec' variable in create_check_block_twin and
     in sel-sched.c `check_ds' in create_speculation_check.  */
  e->probability = profile_probability::very_unlikely ();
  rec->count = e->count ();
  e2->probability = e->probability.invert ();

  rtx_code_label *label = block_label (second_bb);
  rtx_jump_insn *jump = emit_jump_insn_after (targetm.gen_jump (label),
					      BB_END (rec));
  JUMP_LABEL (jump) = label;
  LABEL_NUSES (label)++;

  if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
    /* Partition type is the same, if it is "unpartitioned".  */
    {
      /* Rewritten from cfgrtl.c.  */
      if (crtl->has_bb_partition && targetm_common.have_named_sections)
	{
	  /* We don't need the same note for the check because
	     any_condjump_p (check) == true.  */
	  CROSSING_JUMP_P (jump) = 1;
	}
      edge_flags = EDGE_CROSSING;
    }
  else
    edge_flags = 0;

  make_single_succ_edge (rec, second_bb, edge_flags);
  if (dom_info_available_p (CDI_DOMINATORS))
    set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
}

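/* The resulting local CFG, as a sketch (probabilities as set above):

     first_bb --(very unlikely)------> rec
     first_bb --(inverted, likely)---> second_bb
     rec ------(unconditional jump)--> second_bb  */
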
/* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
   INSN is a simple check, that should be converted to branchy one.  */
static void
create_check_block_twin (rtx_insn *insn, bool mutate_p)
{
  basic_block rec;
  rtx_insn *label, *check, *twin;
  rtx check_pat;
  ds_t fs;
  sd_iterator_def sd_it;
  dep_t dep;
  dep_def _new_dep, *new_dep = &_new_dep;
  ds_t todo_spec;

  gcc_assert (ORIG_PAT (insn) != NULL_RTX);

  if (!mutate_p)
    todo_spec = TODO_SPEC (insn);
  else
    {
      gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
		  && (TODO_SPEC (insn) & SPECULATIVE) == 0);

      todo_spec = CHECK_SPEC (insn);
    }

  todo_spec &= SPECULATIVE;

  /* Create recovery block.  */
  if (mutate_p || targetm.sched.needs_block_p (todo_spec))
    {
      rec = sched_create_recovery_block (NULL);
      label = BB_HEAD (rec);
    }
  else
    {
      rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
      label = NULL;
    }

  /* Emit CHECK.  */
  check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* To have mem_reg alive at the beginning of second_bb,
	 we emit check BEFORE insn, so insn after splitting
	 insn will be at the beginning of second_bb, which will
	 provide us with the correct life information.  */
      check = emit_jump_insn_before (check_pat, insn);
      JUMP_LABEL (check) = label;
      LABEL_NUSES (label)++;
    }
  else
    check = emit_insn_before (check_pat, insn);

  /* Extend data structures.  */
  haifa_init_insn (check);

  /* CHECK is being added to current region.  Extend ready list.  */
  gcc_assert (sched_ready_n_insns != -1);
  sched_extend_ready_list (sched_ready_n_insns + 1);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  RECOVERY_BLOCK (check) = rec;

  if (sched_verbose && spec_info->dump)
    fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
	     (*current_sched_info->print_insn) (check, 0));

  gcc_assert (ORIG_PAT (insn));

  /* Initialize TWIN (twin is a duplicate of original instruction
     in the recovery block).  */
  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      sd_iterator_def sd_it;
      dep_t dep;

      FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
	if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
	  {
	    struct _dep _dep2, *dep2 = &_dep2;

	    init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);

	    sd_add_dep (dep2, true);
	  }

      twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
      haifa_init_insn (twin);

      if (sched_verbose && spec_info->dump)
	/* INSN_BB (insn) isn't determined for twin insns yet.
	   So we can't use current_sched_info->print_insn.  */
	fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
		 INSN_UID (twin), rec->index);
    }
  else
    {
      ORIG_PAT (check) = ORIG_PAT (insn);
      HAS_INTERNAL_DEP (check) = 1;
      twin = check;
      /* ??? We probably should change all OUTPUT dependencies to
	 (TRUE | OUTPUT).  */
    }

  /* Copy all resolved back dependencies of INSN to TWIN.  This will
     provide correct value for INSN_TICK (TWIN).  */
  sd_copy_back_deps (twin, insn, true);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    /* In case of branchy check, fix CFG.  */
    {
      basic_block first_bb, second_bb;
      rtx_insn *jump;

      first_bb = BLOCK_FOR_INSN (check);
      second_bb = sched_split_block (first_bb, check);

      sched_create_recovery_edges (first_bb, rec, second_bb);

      sched_init_only_bb (second_bb, first_bb);
      sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));

      jump = BB_END (rec);
      haifa_init_insn (jump);
    }

  /* Move backward dependences from INSN to CHECK and
     move forward dependences from INSN to TWIN.  */

  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds;

      /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
	 check --TRUE--> producer  ??? or ANTI ???
	 twin  --TRUE--> producer
	 twin  --ANTI--> check

	 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
	 check --ANTI--> producer
	 twin  --ANTI--> producer
	 twin  --ANTI--> check

	 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
	 check ~~TRUE~~> producer
	 twin  ~~TRUE~~> producer
	 twin  --ANTI--> check  */

      ds = DEP_STATUS (dep);

      if (ds & BEGIN_SPEC)
	{
	  gcc_assert (!mutate_p);
	  ds &= ~BEGIN_SPEC;
	}

      init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
      sd_add_dep (new_dep, false);

      if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
	{
	  DEP_CON (new_dep) = twin;
	  sd_add_dep (new_dep, false);
	}
    }

  /* Second, remove backward dependencies of INSN.  */
  for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    {
      if ((DEP_STATUS (dep) & BEGIN_SPEC)
	  || mutate_p)
	/* We can delete this dep because we overcome it with
	   BEGIN_SPECULATION.  */
	sd_delete_dep (sd_it);
      else
	sd_iterator_next (&sd_it);
    }

  /* Future Speculations.  Determine what BE_IN speculations will be like.  */
  fs = 0;

  /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
     here.  */

  gcc_assert (!DONE_SPEC (insn));

  if (!mutate_p)
    {
      ds_t ts = TODO_SPEC (insn);

      DONE_SPEC (insn) = ts & BEGIN_SPEC;
      CHECK_SPEC (check) = ts & BEGIN_SPEC;

      /* Luckiness of future speculations solely depends upon initial
	 BEGIN speculation.  */
      if (ts & BEGIN_DATA)
	fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
      if (ts & BEGIN_CONTROL)
	fs = set_dep_weak (fs, BE_IN_CONTROL,
			   get_dep_weak (ts, BEGIN_CONTROL));
    }
  else
    CHECK_SPEC (check) = CHECK_SPEC (insn);

  /* Future speculations: call the helper.  */
  process_insn_forw_deps_be_in_spec (insn, twin, fs);

  if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      /* Which types of dependencies should we use here is,
	 generally, machine-dependent question...  But, for now,
	 we use ANTI_DEPENDENCE here.  */

      if (!mutate_p)
	{
	  init_dep (new_dep, insn, check, REG_DEP_TRUE);
	  sd_add_dep (new_dep, false);

	  init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
	  sd_add_dep (new_dep, false);
	}
      else
	{
	  if (spec_info->dump)
	    fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
		     (*current_sched_info->print_insn) (insn, 0));

	  /* Remove all dependencies of the INSN.  */
	  {
	    sd_it = sd_iterator_start (insn, (SD_LIST_FORW
					      | SD_LIST_BACK
					      | SD_LIST_RES_BACK));
	    while (sd_iterator_cond (&sd_it, &dep))
	      sd_delete_dep (sd_it);
	  }

	  /* If former check (INSN) already was moved to the ready (or queue)
	     list, add new check (CHECK) there too.  */
	  if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
	    try_ready (check);

	  /* Remove old check from instruction stream and free its
	     data.  */
	  sched_remove_insn (insn);
	}

      init_dep (new_dep, check, twin, REG_DEP_ANTI);
      sd_add_dep (new_dep, false);
    }
  else
    {
      init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
      sd_add_dep (new_dep, false);
    }

  if (!mutate_p)
    /* Fix priorities.  If MUTATE_P is nonzero, this is not necessary,
       because it'll be done later in add_to_speculative_block.  */
    {
      auto_vec<rtx_insn *> priorities_roots;

      clear_priorities (twin, &priorities_roots);
      calc_priorities (priorities_roots);
    }
}

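/* Summary sketch of the two cases handled above (names as in the code;
   illustrative only):

   Branchy check (REC != EXIT): INSN's block is split after the newly
   emitted CHECK; TWIN, a copy of ORIG_PAT (INSN), lives in the recovery
   block together with a jump back to SECOND_BB:

     first_bb:  ... check --(speculation failed)--> rec
     second_bb: insns that followed the split point
     rec:       twin; jump to second_bb

   Simple check (REC == EXIT): no recovery block is needed and CHECK
   doubles as its own twin (TWIN == CHECK).  */
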
/* Removes dependency between instructions in the recovery block REC
   and usual region instructions.  It keeps inner dependences so it
   won't be necessary to recompute them.  */
static void
fix_recovery_deps (basic_block rec)
{
  rtx_insn *note, *insn, *jump;
  auto_vec<rtx_insn *, 10> ready_list;
  auto_bitmap in_ready;

  /* NOTE - a basic block note.  */
  note = NEXT_INSN (BB_HEAD (rec));
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  insn = BB_END (rec);
  gcc_assert (JUMP_P (insn));
  insn = PREV_INSN (insn);

  do
    {
      sd_iterator_def sd_it;
      dep_t dep;

      for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	   sd_iterator_cond (&sd_it, &dep);)
	{
	  rtx_insn *consumer = DEP_CON (dep);

	  if (BLOCK_FOR_INSN (consumer) != rec)
	    {
	      sd_delete_dep (sd_it);

	      if (bitmap_set_bit (in_ready, INSN_LUID (consumer)))
		ready_list.safe_push (consumer);
	    }
	  else
	    {
	      gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);

	      sd_iterator_next (&sd_it);
	    }
	}

      insn = PREV_INSN (insn);
    }
  while (insn != note);

  /* Try to add instructions to the ready or queue list.  */
  unsigned int i;
  rtx_insn *temp;
  FOR_EACH_VEC_ELT_REVERSE (ready_list, i, temp)
    try_ready (temp);

  /* Fixing jump's dependences.  */
  insn = BB_HEAD (rec);
  jump = BB_END (rec);

  gcc_assert (LABEL_P (insn));
  insn = NEXT_INSN (insn);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  add_jump_dependencies (insn, jump);
}

/* Change pattern of INSN to NEW_PAT.  Invalidate cached haifa
   instruction data.  */
static bool
haifa_change_pattern (rtx_insn *insn, rtx new_pat)
{
  int t;

  t = validate_change (insn, &PATTERN (insn), new_pat, 0);
  if (!t)
    return false;

  update_insn_after_change (insn);
  return true;
}

/* -1 - can't speculate,
   0 - for speculation with REQUEST mode it is OK to use
   current instruction pattern,
   1 - need to change pattern for *NEW_PAT to be speculative.  */
int
sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION
	      && (request & SPECULATIVE)
	      && sched_insn_is_legitimate_for_speculation_p (insn, request));

  if ((request & spec_info->mask) != request)
    return -1;

  if (request & BE_IN_SPEC
      && !(request & BEGIN_SPEC))
    return 0;

  return targetm.sched.speculate_insn (insn, request, new_pat);
}

static int
haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
  gcc_assert (sched_deps_info->generate_spec_deps
	      && !IS_SPECULATION_CHECK_P (insn));

  if (HAS_INTERNAL_DEP (insn)
      || SCHED_GROUP_P (insn))
    return -1;

  return sched_speculate_insn (insn, request, new_pat);
}

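/* A minimal caller sketch for the -1/0/1 protocol documented above
   sched_speculate_insn.  EXAMPLE_TRY_SPECULATE is a hypothetical name for
   illustration; no such helper exists elsewhere in the scheduler.  */
static bool example_try_speculate (rtx_insn *, ds_t) ATTRIBUTE_UNUSED;
static bool
example_try_speculate (rtx_insn *insn, ds_t request)
{
  rtx new_pat;

  switch (sched_speculate_insn (insn, request, &new_pat))
    {
    case -1:
      /* INSN cannot be speculated under REQUEST at all.  */
      return false;
    case 0:
      /* The current pattern already satisfies REQUEST.  */
      return true;
    case 1:
      /* The pattern must be replaced with the speculative NEW_PAT.  */
      return haifa_change_pattern (insn, new_pat);
    default:
      gcc_unreachable ();
    }
}
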
/* Print some information about block BB, which starts with HEAD and
   ends with TAIL, before scheduling it.
   I is zero, if scheduler is about to start with the fresh ebb.  */
static void
dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
{
  if (!i)
    fprintf (sched_dump,
	     ";;   ======================================================\n");
  else
    fprintf (sched_dump,
	     ";;   =====================ADVANCING TO=====================\n");
  fprintf (sched_dump,
	   ";;   -- basic block %d from %d to %d -- %s reload\n",
	   bb->index, INSN_UID (head), INSN_UID (tail),
	   (reload_completed ? "after" : "before"));
  fprintf (sched_dump,
	   ";;   ======================================================\n");
  fprintf (sched_dump, "\n");
}

/* Unlink basic block notes and labels and save them, so they
   can be easily restored.  We unlink basic block notes in EBB to
   provide back-compatibility with the previous code, as target backends
   assume, that there'll be only instructions between
   current_sched_info->{head and tail}.  We restore these notes as soon
   as we can.
   FIRST (LAST) is the first (last) basic block in the ebb.
   NB: In usual case (FIRST == LAST) nothing is really done.  */
void
unlink_bb_notes (basic_block first, basic_block last)
{
  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  if (first == last)
    return;

  bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));

  /* Make a sentinel.  */
  if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    bb_header[last->next_bb->index] = 0;

  first = first->next_bb;
  do
    {
      rtx_insn *prev, *label, *note, *next;

      label = BB_HEAD (last);
      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      prev = PREV_INSN (label);
      next = NEXT_INSN (note);
      gcc_assert (prev && next);

      SET_NEXT_INSN (prev) = next;
      SET_PREV_INSN (next) = prev;

      bb_header[last->index] = label;

      if (last == first)
	break;

      last = last->prev_bb;
    }
  while (1);
}

/* Restore basic block notes.
   FIRST is the first basic block in the ebb.  */
static void
restore_bb_notes (basic_block first)
{
  if (!bb_header)
    return;

  /* We DON'T unlink basic block notes of the first block in the ebb.  */
  first = first->next_bb;
  /* Remember: FIRST is actually a second basic block in the ebb.  */

  while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
	 && bb_header[first->index])
    {
      rtx_insn *prev, *label, *note, *next;

      label = bb_header[first->index];
      prev = PREV_INSN (label);
      next = NEXT_INSN (prev);

      if (LABEL_P (label))
	note = NEXT_INSN (label);
      else
	note = label;
      gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));

      bb_header[first->index] = 0;

      SET_NEXT_INSN (prev) = label;
      SET_NEXT_INSN (note) = next;
      SET_PREV_INSN (next) = note;

      first = first->next_bb;
    }

  free (bb_header);
  bb_header = 0;
}

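/* Sketch of the insn-chain surgery done by the two functions above, for
   one unlinked block (illustrative):

     before:  ... prev -> label -> bb-note -> next ...
     after:   ... prev -> next ...  (LABEL..NOTE parked in
				     bb_header[bb->index])

   restore_bb_notes splices the parked LABEL..NOTE pair back in front of
   NEXT, walking the ebb in order.  */
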
/* Helper function.
   Fix CFG after both in- and inter-block movement of
   control_flow_insn_p JUMP.  */
static void
fix_jump_move (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
	      || IS_SPECULATION_BRANCHY_CHECK_P (jump));

  if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
    /* if jump_bb_next is not empty.  */
    BB_END (jump_bb) = BB_END (jump_bb_next);

  if (BB_END (bb) != PREV_INSN (jump))
    /* Then there are instructions after jump that should be placed
       to jump_bb_next.  */
    BB_END (jump_bb_next) = BB_END (bb);
  else
    /* Otherwise jump_bb_next is empty.  */
    BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));

  /* To make assertion in move_insn happy.  */
  BB_END (bb) = PREV_INSN (jump);

  update_bb_for_insn (jump_bb_next);
}

/* Fix CFG after interblock movement of control_flow_insn_p JUMP.  */
static void
move_block_after_check (rtx_insn *jump)
{
  basic_block bb, jump_bb, jump_bb_next;
  vec<edge, va_gc> *t;

  bb = BLOCK_FOR_INSN (PREV_INSN (jump));
  jump_bb = BLOCK_FOR_INSN (jump);
  jump_bb_next = jump_bb->next_bb;

  update_bb_for_insn (jump_bb);

  gcc_assert (IS_SPECULATION_CHECK_P (jump)
	      || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));

  unlink_block (jump_bb_next);
  link_block (jump_bb_next, bb);

  t = bb->succs;
  bb->succs = 0;
  move_succs (&(jump_bb->succs), bb);
  move_succs (&(jump_bb_next->succs), jump_bb);
  move_succs (&t, jump_bb_next);

  df_mark_solutions_dirty ();

  common_sched_info->fix_recovery_cfg
    (bb->index, jump_bb->index, jump_bb_next->index);
}

/* Helper function for move_block_after_check.
   This function attaches edge vector pointed to by SUCCSP to
   block TO.  */
static void
move_succs (vec<edge, va_gc> **succsp, basic_block to)
{
  edge e;
  edge_iterator ei;

  gcc_assert (to->succs == 0);

  to->succs = *succsp;

  FOR_EACH_EDGE (e, ei, to->succs)
    e->src = to;

  *succsp = 0;
}

/* Remove INSN from the instruction stream.
   INSN should not have any dependencies.  */
static void
sched_remove_insn (rtx_insn *insn)
{
  sd_finish_insn (insn);

  change_queue_index (insn, QUEUE_NOWHERE);
  current_sched_info->add_remove_insn (insn, 1);
  delete_insn (insn);
}

/* Clear priorities of all instructions that are forward dependent on INSN.
   Store in vector pointed to by ROOTS_PTR insns on which priority () should
   be invoked to initialize all cleared priorities.  */
static void
clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
{
  sd_iterator_def sd_it;
  dep_t dep;
  bool insn_is_root_p = true;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);

      if (INSN_PRIORITY_STATUS (pro) >= 0
	  && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
	{
	  /* If DEP doesn't contribute to priority then INSN itself should
	     be added to priority roots.  */
	  if (contributes_to_priority_p (dep))
	    insn_is_root_p = false;

	  INSN_PRIORITY_STATUS (pro) = -1;
	  clear_priorities (pro, roots_ptr);
	}
    }

  if (insn_is_root_p)
    roots_ptr->safe_push (insn);
}

/* Recompute priorities of instructions whose priorities might have been
   changed.  ROOTS is a vector of instructions whose priority computation will
   trigger initialization of all cleared priorities.  */
static void
calc_priorities (rtx_vec_t roots)
{
  int i;
  rtx_insn *insn;

  FOR_EACH_VEC_ELT (roots, i, insn)
    priority (insn);
}

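/* The two functions above are always used as a pair; the usage pattern
   (as in create_check_block_twin above) is:

     auto_vec<rtx_insn *> priorities_roots;
     clear_priorities (insn, &priorities_roots);
     calc_priorities (priorities_roots);  */
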
/* Add dependences between JUMP and other instructions in the recovery
   block.  INSN is the first insn in the recovery block.  */
static void
add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
{
  do
    {
      insn = NEXT_INSN (insn);
      if (insn == jump)
	break;

      if (dep_list_size (insn, SD_LIST_FORW) == 0)
	{
	  dep_def _new_dep, *new_dep = &_new_dep;

	  init_dep (new_dep, insn, jump, REG_DEP_ANTI);
	  sd_add_dep (new_dep, false);
	}
    }
  while (1);

  gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
}

/* Extend data structures for logical insn UID.  */
void
sched_extend_luids (void)
{
  int new_luids_max_uid = get_max_uid () + 1;

  sched_luids.safe_grow_cleared (new_luids_max_uid);
}

/* Initialize LUID for INSN.  */
void
sched_init_insn_luid (rtx_insn *insn)
{
  int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
  int luid;

  if (i >= 0)
    {
      luid = sched_max_luid;
      sched_max_luid += i;
    }
  else
    luid = -1;

  SET_INSN_LUID (insn, luid);
}

/* Initialize luids for BBS.
   The hook common_sched_info->luid_for_non_insn () is used to determine
   if notes, labels, etc. need luids.  */
void
sched_init_luids (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  sched_extend_luids ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	sched_init_insn_luid (insn);
    }
}

/* Free LUIDs.  */
void
sched_finish_luids (void)
{
  sched_luids.release ();
  sched_max_luid = 1;
}

/* Return logical uid of INSN.  Helpful while debugging.  */
int
insn_luid (rtx_insn *insn)
{
  return INSN_LUID (insn);
}

/* Extend per insn data in the target.  */
void
sched_extend_target (void)
{
  if (targetm.sched.h_i_d_extended)
    targetm.sched.h_i_d_extended ();
}

/* Extend global scheduler structures (those that live across calls to
   schedule_block) to include information about just emitted INSN.  */
static void
extend_h_i_d (void)
{
  int reserve = (get_max_uid () + 1 - h_i_d.length ());
  if (reserve > 0
      && ! h_i_d.space (reserve))
    {
      h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
      sched_extend_target ();
    }
}

/* Initialize h_i_d entry of the INSN with default values.
   Values that are not explicitly initialized here hold zero.  */
static void
init_h_i_d (rtx_insn *insn)
{
  if (INSN_LUID (insn) > 0)
    {
      INSN_COST (insn) = -1;
      QUEUE_INDEX (insn) = QUEUE_NOWHERE;
      INSN_TICK (insn) = INVALID_TICK;
      INSN_EXACT_TICK (insn) = INVALID_TICK;
      INTER_TICK (insn) = INVALID_TICK;
      TODO_SPEC (insn) = HARD_DEP;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
      INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
	= AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
    }
}

/* Initialize haifa_insn_data for BBS.  */
void
haifa_init_h_i_d (bb_vec_t bbs)
{
  int i;
  basic_block bb;

  extend_h_i_d ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
	init_h_i_d (insn);
    }
}

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  reg_use_data *use, *next_use;
  reg_set_data *set, *next_set;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next_use)
	{
	  next_use = use->next_insn_use;
	  free (use);
	}
      for (set = data->reg_set_list; set != NULL; set = next_set)
	{
	  next_set = set->next_insn_set;
	  free (set);
	}
    }

  h_i_d.release ();
}

/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx_insn *insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure != SCHED_PRESSURE_NONE)
    init_insn_reg_pressure_info (insn);
}

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}

/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx_insn *
sched_emit_insn (rtx pat)
{
  rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  scheduled_insns.safe_push (insn);

  last_scheduled_insn = insn;
  return insn;
}

/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */
static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx_insn *insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || !INSN_P (insn)
      || INSN_CODE (insn) < 0
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
	{
	  /* Return ith element of ready.  */
	  insn = ready_remove (ready, i);
	  return insn;
	}
    }

  if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      /* Return i-th element of ready.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
	return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}

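/* The selection policy above, summarized: take the head insn when the
   list is trivial or that insn fits the dispatch window; otherwise take
   the first ready insn that does fit; if none fits and the target
   reports a dispatch window violation, take the head; otherwise prefer
   a compare (IS_CMP) insn; as a last resort, take the head anyway.  */
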
/* Get the number of ready insns in the ready list.  */
int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Get the I-th element of the ready list.  */
rtx_insn *
get_ready_element (int i)
{
  return ready_element (&ready, i);
}

#endif /* INSN_SCHEDULING */