/* Instruction scheduling pass.
   Copyright (C) 1992-2019 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for
   the normal instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe.  Register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.  (A minimal illustrative sketch of this loop
   appears just after the include directives below.)

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it, or
   finally
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "insn-attr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "common/common-target.h"
#include "dbgcnt.h"
#include "cfgloop.h"
#include "dumpfile.h"
#include "print-rtl.h"
#include "function-abi.h"

#ifdef INSN_SCHEDULING

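/* A minimal, self-contained sketch of the list-scheduling loop described
   in the header comment above.  It is illustrative only and not part of
   the scheduler proper: the toy_* names are hypothetical, the "machine"
   issues one insn per cycle, and the dependence graph is assumed to be
   acyclic.  Kept under #if 0 so it is never built.  */
#if 0
#define TOY_MAX 16

struct toy_insn
{
  int n_preds;		/* Unresolved incoming dependencies.  */
  int priority;		/* Longest path to the end of the block.  */
  int succs[TOY_MAX];	/* Indices of dependent insns.  */
  int n_succs;
};

/* Schedule INSNS[0..N-1], writing the chosen order into ORDER.  Each
   "cycle", pick the ready insn (no unresolved predecessors) with the
   highest priority, then release its successors into the ready set.  */
static void
toy_list_schedule (struct toy_insn *insns, int n, int *order)
{
  bool scheduled[TOY_MAX] = { false };
  for (int slot = 0; slot < n; slot++)
    {
      int best = -1;
      for (int i = 0; i < n; i++)
	if (!scheduled[i]
	    && insns[i].n_preds == 0
	    && (best < 0 || insns[i].priority > insns[best].priority))
	  best = i;
      scheduled[best] = true;
      order[slot] = best;
      /* Resolving this insn's outgoing dependencies may make its
	 successors ready on a later iteration.  */
      for (int j = 0; j < insns[best].n_succs; j++)
	insns[insns[best].succs[j]].n_preds--;
    }
}
#endif
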
/* True if we do register pressure relief through live-range
   shrinkage.  */
static bool live_range_shrinkage_p;

/* Switch on live range shrinkage.  */
void
initialize_live_range_shrinkage (void)
{
  live_range_shrinkage_p = true;
}

/* Switch off live range shrinkage.  */
void
finish_live_range_shrinkage (void)
{
  live_range_shrinkage_p = false;
}

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file;
   otherwise we set it to 1.  */

int issue_rate;

/* This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  */
bool sched_no_dce;

/* The current initiation interval used when modulo scheduling.  */
static int modulo_ii;

/* The maximum number of stages we are prepared to handle.  */
static int modulo_max_stages;

/* The number of insns that exist in each iteration of the loop.  We use this
   to detect when we've scheduled all insns from the first iteration.  */
static int modulo_n_insns;

/* The current count of insns in the first iteration of the loop that have
   already been scheduled.  */
static int modulo_insns_scheduled;

/* The maximum uid of insns from the first iteration of the loop.  */
static int modulo_iter0_max_uid;

/* The number of times we should attempt to backtrack when modulo scheduling.
   Decreased each time we have to backtrack.  */
static int modulo_backtracks_left;

/* The stage in which the last insn from the original loop was
   scheduled.  */
static int modulo_last_stage;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N=0: no debugging output.
   N=1: default value.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump.  */
FILE *sched_dump = 0;

/* This is a placeholder for the scheduler parameters common
   to all schedulers.  */
struct common_sched_info_def *common_sched_info;

#define INSN_TICK(INSN)	(HID (INSN)->tick)
#define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
#define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
/* Cached cost of the instruction.  Use insn_sched_cost to get cost of the
   insn.  -1 here means that the field is not initialized.  */
#define INSN_COST(INSN)	(HID (INSN)->cost)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Original order of insns in the ready list.
   Used to keep order of normal insns while separating DEBUG_INSNs.  */
#define INSN_RFS_DEBUG_ORIG_ORDER(INSN) (HID (INSN)->rfs_debug_orig_order)

/* The deciding reason for INSN's place in the ready list.  */
#define INSN_LAST_RFS_WIN(INSN) (HID (INSN)->last_rfs_win)

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
rtx_insn *note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
spec_info_t spec_info = NULL;

/* True if recovery block was added during scheduling of current block.
   Used to determine if we need to fix INSN_TICKs.  */
static bool haifa_recovery_bb_recently_added_p;

/* True if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  */
bool haifa_recovery_bb_ever_added_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx_insn **bb_header = 0;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Basic block just before the EXIT_BLOCK and after recovery, if we have
   created it.  */
basic_block after_recovery;

/* FALSE if we add bb to another region, so we don't need to initialize it.  */
bool adding_bb_to_current_region_p = true;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of the
   unscheduled insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c,
   based on the maximal time of functional unit reservations and of
   getting a result.  This is the longest time an insn may be queued.  */

static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)

#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
   queue nor the ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
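
/* A small sketch of the circular queue indexing used above, assuming a
   hypothetical mask of 7 (i.e. max_insn_queue_index == 7, so the queue
   has 8 slots).  Illustrative only; kept under #if 0.  */
#if 0
static int
toy_queue_slot (int q_ptr, int delay)
{
  /* Mirrors NEXT_Q_AFTER: advance DELAY slots and wrap around the
     power-of-two-sized buffer with a mask instead of a modulo.  */
  const int toy_mask = 7;
  return (q_ptr + delay) & toy_mask;
}
#endif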

/* The following variable value describes all current and future
   reservations of the processor units.  */
state_t curr_state;

/* The following variable value is the size of memory representing all
   current and future reservations of the processor units.  */
size_t dfa_state_size;

/* The following array is used to find the best insn from the ready list
   when the automaton pipeline interface is used.  */
signed char *ready_try = NULL;

/* The ready list.  */
struct ready_list ready = {NULL, 0, 0, 0, 0};

/* The pointer to the ready list (to be removed).  */
static struct ready_list *readyp = &ready;

/* Scheduling clock.  */
static int clock_var;

/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* Set to true if, when queuing a shadow insn, we discover that it would be
   scheduled too late.  */
static bool must_backtrack;

/* The following variable value is the number of essential insns issued on
   the current cycle.  An insn is an essential one if it changes the
   processor's state.  */
int cycle_issued_insns;

/* This records the actual schedule.  It is built up during the main phase
   of schedule_block, and afterwards used to reorder the insns in the RTL.  */
static vec<rtx_insn *> scheduled_insns;

static int may_trap_exp (const_rtx, int);

/* Nonzero iff the address is composed of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))

/* Returns a class that an insn with GET_DEST(insn)=x may belong to,
   as found by analyzing the insn's expression.  */

\f
static int haifa_luid_for_non_insn (rtx x);

/* Haifa version of sched_info hooks common to all headers.  */
const struct common_sched_info_def haifa_common_sched_info =
  {
    NULL, /* fix_recovery_cfg */
    NULL, /* add_block */
    NULL, /* estimate_number_of_insns */
    haifa_luid_for_non_insn, /* luid_for_non_insn */
    SCHED_PASS_UNKNOWN /* sched_pass_id */
  };

/* Mapping from instruction UID to its Logical UID.  */
vec<int> sched_luids;

/* Next LUID to assign to an instruction.  */
int sched_max_luid = 1;

/* Haifa Instruction Data.  */
vec<haifa_insn_data_def> h_i_d;

void (* sched_init_only_bb) (basic_block, basic_block);

/* Split block function.  Different schedulers might use different functions
   to keep their internal data consistent.  */
basic_block (* sched_split_block) (basic_block, rtx);

/* Create empty basic block after the specified block.  */
basic_block (* sched_create_empty_bb) (basic_block);

/* Return the number of cycles until INSN is expected to be ready.
   Return zero if it already is.  */
static int
insn_delay (rtx_insn *insn)
{
  return MAX (INSN_TICK (insn) - clock_var, 0);
}

static int
may_trap_exp (const_rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory:  a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}

/* Classifies rtx X of an insn for the purpose of verifying that X can be
   executed speculatively (and consequently the insn can be moved
   speculatively), by examining X, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_rtx (const_rtx x)
{
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (x) == PARALLEL)
    {
      int i, len = XVECLEN (x, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (x);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (x, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (x), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (x), 0));
	  break;
	case COND_EXEC:
	  tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
	  if (tmp_class == TRAP_RISKY)
	    break;
	  tmp_class = WORST_CLASS (tmp_class,
				   may_trap_exp (COND_EXEC_TEST (x), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}

int
haifa_classify_insn (const_rtx insn)
{
  return haifa_classify_rtx (PATTERN (insn));
}
\f
/* After the scheduler initialization function has been called, this function
   can be called to enable modulo scheduling.  II is the initiation interval
   we should use; it affects the delays for delay_pairs that were recorded as
   separated by a given number of stages.

   MAX_STAGES provides us with a limit
   after which we give up scheduling; the caller must have unrolled at least
   as many copies of the loop body and recorded delay_pairs for them.

   INSNS is the number of real (non-debug) insns in one iteration of
   the loop.  MAX_UID can be used to test whether an insn belongs to
   the first iteration of the loop; all of them have a uid lower than
   MAX_UID.  */
void
set_modulo_params (int ii, int max_stages, int insns, int max_uid)
{
  modulo_ii = ii;
  modulo_max_stages = max_stages;
  modulo_n_insns = insns;
  modulo_iter0_max_uid = max_uid;
  modulo_backtracks_left = param_max_modulo_backtrack_attempts;
}

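/* A hedged sketch of how a caller might enable modulo scheduling via
   set_modulo_params above.  The values (an II of 4 cycles, a limit of
   3 stages) and the toy_* name are hypothetical.  Kept under #if 0.  */
#if 0
static void
toy_enable_modulo_scheduling (int n_insns, int max_uid)
{
  /* The caller must already have unrolled at least 3 copies of the
     loop body and recorded delay_pairs for them.  */
  set_modulo_params (4, 3, n_insns, max_uid);
}
#endif
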
/* A structure to record a pair of insns where the first one is a real
   insn that has delay slots, and the second is its delayed shadow.
   I1 is scheduled normally and will emit an assembly instruction,
   while I2 describes the side effect that takes place at the
   transition between cycles CYCLES and (CYCLES + 1) after I1.  */
struct delay_pair
{
  struct delay_pair *next_same_i1;
  rtx_insn *i1, *i2;
  int cycles;
  /* When doing modulo scheduling, a delay_pair can also be used to
     show that I1 and I2 are the same insn in a different stage.  If that
     is the case, STAGES will be nonzero.  */
  int stages;
};

/* Helpers for delay hashing.  */

struct delay_i1_hasher : nofree_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I1.  */

inline hashval_t
delay_i1_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i1);
}

/* Return true if I1 of pair X is the same as that of pair Y.  */

inline bool
delay_i1_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i1 == y;
}

struct delay_i2_hasher : free_ptr_hash <delay_pair>
{
  typedef void *compare_type;
  static inline hashval_t hash (const delay_pair *);
  static inline bool equal (const delay_pair *, const void *);
};

/* Returns a hash value for X, based on hashing just I2.  */

inline hashval_t
delay_i2_hasher::hash (const delay_pair *x)
{
  return htab_hash_pointer (x->i2);
}

/* Return true if I2 of pair X is the same as that of pair Y.  */

inline bool
delay_i2_hasher::equal (const delay_pair *x, const void *y)
{
  return x->i2 == y;
}

/* Two hash tables to record delay_pairs, one indexed by I1 and the other
   indexed by I2.  */
static hash_table<delay_i1_hasher> *delay_htab;
static hash_table<delay_i2_hasher> *delay_htab_i2;

/* Called through htab_traverse.  Walk the hashtable using I2 as
   index, and delete all elements involving a UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i2_traverse (delay_pair **slot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p = *slot;
  if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
    {
      delay_htab_i2->clear_slot (slot);
    }
  return 1;
}

/* Called through htab_traverse.  Walk the hashtable using I1 as
   index, and delete all elements involving a UID higher than
   that pointed to by *DATA.  */
int
haifa_htab_i1_traverse (delay_pair **pslot, int *data)
{
  int maxuid = *data;
  struct delay_pair *p, *first, **pprev;

  if (INSN_UID ((*pslot)->i1) >= maxuid)
    {
      delay_htab->clear_slot (pslot);
      return 1;
    }
  pprev = &first;
  for (p = *pslot; p; p = p->next_same_i1)
    {
      if (INSN_UID (p->i2) < maxuid)
	{
	  *pprev = p;
	  pprev = &p->next_same_i1;
	}
    }
  *pprev = NULL;
  if (first == NULL)
    delay_htab->clear_slot (pslot);
  else
    *pslot = first;
  return 1;
}

/* Discard all delay pairs which involve an insn with a UID higher
   than MAX_UID.  */
void
discard_delay_pairs_above (int max_uid)
{
  delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
  delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
}

/* This function can be called by a port just before it starts the final
   scheduling pass.  It records the fact that an instruction with delay
   slots has been split into two insns, I1 and I2.  The first one will be
   scheduled normally and initiates the operation.  The second one is a
   shadow which must follow a specific number of cycles after I1; its only
   purpose is to show the side effect that occurs at that cycle in the RTL.
   If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
   while I2 retains the original insn type.

   There are two ways in which the number of cycles can be specified,
   involving the CYCLES and STAGES arguments to this function.  If STAGES
   is zero, we just use the value of CYCLES.  Otherwise, STAGES is a factor
   which is multiplied by MODULO_II to give the number of cycles.  This is
   only useful if the caller also calls set_modulo_params to enable modulo
   scheduling.  */

void
record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
{
  struct delay_pair *p = XNEW (struct delay_pair);
  struct delay_pair **slot;

  p->i1 = i1;
  p->i2 = i2;
  p->cycles = cycles;
  p->stages = stages;

  if (!delay_htab)
    {
      delay_htab = new hash_table<delay_i1_hasher> (10);
      delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
    }
  slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
  p->next_same_i1 = *slot;
  *slot = p;
  slot = delay_htab_i2->find_slot (p, INSERT);
  *slot = p;
}

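/* A hedged sketch of how a port might use record_delay_slot_pair after
   splitting a delay-slot insn into I1 and its shadow I2.  The two-cycle
   delay and the toy_* name are hypothetical.  Kept under #if 0.  */
#if 0
static void
toy_record_split (rtx_insn *i1, rtx_insn *i2)
{
  /* I2 must issue exactly two cycles after I1; STAGES == 0 means
     CYCLES is used directly rather than multiplied by modulo_ii.  */
  record_delay_slot_pair (i1, i2, 2, 0);
}
#endif
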
/* Examine the delay pair hashtable to see if INSN is a shadow for another,
   and return the other insn if so.  Return NULL otherwise.  */
rtx_insn *
real_insn_for_shadow (rtx_insn *insn)
{
  struct delay_pair *pair;

  if (!delay_htab)
    return NULL;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair || pair->stages > 0)
    return NULL;
  return pair->i1;
}

/* For a pair P of insns, return the fixed distance in cycles from the first
   insn after which the second must be scheduled.  */
static int
pair_delay (struct delay_pair *p)
{
  if (p->stages == 0)
    return p->cycles;
  else
    return p->stages * modulo_ii;
}

/* Given an insn INSN, add a dependence on its delayed shadow if it
   has one.  Also try to find situations where shadows depend on each other
   and add dependencies to the real insns to limit the amount of backtracking
   needed.  */
void
add_delay_dependencies (rtx_insn *insn)
{
  struct delay_pair *pair;
  sd_iterator_def sd_it;
  dep_t dep;

  if (!delay_htab)
    return;

  pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
  if (!pair)
    return;
  add_dependence (insn, pair->i1, REG_DEP_ANTI);
  if (pair->stages)
    return;

  FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      struct delay_pair *other_pair
	= delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
      if (!other_pair || other_pair->stages)
	continue;
      if (pair_delay (other_pair) >= pair_delay (pair))
	{
	  if (sched_verbose >= 4)
	    {
	      fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (pair->i1));
	      fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
		       INSN_UID (pair->i1),
		       INSN_UID (pair->i2),
		       pair_delay (pair));
	      fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
		       INSN_UID (other_pair->i1),
		       INSN_UID (other_pair->i2),
		       pair_delay (other_pair));
	    }
	  add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
	}
    }
}
\f

/* Forward declarations.  */

static int priority (rtx_insn *, bool force_recompute = false);
static int autopref_rank_for_schedule (const rtx_insn *, const rtx_insn *);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx_insn **, int);
static void queue_insn (rtx_insn *, int, const char *);
static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx_insn *);
static void advance_one_cycle (void);
static void extend_h_i_d (void);


/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between two types of notes:

   (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (2) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static void ready_add (struct ready_list *, rtx_insn *, bool);
static rtx_insn *ready_remove_first (struct ready_list *);
static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx_insn *ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx_insn *);

static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void change_queue_index (rtx_insn *, int);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static bool haifa_change_pattern (rtx_insn *, rtx);
static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx_insn *);
static void move_block_after_check (rtx_insn *);
static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx_insn *);
static void clear_priorities (rtx_insn *, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx_insn *, rtx_insn *);

#endif /* INSN_SCHEDULING */
\f
/* Point to state used for the current scheduling pass.  */
struct haifa_sched_info *current_sched_info;
\f
#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
enum sched_pressure_algorithm sched_pressure;

/* Map regno -> its pressure class.  The map is defined only when
   SCHED_PRESSURE != SCHED_PRESSURE_NONE.  */
enum reg_class *sched_regno_pressure_class;

/* The current register pressure.  Only elements corresponding to pressure
   classes are defined.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Saved value of the previous array.  */
static int saved_reg_pressure[N_REG_CLASSES];

/* Registers living at the given scheduling point.  */
static bitmap curr_reg_live;

/* Saved value of the previous array.  */
static bitmap saved_reg_live;

/* Registers mentioned in the current region.  */
static bitmap region_ref_regs;

/* Temporary bitmap used for SCHED_PRESSURE_MODEL.  */
static bitmap tmp_bitmap;

/* Effective number of available registers of a given class (see comment
   in sched_pressure_start_bb).  */
static int sched_class_regs_num[N_REG_CLASSES];
/* The number of registers that the function would need to save before it
   uses them, and the number of fixed_regs.  Helpers for calculating
   sched_class_regs_num.  */
static int call_saved_regs_num[N_REG_CLASSES];
static int fixed_regs_num[N_REG_CLASSES];

/* Initiate register pressure relative info for scheduling the current
   region.  Currently it only clears registers mentioned in the
   current region.  */
void
sched_init_region_reg_pressure_info (void)
{
  bitmap_clear (region_ref_regs);
}

/* PRESSURE[CL] describes the pressure on register class CL.  Update it
   for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
   LIVE tracks the set of live registers; if it is null, assume that
   every birth or death is genuine.  */
static inline void
mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
{
  enum reg_class pressure_class;

  pressure_class = sched_regno_pressure_class[regno];
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (pressure_class != NO_REGS)
	{
	  if (birth_p)
	    {
	      if (!live || bitmap_set_bit (live, regno))
		pressure[pressure_class]
		  += (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	  else
	    {
	      if (!live || bitmap_clear_bit (live, regno))
		pressure[pressure_class]
		  -= (ira_reg_class_max_nregs
		      [pressure_class][PSEUDO_REGNO_MODE (regno)]);
	    }
	}
    }
  else if (pressure_class != NO_REGS
	   && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      if (birth_p)
	{
	  if (!live || bitmap_set_bit (live, regno))
	    pressure[pressure_class]++;
	}
      else
	{
	  if (!live || bitmap_clear_bit (live, regno))
	    pressure[pressure_class]--;
	}
    }
}

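/* A minimal sketch of the pressure bookkeeping above: births raise the
   current pressure, deaths lower it, and the running maximum is what
   ultimately matters for spills.  EVENTS holds +nregs for each birth
   and -nregs for each death; the toy_* name is hypothetical.  Kept
   under #if 0.  */
#if 0
static int
toy_max_pressure (const int *events, int n_events)
{
  int pressure = 0, max_pressure = 0;
  for (int i = 0; i < n_events; i++)
    {
      pressure += events[i];
      if (pressure > max_pressure)
	max_pressure = pressure;
    }
  return max_pressure;
}
#endif
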
/* Initiate current register pressure related info from living
   registers given by LIVE.  */
static void
initiate_reg_pressure_info (bitmap live)
{
  int i;
  unsigned int j;
  bitmap_iterator bi;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
  bitmap_clear (curr_reg_live);
  EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
    if (sched_pressure == SCHED_PRESSURE_MODEL
	|| current_nr_blocks == 1
	|| bitmap_bit_p (region_ref_regs, j))
      mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
}

/* Mark registers in X as mentioned in the current region.  */
static void
setup_ref_regs (rtx x)
{
  int i, j;
  const RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (REG_P (x))
    {
      bitmap_set_range (region_ref_regs, REGNO (x), REG_NREGS (x));
      return;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      setup_ref_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      {
	for (j = 0; j < XVECLEN (x, i); j++)
	  setup_ref_regs (XVECEXP (x, i, j));
      }
}

/* Initiate current register pressure related info at the start of
   basic block BB.  */
static void
initiate_bb_reg_pressure_info (basic_block bb)
{
  unsigned int i ATTRIBUTE_UNUSED;
  rtx_insn *insn;

  if (current_nr_blocks > 1)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	setup_ref_regs (PATTERN (insn));
  initiate_reg_pressure_info (df_get_live_in (bb));
  if (bb_has_eh_pred (bb))
    for (i = 0; ; ++i)
      {
	unsigned int regno = EH_RETURN_DATA_REGNO (i);

	if (regno == INVALID_REGNUM)
	  break;
	if (! bitmap_bit_p (df_get_live_in (bb), regno))
	  mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
				     regno, true);
      }
}

/* Save current register pressure related info.  */
static void
save_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    saved_reg_pressure[ira_pressure_classes[i]]
      = curr_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (saved_reg_live, curr_reg_live);
}

/* Restore saved register pressure related info.  */
static void
restore_reg_pressure (void)
{
  int i;

  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]]
      = saved_reg_pressure[ira_pressure_classes[i]];
  bitmap_copy (curr_reg_live, saved_reg_live);
}

/* Return TRUE if the register is dying after its USE.  */
static bool
dying_use_p (struct reg_use_data *use)
{
  struct reg_use_data *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (NONDEBUG_INSN_P (next->insn)
	&& QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
      return false;
  return true;
}

/* Print info about the current register pressure and its excess for
   each pressure class.  */
static void
print_curr_reg_pressure (void)
{
  int i;
  enum reg_class cl;

  fprintf (sched_dump, ";;\t");
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
	       curr_reg_pressure[cl],
	       curr_reg_pressure[cl] - sched_class_regs_num[cl]);
    }
  fprintf (sched_dump, "\n");
}
\f
/* Determine if INSN has a condition that is clobbered if a register
   in SET_REGS is modified.  */
static bool
cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
{
  rtx pat = PATTERN (insn);
  gcc_assert (GET_CODE (pat) == COND_EXEC);
  if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
    {
      sd_iterator_def sd_it;
      dep_t dep;
      haifa_change_pattern (insn, ORIG_PAT (insn));
      FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	DEP_STATUS (dep) &= ~DEP_CANCELLED;
      TODO_SPEC (insn) = HARD_DEP;
      if (sched_verbose >= 2)
	fprintf (sched_dump,
		 ";;\t\tdequeue insn %s because of clobbered condition\n",
		 (*current_sched_info->print_insn) (insn, 0));
      return true;
    }

  return false;
}

/* This function should be called after modifying the pattern of INSN,
   to update scheduler data structures as needed.  */
static void
update_insn_after_change (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  dfa_clear_single_insn_cache (insn);

  sd_it = sd_iterator_start (insn,
			     SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      DEP_COST (dep) = UNKNOWN_DEP_COST;
      sd_iterator_next (&sd_it);
    }

  /* Invalidate INSN_COST, so it'll be recalculated.  */
  INSN_COST (insn) = -1;
  /* Invalidate INSN_TICK, so it'll be recalculated.  */
  INSN_TICK (insn) = INVALID_TICK;

  /* Invalidate autoprefetch data entry.  */
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
  INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
    = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
}


/* Two VECs, one to hold dependencies for which pattern replacements
   need to be applied or restored at the start of the next cycle, and
   another to hold an integer that is either one, to apply the
   corresponding replacement, or zero to restore it.  */
static vec<dep_t> next_cycle_replace_deps;
static vec<int> next_cycle_apply;

static void apply_replacement (dep_t, bool);
static void restore_pattern (dep_t, bool);

/* Look at the remaining dependencies for insn NEXT, and compute and return
   the TODO_SPEC value we should use for it.  This is called after one of
   NEXT's dependencies has been resolved.
   We also perform pattern replacements for predication, and for broken
   replacement dependencies.  The latter is only done if FOR_BACKTRACK is
   false.  */

static ds_t
recompute_todo_spec (rtx_insn *next, bool for_backtrack)
{
  ds_t new_ds;
  sd_iterator_def sd_it;
  dep_t dep, modify_dep = NULL;
  int n_spec = 0;
  int n_control = 0;
  int n_replace = 0;
  bool first_p = true;

  if (sd_lists_empty_p (next, SD_LIST_BACK))
    /* NEXT has all its dependencies resolved.  */
    return 0;

  if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
    return HARD_DEP;

  /* If NEXT is intended to sit adjacent to this instruction, we don't
     want to try to break any dependencies.  Treat it as a HARD_DEP.  */
  if (SCHED_GROUP_P (next))
    return HARD_DEP;

  /* Now we've got NEXT with speculative deps only.
     1. Look at the deps to see what we have to do.
     2. Check if we can do 'todo'.  */
  new_ds = 0;

  FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *pro = DEP_PRO (dep);
      ds_t ds = DEP_STATUS (dep) & SPECULATIVE;

      if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
	continue;

      if (ds)
	{
	  n_spec++;
	  if (first_p)
	    {
	      first_p = false;

	      new_ds = ds;
	    }
	  else
	    new_ds = ds_merge (new_ds, ds);
	}
      else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_control++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
      else if (DEP_REPLACE (dep) != NULL)
	{
	  if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
	    {
	      n_replace++;
	      modify_dep = dep;
	    }
	  DEP_STATUS (dep) &= ~DEP_CANCELLED;
	}
    }

  if (n_replace > 0 && n_control == 0 && n_spec == 0)
    {
      if (!dbg_cnt (sched_breakdep))
	return HARD_DEP;
      FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
	{
	  struct dep_replacement *desc = DEP_REPLACE (dep);
	  if (desc != NULL)
	    {
	      if (desc->insn == next && !for_backtrack)
		{
		  gcc_assert (n_replace == 1);
		  apply_replacement (dep, true);
		}
	      DEP_STATUS (dep) |= DEP_CANCELLED;
	    }
	}
      return 0;
    }

  else if (n_control == 1 && n_replace == 0 && n_spec == 0)
    {
      rtx_insn *pro, *other;
      rtx new_pat;
      rtx cond = NULL_RTX;
      bool success;
      rtx_insn *prev = NULL;
      int i;
      unsigned regno;

      if ((current_sched_info->flags & DO_PREDICATION) == 0
	  || (ORIG_PAT (next) != NULL_RTX
	      && PREDICATED_PAT (next) == NULL_RTX))
	return HARD_DEP;

      pro = DEP_PRO (modify_dep);
      other = real_insn_for_shadow (pro);
      if (other != NULL_RTX)
	pro = other;

      cond = sched_get_reverse_condition_uncached (pro);
      regno = REGNO (XEXP (cond, 0));

      /* Find the last scheduled insn that modifies the condition register.
	 We can stop looking once we find the insn we depend on through the
	 REG_DEP_CONTROL; if the condition register isn't modified after it,
	 we know that it still has the right value.  */
      if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
	FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
	  {
	    HARD_REG_SET t;

	    find_all_hard_reg_sets (prev, &t, true);
	    if (TEST_HARD_REG_BIT (t, regno))
	      return HARD_DEP;
	    if (prev == pro)
	      break;
	  }
      if (ORIG_PAT (next) == NULL_RTX)
	{
	  ORIG_PAT (next) = PATTERN (next);

	  new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
	  success = haifa_change_pattern (next, new_pat);
	  if (!success)
	    return HARD_DEP;
	  PREDICATED_PAT (next) = new_pat;
	}
      else if (PATTERN (next) != PREDICATED_PAT (next))
	{
	  bool success = haifa_change_pattern (next,
					       PREDICATED_PAT (next));
	  gcc_assert (success);
	}
      DEP_STATUS (modify_dep) |= DEP_CANCELLED;
      return DEP_CONTROL;
    }

  if (PREDICATED_PAT (next) != NULL_RTX)
    {
      int tick = INSN_TICK (next);
      bool success = haifa_change_pattern (next,
					   ORIG_PAT (next));
      INSN_TICK (next) = tick;
      gcc_assert (success);
    }

  /* We can't handle the case where there are both speculative and control
     dependencies, so we return HARD_DEP in such a case.  Also fail if
     we have speculative dependencies with not enough points, or more than
     one control dependency.  */
  if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
      || (n_spec > 0
	  /* Too few points?  */
	  && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
      || n_control > 0
      || n_replace > 0)
    return HARD_DEP;

  return new_ds;
}
\f
/* Pointer to the last instruction scheduled.  */
static rtx_insn *last_scheduled_insn;

/* Pointer to the last nondebug instruction scheduled within the
   block, or the prev_head of the scheduling block.  Used by
   rank_for_schedule, so that insns independent of the last scheduled
   insn will be preferred over dependent instructions.  */
static rtx_insn *last_nondebug_scheduled_insn;

/* Pointer that iterates through the list of unscheduled insns if we
   have a dbg_cnt enabled.  It always points at an insn prior to the
   first unscheduled one.  */
static rtx_insn *nonscheduled_insns_begin;

/* Compute cost of executing INSN.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
insn_sched_cost (rtx_insn *insn)
{
  int cost;

  if (sched_fusion)
    return 0;

  if (sel_sched_p ())
    {
      if (recog_memoized (insn) < 0)
	return 0;

      cost = insn_default_latency (insn);
      if (cost < 0)
	cost = 0;

      return cost;
    }

  cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  return cost;
}

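/* A small sketch of the caching convention used by insn_sched_cost
   above: a negative cached value means "not computed yet", and costs
   are clamped to be non-negative.  toy_* names are hypothetical; kept
   under #if 0.  */
#if 0
static int toy_cost_cache[16];	/* All entries initialized to -1.  */

static int
toy_cached_cost (int uid, int (*compute) (int))
{
  if (toy_cost_cache[uid] < 0)
    {
      int cost = compute (uid);
      toy_cost_cache[uid] = cost < 0 ? 0 : cost;
    }
  return toy_cost_cache[uid];
}
#endif
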
/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.
   ??? We also use this function to call recog_memoized on all insns.  */
int
dep_cost_1 (dep_t link, dw_t dw)
{
  rtx_insn *insn = DEP_PRO (link);
  rtx_insn *used = DEP_CON (link);
  int cost;

  if (DEP_COST (link) != UNKNOWN_DEP_COST)
    return DEP_COST (link);

  if (delay_htab)
    {
      struct delay_pair *delay_entry;
      delay_entry
	= delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
      if (delay_entry)
	{
	  if (delay_entry->i1 == insn)
	    {
	      DEP_COST (link) = pair_delay (delay_entry);
	      return DEP_COST (link);
	    }
	}
    }

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  We don't care about the
     dependence cost when only decreasing register pressure.  */
  if (recog_memoized (used) < 0)
    {
      cost = 0;
      recog_memoized (insn);
    }
  else
    {
      enum reg_note dep_type = DEP_TYPE (link);

      cost = insn_sched_cost (insn);

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}


      if (targetm.sched.adjust_cost)
	cost = targetm.sched.adjust_cost (used, (int) dep_type, insn, cost,
					  dw);

      if (cost < 0)
	cost = 0;
    }

  DEP_COST (link) = cost;
  return cost;
}

/* Compute cost of dependence LINK.
   This is the number of cycles between instruction issue and
   instruction results.  */
int
dep_cost (dep_t link)
{
  return dep_cost_1 (link, 0);
}

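/* A self-contained sketch of the cost rules in dep_cost_1 above: anti
   dependences cost 0, output dependences cost the difference of the
   default latencies (but at least 1), and true dependences cost the
   producer's latency.  The toy_* encoding of dependence kinds is
   hypothetical.  Kept under #if 0.  */
#if 0
enum toy_dep_kind { TOY_DEP_TRUE, TOY_DEP_ANTI, TOY_DEP_OUTPUT };

static int
toy_dep_cost (enum toy_dep_kind kind, int pro_latency, int con_latency)
{
  switch (kind)
    {
    case TOY_DEP_ANTI:
      return 0;
    case TOY_DEP_OUTPUT:
      {
	int cost = pro_latency - con_latency;
	return cost <= 0 ? 1 : cost;
      }
    default:
      return pro_latency;
    }
}
#endif
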
/* Use this sel-sched.c friendly function in reorder2 instead of increasing
   INSN_PRIORITY explicitly.  */
void
increase_insn_priority (rtx_insn *insn, int amount)
{
  if (!sel_sched_p ())
    {
      /* We're dealing with haifa-sched.c INSN_PRIORITY.  */
      if (INSN_PRIORITY_KNOWN (insn))
	INSN_PRIORITY (insn) += amount;
    }
  else
    {
      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
	 Use EXPR_PRIORITY instead.  */
      sel_add_to_insn_priority (insn, amount);
    }
}

/* Return 'true' if DEP should be included in priority calculations.  */
static bool
contributes_to_priority_p (dep_t dep)
{
  if (DEBUG_INSN_P (DEP_CON (dep))
      || DEBUG_INSN_P (DEP_PRO (dep)))
    return false;

  /* Critical path is meaningful in block boundaries only.  */
  if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
						    DEP_PRO (dep)))
    return false;

  if (DEP_REPLACE (dep) != NULL)
    return false;

  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
     then speculative instructions will be less likely to be
     scheduled.  That is because the priority of
     their producers will increase, and, thus, the
     producers will more likely be scheduled, thus,
     resolving the dependence.  */
  if (sched_deps_info->generate_spec_deps
      && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
      && (DEP_STATUS (dep) & SPECULATIVE))
    return false;

  return true;
}

/* Compute the number of nondebug deps in list LIST for INSN.  */

static int
dep_list_size (rtx_insn *insn, sd_list_types_def list)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int dbgcount = 0, nodbgcount = 0;

  if (!MAY_HAVE_DEBUG_INSNS)
    return sd_lists_size (insn, list);

  FOR_EACH_DEP (insn, list, sd_it, dep)
    {
      if (DEBUG_INSN_P (DEP_CON (dep)))
	dbgcount++;
      else if (!DEBUG_INSN_P (DEP_PRO (dep)))
	nodbgcount++;
    }

  gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));

  return nodbgcount;
}

bool sched_fusion;

/* Compute the priority number for INSN.  */
static int
priority (rtx_insn *insn, bool force_recompute)
{
  if (! INSN_P (insn))
    return 0;

  /* We should not be interested in priority of an already scheduled insn.  */
  gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);

  if (force_recompute || !INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = -1;

      if (sched_fusion)
	{
	  int this_fusion_priority;

	  targetm.sched.fusion_priority (insn, FUSION_MAX_PRIORITY,
					 &this_fusion_priority, &this_priority);
	  INSN_FUSION_PRIORITY (insn) = this_fusion_priority;
	}
      else if (dep_list_size (insn, SD_LIST_FORW) == 0)
	/* ??? We should set INSN_PRIORITY to insn_sched_cost when an insn
	   has some forward deps but all of them are ignored by
	   contributes_to_priority hook.  At the moment we set priority of
	   such insn to 0.  */
	this_priority = insn_sched_cost (insn);
      else
	{
	  rtx_insn *prev_first, *twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     differently from that of normal instructions.  Instead of walking
	     through INSN_FORW_DEPS (check) list, we walk through
	     INSN_FORW_DEPS list of each instruction in the corresponding
	     recovery block.  */

	  /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
	  rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      sd_iterator_def sd_it;
	      dep_t dep;

	      FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
		{
		  rtx_insn *next;
		  int next_priority;

		  next = DEP_CON (dep);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      int cost;

		      if (!contributes_to_priority_p (dep))
			continue;

		      if (twin == insn)
			cost = dep_cost (dep);
		      else
			{
			  struct _dep _dep1, *dep1 = &_dep1;

			  init_dep (dep1, insn, next, REG_DEP_ANTI);

			  cost = dep_cost (dep1);
			}

		      next_priority = cost + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}

      if (this_priority < 0)
	{
	  gcc_assert (this_priority == -1);

	  this_priority = insn_sched_cost (insn);
	}

      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_STATUS (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
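
/* A minimal sketch of the critical-path priority computed above: an
   insn's priority is the maximum, over its forward dependences, of the
   dependence cost plus the consumer's priority, falling back to the
   insn's own cost when nothing contributes.  The recursion is
   unmemoized and the toy_* types are hypothetical; kept under #if 0.  */
#if 0
struct toy_dep { int consumer; int cost; };

static int
toy_priority (int insn, const int *insn_cost,
	      const struct toy_dep (*forw)[4], const int *n_forw)
{
  int best = -1;
  for (int i = 0; i < n_forw[insn]; i++)
    {
      const struct toy_dep *d = &forw[insn][i];
      int p = d->cost + toy_priority (d->consumer, insn_cost, forw, n_forw);
      if (p > best)
	best = p;
    }
  return best < 0 ? insn_cost[insn] : best;
}
#endif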
\f
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

/* For each pressure class CL, set DEATH[CL] to the number of registers
   in that class that die in INSN.  */

static void
calculate_reg_deaths (rtx_insn *insn, int *death)
{
  int i;
  struct reg_use_data *use;

  for (i = 0; i < ira_pressure_classes_num; i++)
    death[ira_pressure_classes[i]] = 0;
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (dying_use_p (use))
      mark_regno_birth_or_death (0, death, use->regno, true);
}

/* Setup info about the current register pressure impact of scheduling
   INSN at the current scheduling point.  */
static void
setup_insn_reg_pressure_info (rtx_insn *insn)
{
  int i, change, before, after, hard_regno;
  int excess_cost_change;
  machine_mode mode;
  enum reg_class cl;
  struct reg_pressure_data *pressure_info;
  int *max_reg_pressure;
  static int death[N_REG_CLASSES];

  gcc_checking_assert (!DEBUG_INSN_P (insn));

  excess_cost_change = 0;
  calculate_reg_deaths (insn, death);
  pressure_info = INSN_REG_PRESSURE (insn);
  max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
  gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      gcc_assert (curr_reg_pressure[cl] >= 0);
      change = (int) pressure_info[i].set_increase - death[cl];
      before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
      after = MAX (0, max_reg_pressure[i] + change
		   - sched_class_regs_num[cl]);
      hard_regno = ira_class_hard_regs[cl][0];
      gcc_assert (hard_regno >= 0);
      mode = reg_raw_mode[hard_regno];
      excess_cost_change += ((after - before)
			     * (ira_memory_move_cost[mode][cl][0]
				+ ira_memory_move_cost[mode][cl][1]));
    }
  INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
\f
1754 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1755 It tries to make the scheduler take register pressure into account
1756 without introducing too many unnecessary stalls. It hooks into the
1757 main scheduling algorithm at several points:
1758
1759 - Before scheduling starts, model_start_schedule constructs a
1760 "model schedule" for the current block. This model schedule is
1761 chosen solely to keep register pressure down. It does not take the
1762 target's pipeline or the original instruction order into account,
1763 except as a tie-breaker. It also doesn't work to a particular
1764 pressure limit.
1765
1766 This model schedule gives us an idea of what pressure can be
1767 achieved for the block and gives us an example of a schedule that
1768 keeps to that pressure. It also makes the final schedule less
1769 dependent on the original instruction order. This is important
1770 because the original order can either be "wide" (many values live
1771 at once, such as in user-scheduled code) or "narrow" (few values
1772 live at once, such as after loop unrolling, where several
1773 iterations are executed sequentially).
1774
1775 We do not apply this model schedule to the rtx stream. We simply
1776 record it in model_schedule. We also compute the maximum pressure,
1777 MP, that was seen during this schedule.
1778
1779 - Instructions are added to the ready queue even if they require
1780 a stall. The length of the stall is instead computed as:
1781
1782 MAX (INSN_TICK (INSN) - clock_var, 0)
1783
1784 (= insn_delay). This allows rank_for_schedule to choose between
1785 introducing a deliberate stall or increasing pressure.
1786
1787 - Before sorting the ready queue, model_set_excess_costs assigns
1788 a pressure-based cost to each ready instruction in the queue.
1789 This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1790 (ECC for short) and is effectively measured in cycles.
1791
1792 - rank_for_schedule ranks instructions based on:
1793
1794 ECC (insn) + insn_delay (insn)
1795
1796 then as:
1797
1798 insn_delay (insn)
1799
1800 So, for example, an instruction X1 with an ECC of 1 that can issue
1801 now will win over an instruction X0 with an ECC of zero that would
1802 introduce a stall of one cycle. However, an instruction X2 with an
1803 ECC of 2 that can issue now will lose to both X0 and X1.
1804
1805 - When an instruction is scheduled, model_recompute updates the model
1806 schedule with the new pressures (some of which might now exceed the
1807 original maximum pressure MP). model_update_limit_points then searches
1808 for the new point of maximum pressure, if not already known. */
1809
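/* For illustration only (a sketch, not part of the pass): the ranking
   rule above, written as a qsort-style comparison in which a negative
   result prefers X.  It assumes INSN_REG_PRESSURE_EXCESS_COST_CHANGE
   and insn_delay behave as described in this comment; the real
   tie-breaking chain lives in rank_for_schedule below.  */
#if 0
static int
model_rank_sketch (rtx_insn *x, rtx_insn *y)
{
  int cx = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (x) + insn_delay (x);
  int cy = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (y) + insn_delay (y);
  if (cx != cy)
    return cx - cy;	/* Smaller ECC + delay is preferred.  */
  /* Break ties by the length of the deliberate stall alone.  */
  return insn_delay (x) - insn_delay (y);
}
#endif
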
1810 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1811 from surrounding debug information. */
1812 #define MODEL_BAR \
1813 ";;\t\t+------------------------------------------------------\n"
1814
1815 /* Information about the pressure on a particular register class at a
1816 particular point of the model schedule. */
1817 struct model_pressure_data {
1818 /* The pressure at this point of the model schedule, or -1 if the
1819 point is associated with an instruction that has already been
1820 scheduled. */
1821 int ref_pressure;
1822
1823 /* The maximum pressure during or after this point of the model schedule. */
1824 int max_pressure;
1825 };
1826
1827 /* Per-instruction information that is used while building the model
1828 schedule. Here, "schedule" refers to the model schedule rather
1829 than the main schedule. */
1830 struct model_insn_info {
1831 /* The instruction itself. */
1832 rtx_insn *insn;
1833
1834 /* If this instruction is in model_worklist, these fields link to the
1835 previous (higher-priority) and next (lower-priority) instructions
1836 in the list. */
1837 struct model_insn_info *prev;
1838 struct model_insn_info *next;
1839
1840 /* While constructing the schedule, QUEUE_INDEX describes whether an
1841 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1842 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1843 old_queue records the value that QUEUE_INDEX had before scheduling
1844 started, so that we can restore it once the schedule is complete. */
1845 int old_queue;
1846
1847 /* The relative importance of an unscheduled instruction. Higher
1848 values indicate greater importance. */
1849 unsigned int model_priority;
1850
1851 /* The length of the longest path of satisfied true dependencies
1852 that leads to this instruction. */
1853 unsigned int depth;
1854
1855 /* The length of the longest path of dependencies of any kind
1856 that leads from this instruction. */
1857 unsigned int alap;
1858
1859 /* The number of predecessor nodes that must still be scheduled. */
1860 int unscheduled_preds;
1861 };
1862
1863 /* Information about the pressure limit for a particular register class.
1864 This structure is used when applying a model schedule to the main
1865 schedule. */
1866 struct model_pressure_limit {
1867 /* The maximum register pressure seen in the original model schedule. */
1868 int orig_pressure;
1869
1870 /* The maximum register pressure seen in the current model schedule
1871 (which excludes instructions that have already been scheduled). */
1872 int pressure;
1873
1874 /* The point of the current model schedule at which PRESSURE is first
1875 reached. It is set to -1 if the value needs to be recomputed. */
1876 int point;
1877 };
1878
1879 /* Describes a particular way of measuring register pressure. */
1880 struct model_pressure_group {
1881 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1882 struct model_pressure_limit limits[N_REG_CLASSES];
1883
1884 /* Index (POINT * ira_num_pressure_classes + PCI) describes the pressure
1885 on register class ira_pressure_classes[PCI] at point POINT of the
1886 current model schedule. A POINT of model_num_insns describes the
1887 pressure at the end of the schedule. */
1888 struct model_pressure_data *model;
1889 };
1890
1891 /* Index POINT gives the instruction at point POINT of the model schedule.
1892 This array doesn't change during main scheduling. */
1893 static vec<rtx_insn *> model_schedule;
1894
1895 /* The list of instructions in the model worklist, sorted in order of
1896 decreasing priority. */
1897 static struct model_insn_info *model_worklist;
1898
1899 /* Index I describes the instruction with INSN_LUID I. */
1900 static struct model_insn_info *model_insns;
1901
1902 /* The number of instructions in the model schedule. */
1903 static int model_num_insns;
1904
1905 /* The index of the first instruction in model_schedule that hasn't yet been
1906 added to the main schedule, or model_num_insns if all of them have. */
1907 static int model_curr_point;
1908
1909 /* Describes the pressure before each instruction in the model schedule. */
1910 static struct model_pressure_group model_before_pressure;
1911
1912 /* The first unused model_priority value (as used in model_insn_info). */
1913 static unsigned int model_next_priority;
1914
1915
1916 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1917 at point POINT of the model schedule. */
1918 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1919 (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1920
1921 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1922 after point POINT of the model schedule. */
1923 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1924 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1925
1926 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1927 of the model schedule. */
1928 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1929 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1930
1931 /* Information about INSN that is used when creating the model schedule. */
1932 #define MODEL_INSN_INFO(INSN) \
1933 (&model_insns[INSN_LUID (INSN)])
1934
1935 /* The instruction at point POINT of the model schedule. */
1936 #define MODEL_INSN(POINT) \
1937 (model_schedule[POINT])
1938
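/* For illustration only: a sketch of how the matrix described above is
   indexed.  It dumps the reference pressure of every pressure class at
   every point of the model schedule, including the end-of-schedule
   point model_num_insns.  */
#if 0
static void
model_dump_ref_pressure_sketch (void)
{
  for (int point = 0; point <= model_num_insns; point++)
    for (int pci = 0; pci < ira_pressure_classes_num; pci++)
      fprintf (sched_dump, ";; point %d, %s: %d\n", point,
	       reg_class_names[ira_pressure_classes[pci]],
	       MODEL_REF_PRESSURE (&model_before_pressure, point, pci));
}
#endif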
1939
1940 /* Return INSN's index in the model schedule, or model_num_insns if it
1941 doesn't belong to that schedule. */
1942
1943 static int
1944 model_index (rtx_insn *insn)
1945 {
1946 if (INSN_MODEL_INDEX (insn) == 0)
1947 return model_num_insns;
1948 return INSN_MODEL_INDEX (insn) - 1;
1949 }
1950
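/* For example: an insn that was never given a model index has
   INSN_MODEL_INDEX 0 and is reported as model_num_insns, while the
   insn at model point 0 stores INSN_MODEL_INDEX 1.  */
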
1951 /* Make sure that GROUP->limits is up-to-date for the current point
1952 of the model schedule. */
1953
1954 static void
1955 model_update_limit_points_in_group (struct model_pressure_group *group)
1956 {
1957 int pci, max_pressure, point;
1958
1959 for (pci = 0; pci < ira_pressure_classes_num; pci++)
1960 {
1961 /* We may have passed the final point at which the pressure in
1962 group->limits[pci].pressure was reached. Update the limit if so. */
1963 max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1964 group->limits[pci].pressure = max_pressure;
1965
1966 /* Find the point at which MAX_PRESSURE is first reached. We need
1967 to search in three cases:
1968
1969 - We've already moved past the previous pressure point.
1970 In this case we search forward from model_curr_point.
1971
1972 - We scheduled the previous point of maximum pressure ahead of
1973 its position in the model schedule, but doing so didn't bring
1974 the pressure point earlier. In this case we search forward
1975 from that previous pressure point.
1976
1977 - Scheduling an instruction early caused the maximum pressure
1978 to decrease. In this case we will have set the pressure
1979 point to -1, and we search forward from model_curr_point. */
1980 point = MAX (group->limits[pci].point, model_curr_point);
1981 while (point < model_num_insns
1982 && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1983 point++;
1984 group->limits[pci].point = point;
1985
1986 gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
1987 gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
1988 }
1989 }
1990
1991 /* Make sure that all register-pressure limits are up-to-date for the
1992 current position in the model schedule. */
1993
1994 static void
1995 model_update_limit_points (void)
1996 {
1997 model_update_limit_points_in_group (&model_before_pressure);
1998 }
1999
2000 /* Return the model_index of the last unscheduled use in chain USE
2001 outside of USE's instruction. Return -1 if there are no other uses,
2002 or model_num_insns if the register is live at the end of the block. */
2003
2004 static int
2005 model_last_use_except (struct reg_use_data *use)
2006 {
2007 struct reg_use_data *next;
2008 int last, index;
2009
2010 last = -1;
2011 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2012 if (NONDEBUG_INSN_P (next->insn)
2013 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2014 {
2015 index = model_index (next->insn);
2016 if (index == model_num_insns)
2017 return model_num_insns;
2018 if (last < index)
2019 last = index;
2020 }
2021 return last;
2022 }
2023
2024 /* An instruction with model_index POINT has just been scheduled, and it
2025 adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2026 Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2027 MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
2028
2029 static void
2030 model_start_update_pressure (struct model_pressure_group *group,
2031 int point, int pci, int delta)
2032 {
2033 int next_max_pressure;
2034
2035 if (point == model_num_insns)
2036 {
2037 /* The instruction wasn't part of the model schedule; it was moved
2038 from a different block. Update the pressure for the end of
2039 the model schedule. */
2040 MODEL_REF_PRESSURE (group, point, pci) += delta;
2041 MODEL_MAX_PRESSURE (group, point, pci) += delta;
2042 }
2043 else
2044 {
2045 /* Record that this instruction has been scheduled. Nothing now
2046 changes between POINT and POINT + 1, so get the maximum pressure
2047 from the latter. If the maximum pressure decreases, the new
2048 pressure point may be before POINT. */
2049 MODEL_REF_PRESSURE (group, point, pci) = -1;
2050 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2051 if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2052 {
2053 MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2054 if (group->limits[pci].point == point)
2055 group->limits[pci].point = -1;
2056 }
2057 }
2058 }
2059
2060 /* Record that scheduling a later instruction has changed the pressure
2061 at point POINT of the model schedule by DELTA (which might be 0).
2062 Update GROUP accordingly. Return nonzero if these changes might
2063 trigger changes to previous points as well. */
2064
2065 static int
2066 model_update_pressure (struct model_pressure_group *group,
2067 int point, int pci, int delta)
2068 {
2069 int ref_pressure, max_pressure, next_max_pressure;
2070
2071 /* If POINT hasn't yet been scheduled, update its pressure. */
2072 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2073 if (ref_pressure >= 0 && delta != 0)
2074 {
2075 ref_pressure += delta;
2076 MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2077
2078 /* Check whether the maximum pressure in the overall schedule
2079 has increased. (This means that the MODEL_MAX_PRESSURE of
2080 every point <= POINT will need to increase too; see below.) */
2081 if (group->limits[pci].pressure < ref_pressure)
2082 group->limits[pci].pressure = ref_pressure;
2083
2084 /* If we are at maximum pressure, and the maximum pressure
2085 point was previously unknown or later than POINT,
2086 bring it forward. */
2087 if (group->limits[pci].pressure == ref_pressure
2088 && !IN_RANGE (group->limits[pci].point, 0, point))
2089 group->limits[pci].point = point;
2090
2091 /* If POINT used to be the point of maximum pressure, but isn't
2092 any longer, we need to recalculate it using a forward walk. */
2093 if (group->limits[pci].pressure > ref_pressure
2094 && group->limits[pci].point == point)
2095 group->limits[pci].point = -1;
2096 }
2097
2098 /* Update the maximum pressure at POINT. Changes here might also
2099 affect the maximum pressure at POINT - 1. */
2100 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2101 max_pressure = MAX (ref_pressure, next_max_pressure);
2102 if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2103 {
2104 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2105 return 1;
2106 }
2107 return 0;
2108 }
2109
2110 /* INSN has just been scheduled. Update the model schedule accordingly. */
2111
2112 static void
2113 model_recompute (rtx_insn *insn)
2114 {
2115 struct {
2116 int last_use;
2117 int regno;
2118 } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2119 struct reg_use_data *use;
2120 struct reg_pressure_data *reg_pressure;
2121 int delta[N_REG_CLASSES];
2122 int pci, point, mix, new_last, cl, ref_pressure, queue;
2123 unsigned int i, num_uses, num_pending_births;
2124 bool print_p;
2125
2126 /* The destinations of INSN were previously live from POINT onwards, but are
2127 now live from model_curr_point onwards. Set up DELTA accordingly. */
2128 point = model_index (insn);
2129 reg_pressure = INSN_REG_PRESSURE (insn);
2130 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2131 {
2132 cl = ira_pressure_classes[pci];
2133 delta[cl] = reg_pressure[pci].set_increase;
2134 }
2135
2136 /* Record which registers previously died at POINT, but which now die
2137 before POINT. Adjust DELTA so that it represents the effect of
2138 this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
2139 registers that will be born in the range [model_curr_point, POINT). */
2140 num_uses = 0;
2141 num_pending_births = 0;
2142 bitmap_clear (tmp_bitmap);
2143 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2144 {
2145 new_last = model_last_use_except (use);
2146 if (new_last < point && bitmap_set_bit (tmp_bitmap, use->regno))
2147 {
2148 gcc_assert (num_uses < ARRAY_SIZE (uses));
2149 uses[num_uses].last_use = new_last;
2150 uses[num_uses].regno = use->regno;
2151 /* This register is no longer live after POINT - 1. */
2152 mark_regno_birth_or_death (NULL, delta, use->regno, false);
2153 num_uses++;
2154 if (new_last >= 0)
2155 num_pending_births++;
2156 }
2157 }
2158
2159 /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2160 Also set each group pressure limit for POINT. */
2161 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2162 {
2163 cl = ira_pressure_classes[pci];
2164 model_start_update_pressure (&model_before_pressure,
2165 point, pci, delta[cl]);
2166 }
2167
2168 /* Walk the model schedule backwards, starting immediately before POINT. */
2169 print_p = false;
2170 if (point != model_curr_point)
2171 do
2172 {
2173 point--;
2174 insn = MODEL_INSN (point);
2175 queue = QUEUE_INDEX (insn);
2176
2177 if (queue != QUEUE_SCHEDULED)
2178 {
2179 /* DELTA describes the effect of the move on the register pressure
2180 after POINT. Make it describe the effect on the pressure
2181 before POINT. */
2182 i = 0;
2183 while (i < num_uses)
2184 {
2185 if (uses[i].last_use == point)
2186 {
2187 /* This register is now live again. */
2188 mark_regno_birth_or_death (NULL, delta,
2189 uses[i].regno, true);
2190
2191 /* Remove this use from the array. */
2192 uses[i] = uses[num_uses - 1];
2193 num_uses--;
2194 num_pending_births--;
2195 }
2196 else
2197 i++;
2198 }
2199
2200 if (sched_verbose >= 5)
2201 {
2202 if (!print_p)
2203 {
2204 fprintf (sched_dump, MODEL_BAR);
2205 fprintf (sched_dump, ";;\t\t| New pressure for model"
2206 " schedule\n");
2207 fprintf (sched_dump, MODEL_BAR);
2208 print_p = true;
2209 }
2210
2211 fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2212 point, INSN_UID (insn),
2213 str_pattern_slim (PATTERN (insn)));
2214 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2215 {
2216 cl = ira_pressure_classes[pci];
2217 ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2218 point, pci);
2219 fprintf (sched_dump, " %s:[%d->%d]",
2220 reg_class_names[ira_pressure_classes[pci]],
2221 ref_pressure, ref_pressure + delta[cl]);
2222 }
2223 fprintf (sched_dump, "\n");
2224 }
2225 }
2226
2227 /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
2228 might have changed as well. */
2229 mix = num_pending_births;
2230 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2231 {
2232 cl = ira_pressure_classes[pci];
2233 mix |= delta[cl];
2234 mix |= model_update_pressure (&model_before_pressure,
2235 point, pci, delta[cl]);
2236 }
2237 }
2238 while (mix && point > model_curr_point);
2239
2240 if (print_p)
2241 fprintf (sched_dump, MODEL_BAR);
2242 }
2243
2244 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2245 check whether the insn's pattern needs restoring. */
2246 static bool
2247 must_restore_pattern_p (rtx_insn *next, dep_t dep)
2248 {
2249 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2250 return false;
2251
2252 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2253 {
2254 gcc_assert (ORIG_PAT (next) != NULL_RTX);
2255 gcc_assert (next == DEP_CON (dep));
2256 }
2257 else
2258 {
2259 struct dep_replacement *desc = DEP_REPLACE (dep);
2260 if (desc->insn != next)
2261 {
2262 gcc_assert (*desc->loc == desc->orig);
2263 return false;
2264 }
2265 }
2266 return true;
2267 }
2268 \f
2269 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2270 pressure on CL from P to P'. We use this to calculate a "base ECC",
2271 baseECC (CL, X), for each pressure class CL and each instruction X.
2272 Supposing X changes the pressure on CL from P to P', and that the
2273 maximum pressure on CL in the current model schedule is MP', then:
2274
2275 * if X occurs before or at the next point of maximum pressure in
2276 the model schedule and P' > MP', then:
2277
2278 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2279
2280 The idea is that the pressure after scheduling a fixed set of
2281 instructions -- in this case, the set up to and including the
2282 next maximum pressure point -- is going to be the same regardless
2283 of the order; we simply want to keep the intermediate pressure
2284 under control. Thus X has a cost of zero unless scheduling it
2285 now would exceed MP'.
2286
2287 If all increases in the set are by the same amount, no zero-cost
2288 instruction will ever cause the pressure to exceed MP'. However,
2289 if X is instead moved past an instruction X' with pressure in the
2290 range (MP' - (P' - P), MP'), the pressure at X' will increase
2291 beyond MP'. Since baseECC is very much a heuristic anyway,
2292 it doesn't seem worth the overhead of tracking cases like these.
2293
2294 The cost of exceeding MP' is always based on the original maximum
2295 pressure MP. This is so that going 2 registers over the original
2296 limit has the same cost regardless of whether it comes from two
2297 separate +1 deltas or from a single +2 delta.
2298
2299 * if X occurs after the next point of maximum pressure in the model
2300 schedule and P' > P, then:
2301
2302 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2303
2304 That is, if we move X forward across a point of maximum pressure,
2305 and if X increases the pressure by P' - P, then we conservatively
2306 assume that scheduling X next would increase the maximum pressure
2307 by P' - P. Again, the cost of doing this is based on the original
2308 maximum pressure MP, for the same reason as above.
2309
2310 * if P' < P, P > MP, and X occurs at or after the next point of
2311 maximum pressure, then:
2312
2313 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2314
2315 That is, if we have already exceeded the original maximum pressure MP,
2316 and if X might reduce the maximum pressure again -- or at least push
2317 it further back, and thus allow more scheduling freedom -- it is given
2318 a negative cost to reflect the improvement.
2319
2320 * otherwise,
2321
2322 baseECC (CL, X) = 0
2323
2324 In this case, X is not expected to affect the maximum pressure MP',
2325 so it has zero cost.
2326
2327 We then create a combined value baseECC (X) that is the sum of
2328 baseECC (CL, X) for each pressure class CL.
2329
2330 baseECC (X) could itself be used as the ECC value described above.
2331 However, this is often too conservative, in the sense that it
2332 tends to make high-priority instructions that increase pressure
2333 wait too long in cases where introducing a spill would be better.
2334 For this reason the final ECC is a priority-adjusted form of
2335 baseECC (X). Specifically, we calculate:
2336
2337 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2338 baseP = MAX { P (X) | baseECC (X) <= 0 }
2339
2340 Then:
2341
2342 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2343
2344 Thus an instruction's effect on pressure is ignored if it has a high
2345 enough priority relative to the ones that don't increase pressure.
2346 Negative values of baseECC (X) do not increase the priority of X
2347 itself, but they do make it harder for other instructions to
2348 increase the pressure further.
2349
2350 This pressure cost is deliberately timid. The intention has been
2351 to choose a heuristic that rarely interferes with the normal list
2352 scheduler in cases where that scheduler would produce good code.
2353 We simply want to curb some of its worst excesses. */
2354
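/* A worked example of the ECC formula above, with invented numbers:
   suppose baseP is 10 and X has INSN_PRIORITY (X) == 6,
   insn_delay (X) == 1 and baseECC (X) == 5.  Then:

       P (X) = 6 - 1 - 5 = 0
       ECC (X) = MAX (MIN (10 - 0, 5), 0) = 5

   so X pays its full pressure cost.  If instead INSN_PRIORITY (X) == 14,
   then P (X) = 8 and ECC (X) = MIN (10 - 8, 5) = 2: a sufficiently high
   priority partially waives the pressure cost, as described above.  */
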
2355 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2356
2357 Here we use the very simplistic cost model that every register above
2358 sched_class_regs_num[CL] has a spill cost of 1. We could use other
2359 measures instead, such as one based on MEMORY_MOVE_COST. However:
2360
2361 (1) In order for an instruction to be scheduled, the higher cost
2362 would need to be justified by a single saving of that many stalls.
2363 This is overly pessimistic, because the benefit of spilling is
2364 often to avoid a sequence of several short stalls rather than
2365 a single long one.
2366
2367 (2) The cost is still arbitrary. Because we are not allocating
2368 registers during scheduling, we have no way of knowing for
2369 sure how many memory accesses will be required by each spill,
2370 where the spills will be placed within the block, or even
2371 which block(s) will contain the spills.
2372
2373 So a higher cost than 1 is often too conservative in practice,
2374 forcing blocks to contain unnecessary stalls instead of spill code.
2375 The simple cost below seems to be the best compromise. It reduces
2376 the interference with the normal list scheduler, which helps make
2377 it more suitable for a default-on option. */
2378
2379 static int
2380 model_spill_cost (int cl, int from, int to)
2381 {
2382 from = MAX (from, sched_class_regs_num[cl]);
2383 return MAX (to, from) - from;
2384 }
2385
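/* For example, with invented numbers: if sched_class_regs_num[CL] is 10,
   then model_spill_cost (CL, 8, 12) is MAX (12, 10) - 10 == 2; only the
   registers above the class limit count, even though the pressure
   itself rose by 4.  */
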
2386 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2387 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2388 P' = P + DELTA. */
2389
2390 static int
2391 model_excess_group_cost (struct model_pressure_group *group,
2392 int point, int pci, int delta)
2393 {
2394 int pressure, cl;
2395
2396 cl = ira_pressure_classes[pci];
2397 if (delta < 0 && point >= group->limits[pci].point)
2398 {
2399 pressure = MAX (group->limits[pci].orig_pressure,
2400 curr_reg_pressure[cl] + delta);
2401 return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2402 }
2403
2404 if (delta > 0)
2405 {
2406 if (point > group->limits[pci].point)
2407 pressure = group->limits[pci].pressure + delta;
2408 else
2409 pressure = curr_reg_pressure[cl] + delta;
2410
2411 if (pressure > group->limits[pci].pressure)
2412 return model_spill_cost (cl, group->limits[pci].orig_pressure,
2413 pressure);
2414 }
2415
2416 return 0;
2417 }
2418
2419 /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
2420 if PRINT_P. */
2421
2422 static int
2423 model_excess_cost (rtx_insn *insn, bool print_p)
2424 {
2425 int point, pci, cl, cost, this_cost, delta;
2426 struct reg_pressure_data *insn_reg_pressure;
2427 int insn_death[N_REG_CLASSES];
2428
2429 calculate_reg_deaths (insn, insn_death);
2430 point = model_index (insn);
2431 insn_reg_pressure = INSN_REG_PRESSURE (insn);
2432 cost = 0;
2433
2434 if (print_p)
2435 fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2436 INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2437
2438 /* Sum up the individual costs for each register class. */
2439 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2440 {
2441 cl = ira_pressure_classes[pci];
2442 delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2443 this_cost = model_excess_group_cost (&model_before_pressure,
2444 point, pci, delta);
2445 cost += this_cost;
2446 if (print_p)
2447 fprintf (sched_dump, " %s:[%d base cost %d]",
2448 reg_class_names[cl], delta, this_cost);
2449 }
2450
2451 if (print_p)
2452 fprintf (sched_dump, "\n");
2453
2454 return cost;
2455 }
2456
2457 /* Dump the next points of maximum pressure for GROUP. */
2458
2459 static void
2460 model_dump_pressure_points (struct model_pressure_group *group)
2461 {
2462 int pci, cl;
2463
2464 fprintf (sched_dump, ";;\t\t| pressure points");
2465 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2466 {
2467 cl = ira_pressure_classes[pci];
2468 fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2469 curr_reg_pressure[cl], group->limits[pci].pressure);
2470 if (group->limits[pci].point < model_num_insns)
2471 fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2472 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2473 else
2474 fprintf (sched_dump, "end]");
2475 }
2476 fprintf (sched_dump, "\n");
2477 }
2478
2479 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2480
2481 static void
2482 model_set_excess_costs (rtx_insn **insns, int count)
2483 {
2484 int i, cost, priority_base, priority;
2485 bool print_p;
2486
2487 /* Record the baseECC value for each instruction in the model schedule,
2488 except that negative costs are converted to zero ones now rather than
2489 later. Do not assign a cost to debug instructions, since they must
2490 not change code-generation decisions. Experiments suggest we also
2491 get better results by not assigning a cost to instructions from
2492 a different block.
2493
2494 Set PRIORITY_BASE to the baseP of the block comment above. This is the
2495 maximum priority of the "cheap" instructions, which should always
2496 include the next model instruction. */
2497 priority_base = 0;
2498 print_p = false;
2499 for (i = 0; i < count; i++)
2500 if (INSN_MODEL_INDEX (insns[i]))
2501 {
2502 if (sched_verbose >= 6 && !print_p)
2503 {
2504 fprintf (sched_dump, MODEL_BAR);
2505 fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2506 model_dump_pressure_points (&model_before_pressure);
2507 fprintf (sched_dump, MODEL_BAR);
2508 print_p = true;
2509 }
2510 cost = model_excess_cost (insns[i], print_p);
2511 if (cost <= 0)
2512 {
2513 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2514 priority_base = MAX (priority_base, priority);
2515 cost = 0;
2516 }
2517 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2518 }
2519 if (print_p)
2520 fprintf (sched_dump, MODEL_BAR);
2521
2522 /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2523 instruction. */
2524 for (i = 0; i < count; i++)
2525 {
2526 cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2527 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2528 if (cost > 0 && priority > priority_base)
2529 {
2530 cost += priority_base - priority;
2531 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2532 }
2533 }
2534 }
2535 \f
2536
2537 /* Enum of rank_for_schedule heuristic decisions. */
2538 enum rfs_decision {
2539 RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2540 RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2541 RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
2542 RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2543 RFS_DEP_COUNT, RFS_TIE, RFS_FUSION, RFS_COST, RFS_N };
2544
2545 /* Corresponding strings for print outs. */
2546 static const char *rfs_str[RFS_N] = {
2547 "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2548 "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2549 "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
2550 "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2551 "RFS_DEP_COUNT", "RFS_TIE", "RFS_FUSION", "RFS_COST" };
2552
2553 /* Statistical breakdown of rank_for_schedule decisions. */
2554 struct rank_for_schedule_stats_t { unsigned stats[RFS_N]; };
2555 static rank_for_schedule_stats_t rank_for_schedule_stats;
2556
2557 /* Return the result of comparing insns TMP and TMP2 and update
2558 rank_for_schedule statistics. */
2559 static int
2560 rfs_result (enum rfs_decision decision, int result, rtx tmp, rtx tmp2)
2561 {
2562 ++rank_for_schedule_stats.stats[decision];
2563 if (result < 0)
2564 INSN_LAST_RFS_WIN (tmp) = decision;
2565 else if (result > 0)
2566 INSN_LAST_RFS_WIN (tmp2) = decision;
2567 else
2568 gcc_unreachable ();
2569 return result;
2570 }
2571
2572 /* Sorting predicate to move DEBUG_INSNs to the top of the ready list,
2573 while keeping normal insns in original order. */
2574
2575 static int
2576 rank_for_schedule_debug (const void *x, const void *y)
2577 {
2578 rtx_insn *tmp = *(rtx_insn * const *) y;
2579 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2580
2581 /* Schedule debug insns as early as possible. */
2582 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2583 return -1;
2584 else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2585 return 1;
2586 else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2587 return INSN_LUID (tmp) - INSN_LUID (tmp2);
2588 else
2589 return INSN_RFS_DEBUG_ORIG_ORDER (tmp2) - INSN_RFS_DEBUG_ORIG_ORDER (tmp);
2590 }
2591
2592 /* Returns a positive value if x is preferred; returns a negative value if
2593 y is preferred. Should never return 0, since that will make the sort
2594 unstable. */
2595
2596 static int
2597 rank_for_schedule (const void *x, const void *y)
2598 {
2599 rtx_insn *tmp = *(rtx_insn * const *) y;
2600 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2601 int tmp_class, tmp2_class;
2602 int val, priority_val, info_val, diff;
2603
2604 if (live_range_shrinkage_p)
2605 {
2606 /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2607 code. */
2608 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2609 if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2610 || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2611 && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2612 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2613 return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff, tmp, tmp2);
2614 /* Sort by INSN_LUID (original insn order), so that we make the
2615 sort stable. This minimizes instruction movement, thus
2616 minimizing sched's effect on debugging and cross-jumping. */
2617 return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2618 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2619 }
2620
2621 /* The insn in a schedule group should be issued first. */
2622 if (flag_sched_group_heuristic
2623 && SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2624 return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1,
2625 tmp, tmp2);
2626
2627 /* Make sure that priority of TMP and TMP2 are initialized. */
2628 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2629
2630 if (sched_fusion)
2631 {
2632 /* The instruction that has the same fusion priority as the last
2633 scheduled instruction is the one we pick next. If that is not
2634 the case, we sort the ready list first by fusion priority, then
2635 by priority, and finally by INSN_LUID. */
2636 int a = INSN_FUSION_PRIORITY (tmp);
2637 int b = INSN_FUSION_PRIORITY (tmp2);
2638 int last = -1;
2639
2640 if (last_nondebug_scheduled_insn
2641 && !NOTE_P (last_nondebug_scheduled_insn)
2642 && BLOCK_FOR_INSN (tmp)
2643 == BLOCK_FOR_INSN (last_nondebug_scheduled_insn))
2644 last = INSN_FUSION_PRIORITY (last_nondebug_scheduled_insn);
2645
2646 if (a != last && b != last)
2647 {
2648 if (a == b)
2649 {
2650 a = INSN_PRIORITY (tmp);
2651 b = INSN_PRIORITY (tmp2);
2652 }
2653 if (a != b)
2654 return rfs_result (RFS_FUSION, b - a, tmp, tmp2);
2655 else
2656 return rfs_result (RFS_FUSION,
2657 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2658 }
2659 else if (a == b)
2660 {
2661 gcc_assert (last_nondebug_scheduled_insn
2662 && !NOTE_P (last_nondebug_scheduled_insn));
2663 last = INSN_PRIORITY (last_nondebug_scheduled_insn);
2664
2665 a = abs (INSN_PRIORITY (tmp) - last);
2666 b = abs (INSN_PRIORITY (tmp2) - last);
2667 if (a != b)
2668 return rfs_result (RFS_FUSION, a - b, tmp, tmp2);
2669 else
2670 return rfs_result (RFS_FUSION,
2671 INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2672 }
2673 else if (a == last)
2674 return rfs_result (RFS_FUSION, -1, tmp, tmp2);
2675 else
2676 return rfs_result (RFS_FUSION, 1, tmp, tmp2);
2677 }
2678
2679 if (sched_pressure != SCHED_PRESSURE_NONE)
2680 {
2681 /* Prefer insn whose scheduling results in the smallest register
2682 pressure excess. */
2683 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2684 + insn_delay (tmp)
2685 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2686 - insn_delay (tmp2))))
2687 return rfs_result (RFS_PRESSURE_DELAY, diff, tmp, tmp2);
2688 }
2689
2690 if (sched_pressure != SCHED_PRESSURE_NONE
2691 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
2692 && INSN_TICK (tmp2) != INSN_TICK (tmp))
2693 {
2694 diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
2695 return rfs_result (RFS_PRESSURE_TICK, diff, tmp, tmp2);
2696 }
2697
2698 /* If we are doing backtracking in this schedule, prefer insns that
2699 have forward dependencies with negative cost against an insn that
2700 was already scheduled. */
2701 if (current_sched_info->flags & DO_BACKTRACKING)
2702 {
2703 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2704 if (priority_val)
2705 return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val, tmp, tmp2);
2706 }
2707
2708 /* Prefer insn with higher priority. */
2709 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2710
2711 if (flag_sched_critical_path_heuristic && priority_val)
2712 return rfs_result (RFS_PRIORITY, priority_val, tmp, tmp2);
2713
2714 if (param_sched_autopref_queue_depth >= 0)
2715 {
2716 int autopref = autopref_rank_for_schedule (tmp, tmp2);
2717 if (autopref != 0)
2718 return autopref;
2719 }
2720
2721 /* Prefer speculative insn with greater dependencies weakness. */
2722 if (flag_sched_spec_insn_heuristic && spec_info)
2723 {
2724 ds_t ds1, ds2;
2725 dw_t dw1, dw2;
2726 int dw;
2727
2728 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2729 if (ds1)
2730 dw1 = ds_weak (ds1);
2731 else
2732 dw1 = NO_DEP_WEAK;
2733
2734 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2735 if (ds2)
2736 dw2 = ds_weak (ds2);
2737 else
2738 dw2 = NO_DEP_WEAK;
2739
2740 dw = dw2 - dw1;
2741 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2742 return rfs_result (RFS_SPECULATION, dw, tmp, tmp2);
2743 }
2744
2745 info_val = (*current_sched_info->rank) (tmp, tmp2);
2746 if (flag_sched_rank_heuristic && info_val)
2747 return rfs_result (RFS_SCHED_RANK, info_val, tmp, tmp2);
2748
2749 /* Compare insns based on their relation to the last scheduled
2750 non-debug insn. */
2751 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2752 {
2753 dep_t dep1;
2754 dep_t dep2;
2755 rtx_insn *last = last_nondebug_scheduled_insn;
2756
2757 /* Classify the instructions into three classes:
2758 1) Data dependent on last scheduled insn.
2759 2) Anti/Output dependent on last scheduled insn.
2760 3) Independent of last scheduled insn, or has latency of one.
2761 Choose the insn from the highest numbered class if different. */
2762 dep1 = sd_find_dep_between (last, tmp, true);
2763
2764 if (dep1 == NULL || dep_cost (dep1) == 1)
2765 tmp_class = 3;
2766 else if (/* Data dependence. */
2767 DEP_TYPE (dep1) == REG_DEP_TRUE)
2768 tmp_class = 1;
2769 else
2770 tmp_class = 2;
2771
2772 dep2 = sd_find_dep_between (last, tmp2, true);
2773
2774 if (dep2 == NULL || dep_cost (dep2) == 1)
2775 tmp2_class = 3;
2776 else if (/* Data dependence. */
2777 DEP_TYPE (dep2) == REG_DEP_TRUE)
2778 tmp2_class = 1;
2779 else
2780 tmp2_class = 2;
2781
2782 if ((val = tmp2_class - tmp_class))
2783 return rfs_result (RFS_LAST_INSN, val, tmp, tmp2);
2784 }
2785
2786 /* Prefer instructions that occur earlier in the model schedule. */
2787 if (sched_pressure == SCHED_PRESSURE_MODEL)
2788 {
2789 diff = model_index (tmp) - model_index (tmp2);
2790 if (diff != 0)
2791 return rfs_result (RFS_PRESSURE_INDEX, diff, tmp, tmp2);
2792 }
2793
2794 /* Prefer the insn which has more later insns that depend on it.
2795 This gives the scheduler more freedom when scheduling later
2796 instructions at the expense of added register pressure. */
2797
2798 val = (dep_list_size (tmp2, SD_LIST_FORW)
2799 - dep_list_size (tmp, SD_LIST_FORW));
2800
2801 if (flag_sched_dep_count_heuristic && val != 0)
2802 return rfs_result (RFS_DEP_COUNT, val, tmp, tmp2);
2803
2804 /* Sort by INSN_COST rather than INSN_LUID. This means that instructions
2805 which take longer to execute are prioritised, which leads to more
2806 dual-issue opportunities on in-order cores that have this feature. */
2807
2808 if (INSN_COST (tmp) != INSN_COST (tmp2))
2809 return rfs_result (RFS_COST, INSN_COST (tmp2) - INSN_COST (tmp),
2810 tmp, tmp2);
2811
2812 /* If insns are equally good, sort by INSN_LUID (original insn order),
2813 so that we make the sort stable. This minimizes instruction movement,
2814 thus minimizing sched's effect on debugging and cross-jumping. */
2815 return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2), tmp, tmp2);
2816 }
2817
2818 /* Resort the array A, in which only the element at index N - 1 may be out of order. */
2819
2820 HAIFA_INLINE static void
2821 swap_sort (rtx_insn **a, int n)
2822 {
2823 rtx_insn *insn = a[n - 1];
2824 int i = n - 2;
2825
2826 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2827 {
2828 a[i + 1] = a[i];
2829 i -= 1;
2830 }
2831 a[i + 1] = insn;
2832 }
2833
2834 /* Add INSN to the insn queue so that it can be executed at least
2835 N_CYCLES after the currently executing insn. Preserve insns
2836 chain for debugging purposes. REASON will be printed in debugging
2837 output. */
2838
2839 HAIFA_INLINE static void
2840 queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2841 {
2842 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2843 rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2844 int new_tick;
2845
2846 gcc_assert (n_cycles <= max_insn_queue_index);
2847 gcc_assert (!DEBUG_INSN_P (insn));
2848
2849 insn_queue[next_q] = link;
2850 q_size += 1;
2851
2852 if (sched_verbose >= 2)
2853 {
2854 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2855 (*current_sched_info->print_insn) (insn, 0));
2856
2857 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2858 }
2859
2860 QUEUE_INDEX (insn) = next_q;
2861
2862 if (current_sched_info->flags & DO_BACKTRACKING)
2863 {
2864 new_tick = clock_var + n_cycles;
2865 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2866 INSN_TICK (insn) = new_tick;
2867
2868 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2869 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2870 {
2871 must_backtrack = true;
2872 if (sched_verbose >= 2)
2873 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2874 }
2875 }
2876 }
2877
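/* An illustrative call (the reason string is invented): delay INSN by
   two cycles and record why in the dump file:

       queue_insn (insn, 2, "resource conflict");

   The insn returns to the ready list automatically as the queue
   rotates; see queue_to_ready later in this file.  */
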
2878 /* Remove INSN from queue. */
2879 static void
2880 queue_remove (rtx_insn *insn)
2881 {
2882 gcc_assert (QUEUE_INDEX (insn) >= 0);
2883 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2884 q_size--;
2885 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2886 }
2887
2888 /* Return a pointer to the bottom of the ready list, i.e. the insn
2889 with the lowest priority. */
2890
2891 rtx_insn **
2892 ready_lastpos (struct ready_list *ready)
2893 {
2894 gcc_assert (ready->n_ready >= 1);
2895 return ready->vec + ready->first - ready->n_ready + 1;
2896 }
2897
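/* An illustrative layout, with invented numbers: for a vector of
   length 8 with first == 5 and n_ready == 3, the insns occupy
   indices 3..5:

       index:  0  1  2  3  4  5  6  7
       vec:    .  .  .  L  M  H  .  .

   H, at index first == 5, has the highest priority; ready_lastpos
   returns &vec[3] (first - n_ready + 1), the lowest-priority insn L.  */
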
2898 /* Add an element INSN to the ready list so that it ends up with the
2899 lowest/highest priority depending on FIRST_P. */
2900
2901 HAIFA_INLINE static void
2902 ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2903 {
2904 if (!first_p)
2905 {
2906 if (ready->first == ready->n_ready)
2907 {
2908 memmove (ready->vec + ready->veclen - ready->n_ready,
2909 ready_lastpos (ready),
2910 ready->n_ready * sizeof (rtx));
2911 ready->first = ready->veclen - 1;
2912 }
2913 ready->vec[ready->first - ready->n_ready] = insn;
2914 }
2915 else
2916 {
2917 if (ready->first == ready->veclen - 1)
2918 {
2919 if (ready->n_ready)
2920 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2921 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2922 ready_lastpos (ready),
2923 ready->n_ready * sizeof (rtx));
2924 ready->first = ready->veclen - 2;
2925 }
2926 ready->vec[++(ready->first)] = insn;
2927 }
2928
2929 ready->n_ready++;
2930 if (DEBUG_INSN_P (insn))
2931 ready->n_debug++;
2932
2933 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2934 QUEUE_INDEX (insn) = QUEUE_READY;
2935
2936 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2937 && INSN_EXACT_TICK (insn) < clock_var)
2938 {
2939 must_backtrack = true;
2940 }
2941 }
2942
2943 /* Remove the element with the highest priority from the ready list and
2944 return it. */
2945
2946 HAIFA_INLINE static rtx_insn *
2947 ready_remove_first (struct ready_list *ready)
2948 {
2949 rtx_insn *t;
2950
2951 gcc_assert (ready->n_ready);
2952 t = ready->vec[ready->first--];
2953 ready->n_ready--;
2954 if (DEBUG_INSN_P (t))
2955 ready->n_debug--;
2956 /* If the queue becomes empty, reset it. */
2957 if (ready->n_ready == 0)
2958 ready->first = ready->veclen - 1;
2959
2960 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2961 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2962
2963 return t;
2964 }
2965
2966 /* The following code implements multi-pass scheduling for the first
2967 cycle. In other words, we will try to choose the ready insn that
2968 permits starting the maximum number of insns on the same cycle. */
2969
2970 /* Return the element at INDEX from the ready list. INDEX for the
2971 insn with the highest priority is 0, and the insn with the lowest
2972 priority has index N_READY - 1. */
2973
2974 rtx_insn *
2975 ready_element (struct ready_list *ready, int index)
2976 {
2977 gcc_assert (ready->n_ready && index < ready->n_ready);
2978
2979 return ready->vec[ready->first - index];
2980 }
2981
2982 /* Remove the element INDEX from the ready list and return it. INDEX
2983 for insn with the highest priority is 0, and the lowest priority
2984 has N_READY - 1. */
2985
2986 HAIFA_INLINE static rtx_insn *
2987 ready_remove (struct ready_list *ready, int index)
2988 {
2989 rtx_insn *t;
2990 int i;
2991
2992 if (index == 0)
2993 return ready_remove_first (ready);
2994 gcc_assert (ready->n_ready && index < ready->n_ready);
2995 t = ready->vec[ready->first - index];
2996 ready->n_ready--;
2997 if (DEBUG_INSN_P (t))
2998 ready->n_debug--;
2999 for (i = index; i < ready->n_ready; i++)
3000 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
3001 QUEUE_INDEX (t) = QUEUE_NOWHERE;
3002 return t;
3003 }
3004
3005 /* Remove INSN from the ready list. */
3006 static void
3007 ready_remove_insn (rtx_insn *insn)
3008 {
3009 int i;
3010
3011 for (i = 0; i < readyp->n_ready; i++)
3012 if (ready_element (readyp, i) == insn)
3013 {
3014 ready_remove (readyp, i);
3015 return;
3016 }
3017 gcc_unreachable ();
3018 }
3019
3020 /* Calculate the difference of two statistics sets WAS and NOW.
3021 The result is returned in WAS. */
3022 static void
3023 rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
3024 const rank_for_schedule_stats_t *now)
3025 {
3026 for (int i = 0; i < RFS_N; ++i)
3027 was->stats[i] = now->stats[i] - was->stats[i];
3028 }
3029
3030 /* Print rank_for_schedule statistics. */
3031 static void
3032 print_rank_for_schedule_stats (const char *prefix,
3033 const rank_for_schedule_stats_t *stats,
3034 struct ready_list *ready)
3035 {
3036 for (int i = 0; i < RFS_N; ++i)
3037 if (stats->stats[i])
3038 {
3039 fprintf (sched_dump, "%s%20s: %u", prefix, rfs_str[i], stats->stats[i]);
3040
3041 if (ready != NULL)
3042 /* Print out insns that won due to RFS_<I>. */
3043 {
3044 rtx_insn **p = ready_lastpos (ready);
3045
3046 fprintf (sched_dump, ":");
3047 /* Start with 1 since least-priority insn didn't have any wins. */
3048 for (int j = 1; j < ready->n_ready; ++j)
3049 if (INSN_LAST_RFS_WIN (p[j]) == i)
3050 fprintf (sched_dump, " %s",
3051 (*current_sched_info->print_insn) (p[j], 0));
3052 }
3053 fprintf (sched_dump, "\n");
3054 }
3055 }
3056
3057 /* Separate DEBUG_INSNs from normal insns. DEBUG_INSNs go to the end
3058 of the array. */
3059 static void
3060 ready_sort_debug (struct ready_list *ready)
3061 {
3062 int i;
3063 rtx_insn **first = ready_lastpos (ready);
3064
3065 for (i = 0; i < ready->n_ready; ++i)
3066 if (!DEBUG_INSN_P (first[i]))
3067 INSN_RFS_DEBUG_ORIG_ORDER (first[i]) = i;
3068
3069 qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule_debug);
3070 }
3071
3072 /* Sort non-debug insns in the ready list READY by ascending priority.
3073 Assumes that all debug insns are separated from the real insns. */
3074 static void
3075 ready_sort_real (struct ready_list *ready)
3076 {
3077 int i;
3078 rtx_insn **first = ready_lastpos (ready);
3079 int n_ready_real = ready->n_ready - ready->n_debug;
3080
3081 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3082 for (i = 0; i < n_ready_real; ++i)
3083 setup_insn_reg_pressure_info (first[i]);
3084 else if (sched_pressure == SCHED_PRESSURE_MODEL
3085 && model_curr_point < model_num_insns)
3086 model_set_excess_costs (first, n_ready_real);
3087
3088 rank_for_schedule_stats_t stats1;
3089 if (sched_verbose >= 4)
3090 stats1 = rank_for_schedule_stats;
3091
3092 if (n_ready_real == 2)
3093 swap_sort (first, n_ready_real);
3094 else if (n_ready_real > 2)
3095 qsort (first, n_ready_real, sizeof (rtx), rank_for_schedule);
3096
3097 if (sched_verbose >= 4)
3098 {
3099 rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
3100 print_rank_for_schedule_stats (";;\t\t", &stats1, ready);
3101 }
3102 }
3103
3104 /* Sort the ready list READY by ascending priority. */
3105 static void
3106 ready_sort (struct ready_list *ready)
3107 {
3108 if (ready->n_debug > 0)
3109 ready_sort_debug (ready);
3110 else
3111 ready_sort_real (ready);
3112 }
3113
3114 /* PREV is an insn that is ready to execute. Adjust its priority if that
3115 will help shorten or lengthen register lifetimes as appropriate. Also
3116 provide a hook for the target to tweak itself. */
3117
3118 HAIFA_INLINE static void
3119 adjust_priority (rtx_insn *prev)
3120 {
3121 /* ??? There used to be code here to try and estimate how an insn
3122 affected register lifetimes, but it did it by looking at REG_DEAD
3123 notes, which we removed in schedule_region. Nor did it try to
3124 take into account register pressure or anything useful like that.
3125
3126 Revisit when we have a machine model to work with and not before. */
3127
3128 if (targetm.sched.adjust_priority)
3129 INSN_PRIORITY (prev) =
3130 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
3131 }
3132
3133 /* Advance DFA state STATE on one cycle. */
3134 void
3135 advance_state (state_t state)
3136 {
3137 if (targetm.sched.dfa_pre_advance_cycle)
3138 targetm.sched.dfa_pre_advance_cycle ();
3139
3140 if (targetm.sched.dfa_pre_cycle_insn)
3141 state_transition (state,
3142 targetm.sched.dfa_pre_cycle_insn ());
3143
3144 state_transition (state, NULL);
3145
3146 if (targetm.sched.dfa_post_cycle_insn)
3147 state_transition (state,
3148 targetm.sched.dfa_post_cycle_insn ());
3149
3150 if (targetm.sched.dfa_post_advance_cycle)
3151 targetm.sched.dfa_post_advance_cycle ();
3152 }
3153
3154 /* Advance time on one cycle. */
3155 HAIFA_INLINE static void
3156 advance_one_cycle (void)
3157 {
3158 advance_state (curr_state);
3159 if (sched_verbose >= 4)
3160 fprintf (sched_dump, ";;\tAdvance the current state.\n");
3161 }
3162
3163 /* Update register pressure after scheduling INSN. */
3164 static void
3165 update_register_pressure (rtx_insn *insn)
3166 {
3167 struct reg_use_data *use;
3168 struct reg_set_data *set;
3169
3170 gcc_checking_assert (!DEBUG_INSN_P (insn));
3171
3172 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
3173 if (dying_use_p (use))
3174 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3175 use->regno, false);
3176 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
3177 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3178 set->regno, true);
3179 }
3180
3181 /* Set up or update (if UPDATE_P) max register pressure (see its
3182 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3183 after insn AFTER. */
3184 static void
3185 setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3186 {
3187 int i, p;
3188 bool eq_p;
3189 rtx_insn *insn;
3190 static int max_reg_pressure[N_REG_CLASSES];
3191
3192 save_reg_pressure ();
3193 for (i = 0; i < ira_pressure_classes_num; i++)
3194 max_reg_pressure[ira_pressure_classes[i]]
3195 = curr_reg_pressure[ira_pressure_classes[i]];
3196 for (insn = NEXT_INSN (after);
3197 insn != NULL_RTX && ! BARRIER_P (insn)
3198 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3199 insn = NEXT_INSN (insn))
3200 if (NONDEBUG_INSN_P (insn))
3201 {
3202 eq_p = true;
3203 for (i = 0; i < ira_pressure_classes_num; i++)
3204 {
3205 p = max_reg_pressure[ira_pressure_classes[i]];
3206 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3207 {
3208 eq_p = false;
3209 INSN_MAX_REG_PRESSURE (insn)[i]
3210 = max_reg_pressure[ira_pressure_classes[i]];
3211 }
3212 }
3213 if (update_p && eq_p)
3214 break;
3215 update_register_pressure (insn);
3216 for (i = 0; i < ira_pressure_classes_num; i++)
3217 if (max_reg_pressure[ira_pressure_classes[i]]
3218 < curr_reg_pressure[ira_pressure_classes[i]])
3219 max_reg_pressure[ira_pressure_classes[i]]
3220 = curr_reg_pressure[ira_pressure_classes[i]];
3221 }
3222 restore_reg_pressure ();
3223 }
3224
3225 /* Update the current register pressure after scheduling INSN. Also
3226 update the max register pressure for unscheduled insns of the
3227 current BB. */
3228 static void
3229 update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3230 {
3231 int i;
3232 int before[N_REG_CLASSES];
3233
3234 for (i = 0; i < ira_pressure_classes_num; i++)
3235 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3236 update_register_pressure (insn);
3237 for (i = 0; i < ira_pressure_classes_num; i++)
3238 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3239 break;
3240 if (i < ira_pressure_classes_num)
3241 setup_insn_max_reg_pressure (insn, true);
3242 }
3243
3244 /* Set up register pressure at the beginning of basic block BB whose
3245 insns start after insn AFTER. Also set up max register pressure
3246 for all insns of the basic block. */
3247 void
3248 sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3249 {
3250 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3251 initiate_bb_reg_pressure_info (bb);
3252 setup_insn_max_reg_pressure (after, false);
3253 }
3254 \f
3255 /* If doing predication while scheduling, verify whether INSN, which
3256 has just been scheduled, clobbers the conditions of any
3257 instructions that must be predicated in order to break their
3258 dependencies. If so, remove them from the queues so that they will
3259 only be scheduled once their control dependency is resolved. */
3260
3261 static void
3262 check_clobbered_conditions (rtx_insn *insn)
3263 {
3264 HARD_REG_SET t;
3265 int i;
3266
3267 if ((current_sched_info->flags & DO_PREDICATION) == 0)
3268 return;
3269
3270 find_all_hard_reg_sets (insn, &t, true);
3271
3272 restart:
3273 for (i = 0; i < ready.n_ready; i++)
3274 {
3275 rtx_insn *x = ready_element (&ready, i);
3276 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3277 {
3278 ready_remove_insn (x);
3279 goto restart;
3280 }
3281 }
3282 for (i = 0; i <= max_insn_queue_index; i++)
3283 {
3284 rtx_insn_list *link;
3285 int q = NEXT_Q_AFTER (q_ptr, i);
3286
3287 restart_queue:
3288 for (link = insn_queue[q]; link; link = link->next ())
3289 {
3290 rtx_insn *x = link->insn ();
3291 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3292 {
3293 queue_remove (x);
3294 goto restart_queue;
3295 }
3296 }
3297 }
3298 }
3299 \f
3300 /* Return (in order):
3301
3302 - positive if INSN adversely affects the pressure on one
3303 register class
3304
3305 - negative if INSN reduces the pressure on one register class
3306
3307 - 0 if INSN doesn't affect the pressure on any register class. */
3308
3309 static int
3310 model_classify_pressure (struct model_insn_info *insn)
3311 {
3312 struct reg_pressure_data *reg_pressure;
3313 int death[N_REG_CLASSES];
3314 int pci, cl, sum;
3315
3316 calculate_reg_deaths (insn->insn, death);
3317 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3318 sum = 0;
3319 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3320 {
3321 cl = ira_pressure_classes[pci];
3322 if (death[cl] < reg_pressure[pci].set_increase)
3323 return 1;
3324 sum += reg_pressure[pci].set_increase - death[cl];
3325 }
3326 return sum;
3327 }
3328
3329 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3330
3331 static int
3332 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3333 {
3334 unsigned int height1, height2;
3335 unsigned int priority1, priority2;
3336
3337 /* Prefer instructions with a higher model priority. */
3338 if (insn1->model_priority != insn2->model_priority)
3339 return insn1->model_priority > insn2->model_priority;
3340
3341 /* Combine the length of the longest path of satisfied true dependencies
3342 that leads to each instruction (depth) with the length of the longest
3343 path of any dependencies that leads from the instruction (alap).
3344 Prefer instructions with the greatest combined length. If the combined
3345 lengths are equal, prefer instructions with the greatest depth.
3346
3347 The idea is that, if we have a set S of "equal" instructions that each
3348 have ALAP value X, and we pick one such instruction I, any true-dependent
3349 successors of I that have ALAP value X - 1 should be preferred over S.
3350 This encourages the schedule to be "narrow" rather than "wide".
3351 However, if I is a low-priority instruction that we decided to
3352 schedule because of its model_classify_pressure, and if there
3353 is a set of higher-priority instructions T, the aforementioned
3354 successors of I should not have the edge over T. */
3355 height1 = insn1->depth + insn1->alap;
3356 height2 = insn2->depth + insn2->alap;
3357 if (height1 != height2)
3358 return height1 > height2;
3359 if (insn1->depth != insn2->depth)
3360 return insn1->depth > insn2->depth;
3361
3362 /* We have no real preference between INSN1 and INSN2 as far as attempts
3363 to reduce pressure go. Prefer instructions with higher priorities. */
3364 priority1 = INSN_PRIORITY (insn1->insn);
3365 priority2 = INSN_PRIORITY (insn2->insn);
3366 if (priority1 != priority2)
3367 return priority1 > priority2;
3368
3369 /* Use the original rtl sequence as a tie-breaker. */
3370 return insn1 < insn2;
3371 }
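
/* A worked example of the ordering above (illustrative values only): with
   equal model priorities, an insn with depth 3 and alap 2 (combined height
   5) is preferred over one with depth 1 and alap 3 (height 4); if both had
   height 5, the one with the greater depth would win, and only then would
   INSN_PRIORITY be consulted.  */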
3372
3373 /* Add INSN to the model worklist immediately after PREV. Add it to the
3374 beginning of the list if PREV is null. */
3375
3376 static void
3377 model_add_to_worklist_at (struct model_insn_info *insn,
3378 struct model_insn_info *prev)
3379 {
3380 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3381 QUEUE_INDEX (insn->insn) = QUEUE_READY;
3382
3383 insn->prev = prev;
3384 if (prev)
3385 {
3386 insn->next = prev->next;
3387 prev->next = insn;
3388 }
3389 else
3390 {
3391 insn->next = model_worklist;
3392 model_worklist = insn;
3393 }
3394 if (insn->next)
3395 insn->next->prev = insn;
3396 }
3397
3398 /* Remove INSN from the model worklist. */
3399
3400 static void
3401 model_remove_from_worklist (struct model_insn_info *insn)
3402 {
3403 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3404 QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3405
3406 if (insn->prev)
3407 insn->prev->next = insn->next;
3408 else
3409 model_worklist = insn->next;
3410 if (insn->next)
3411 insn->next->prev = insn->prev;
3412 }
3413
3414 /* Add INSN to the model worklist. Start looking for a suitable position
3415 between neighbors PREV and NEXT, testing at most param_max_sched_ready_insns
3416 insns on either side. A null PREV indicates the beginning of the list and
3417 a null NEXT indicates the end. */
3418
3419 static void
3420 model_add_to_worklist (struct model_insn_info *insn,
3421 struct model_insn_info *prev,
3422 struct model_insn_info *next)
3423 {
3424 int count;
3425
3426 count = param_max_sched_ready_insns;
3427 if (count > 0 && prev && model_order_p (insn, prev))
3428 do
3429 {
3430 count--;
3431 prev = prev->prev;
3432 }
3433 while (count > 0 && prev && model_order_p (insn, prev));
3434 else
3435 while (count > 0 && next && model_order_p (next, insn))
3436 {
3437 count--;
3438 prev = next;
3439 next = next->next;
3440 }
3441 model_add_to_worklist_at (insn, prev);
3442 }
3443
3444 /* INSN may now have a higher priority (in the model_order_p sense)
3445 than before. Move it up the worklist if necessary. */
3446
3447 static void
3448 model_promote_insn (struct model_insn_info *insn)
3449 {
3450 struct model_insn_info *prev;
3451 int count;
3452
3453 prev = insn->prev;
3454 count = param_max_sched_ready_insns;
3455 while (count > 0 && prev && model_order_p (insn, prev))
3456 {
3457 count--;
3458 prev = prev->prev;
3459 }
3460 if (prev != insn->prev)
3461 {
3462 model_remove_from_worklist (insn);
3463 model_add_to_worklist_at (insn, prev);
3464 }
3465 }
3466
3467 /* Add INSN to the end of the model schedule. */
3468
3469 static void
3470 model_add_to_schedule (rtx_insn *insn)
3471 {
3472 unsigned int point;
3473
3474 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3475 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3476
3477 point = model_schedule.length ();
3478 model_schedule.quick_push (insn);
3479 INSN_MODEL_INDEX (insn) = point + 1;
3480 }
3481
3482 /* Analyze the instructions that are to be scheduled, setting up
3483 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3484 instructions to model_worklist. */
3485
3486 static void
3487 model_analyze_insns (void)
3488 {
3489 rtx_insn *start, *end, *iter;
3490 sd_iterator_def sd_it;
3491 dep_t dep;
3492 struct model_insn_info *insn, *con;
3493
3494 model_num_insns = 0;
3495 start = PREV_INSN (current_sched_info->next_tail);
3496 end = current_sched_info->prev_head;
3497 for (iter = start; iter != end; iter = PREV_INSN (iter))
3498 if (NONDEBUG_INSN_P (iter))
3499 {
3500 insn = MODEL_INSN_INFO (iter);
3501 insn->insn = iter;
3502 FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3503 {
3504 con = MODEL_INSN_INFO (DEP_CON (dep));
3505 if (con->insn && insn->alap < con->alap + 1)
3506 insn->alap = con->alap + 1;
3507 }
3508
3509 insn->old_queue = QUEUE_INDEX (iter);
3510 QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3511
3512 insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3513 if (insn->unscheduled_preds == 0)
3514 model_add_to_worklist (insn, NULL, model_worklist);
3515
3516 model_num_insns++;
3517 }
3518 }
3519
3520 /* The global state describes the register pressure at the start of the
3521 model schedule. Initialize GROUP accordingly. */
3522
3523 static void
3524 model_init_pressure_group (struct model_pressure_group *group)
3525 {
3526 int pci, cl;
3527
3528 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3529 {
3530 cl = ira_pressure_classes[pci];
3531 group->limits[pci].pressure = curr_reg_pressure[cl];
3532 group->limits[pci].point = 0;
3533 }
3534 /* Use index model_num_insns to record the state after the last
3535 instruction in the model schedule. */
3536 group->model = XNEWVEC (struct model_pressure_data,
3537 (model_num_insns + 1) * ira_pressure_classes_num);
3538 }
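
/* GROUP->model is in effect a (model_num_insns + 1) by
   ira_pressure_classes_num matrix stored row-major, one row per schedule
   point.  The accessor sketched below is an illustration only, a
   hypothetical stand-in for the MODEL_REF_PRESSURE / MODEL_MAX_PRESSURE
   accessors used elsewhere in this file; it shows the implied layout.  */
#if 0
static struct model_pressure_data *
pressure_cell (struct model_pressure_group *group, int point, int pci)
{
  return &group->model[point * ira_pressure_classes_num + pci];
}
#endif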
3539
3540 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3541 Update the maximum pressure for the whole schedule. */
3542
3543 static void
3544 model_record_pressure (struct model_pressure_group *group,
3545 int point, int pci, int pressure)
3546 {
3547 MODEL_REF_PRESSURE (group, point, pci) = pressure;
3548 if (group->limits[pci].pressure < pressure)
3549 {
3550 group->limits[pci].pressure = pressure;
3551 group->limits[pci].point = point;
3552 }
3553 }
3554
3555 /* INSN has just been added to the end of the model schedule. Record its
3556 register-pressure information. */
3557
3558 static void
3559 model_record_pressures (struct model_insn_info *insn)
3560 {
3561 struct reg_pressure_data *reg_pressure;
3562 int point, pci, cl, delta;
3563 int death[N_REG_CLASSES];
3564
3565 point = model_index (insn->insn);
3566 if (sched_verbose >= 2)
3567 {
3568 if (point == 0)
3569 {
3570 fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3571 fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3572 }
3573 fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3574 point, INSN_UID (insn->insn), insn->model_priority,
3575 insn->depth + insn->alap, insn->depth,
3576 INSN_PRIORITY (insn->insn),
3577 str_pattern_slim (PATTERN (insn->insn)));
3578 }
3579 calculate_reg_deaths (insn->insn, death);
3580 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3581 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3582 {
3583 cl = ira_pressure_classes[pci];
3584 delta = reg_pressure[pci].set_increase - death[cl];
3585 if (sched_verbose >= 2)
3586 fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3587 curr_reg_pressure[cl], delta);
3588 model_record_pressure (&model_before_pressure, point, pci,
3589 curr_reg_pressure[cl]);
3590 }
3591 if (sched_verbose >= 2)
3592 fprintf (sched_dump, "\n");
3593 }
3594
3595 /* All instructions have been added to the model schedule. Record the
3596 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3597
3598 static void
3599 model_record_final_pressures (struct model_pressure_group *group)
3600 {
3601 int point, pci, max_pressure, ref_pressure, cl;
3602
3603 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3604 {
3605 /* Record the final pressure for this class. */
3606 cl = ira_pressure_classes[pci];
3607 point = model_num_insns;
3608 ref_pressure = curr_reg_pressure[cl];
3609 model_record_pressure (group, point, pci, ref_pressure);
3610
3611 /* Record the original maximum pressure. */
3612 group->limits[pci].orig_pressure = group->limits[pci].pressure;
3613
3614 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3615 max_pressure = ref_pressure;
3616 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3617 while (point > 0)
3618 {
3619 point--;
3620 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3621 max_pressure = MAX (max_pressure, ref_pressure);
3622 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3623 }
3624 }
3625 }
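
/* The backward loop above is a suffix-maximum scan: on completion,
   MODEL_MAX_PRESSURE (GROUP, POINT, PCI) holds the maximum of
   MODEL_REF_PRESSURE over all points >= POINT.  In isolation the scan has
   this shape (a self-contained sketch, not scheduler code, kept out of
   the build):  */
#if 0
static void
suffix_max (const int *ref, int *maxv, int n)
{
  int running = ref[n - 1];
  for (int i = n - 1; i >= 0; i--)
    {
      if (ref[i] > running)
	running = ref[i];
      maxv[i] = running;	/* Maximum of ref[i..n-1].  */
    }
}
#endif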
3626
3627 /* Update all successors of INSN, given that INSN has just been scheduled. */
3628
3629 static void
3630 model_add_successors_to_worklist (struct model_insn_info *insn)
3631 {
3632 sd_iterator_def sd_it;
3633 struct model_insn_info *con;
3634 dep_t dep;
3635
3636 FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3637 {
3638 con = MODEL_INSN_INFO (DEP_CON (dep));
3639 /* Ignore debug instructions, and instructions from other blocks. */
3640 if (con->insn)
3641 {
3642 con->unscheduled_preds--;
3643
3644 /* Update the depth field of each true-dependent successor.
3645 Increasing the depth gives them a higher priority than
3646 before. */
3647 if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3648 {
3649 con->depth = insn->depth + 1;
3650 if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3651 model_promote_insn (con);
3652 }
3653
3654 /* If this is a true dependency, or if there are no remaining
3655 dependencies for CON (meaning that CON only had non-true
3656 dependencies), make sure that CON is on the worklist.
3657 We don't bother otherwise because it would tend to fill the
3658 worklist with a lot of low-priority instructions that are not
3659 yet ready to issue. */
3660 if ((con->depth > 0 || con->unscheduled_preds == 0)
3661 && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3662 model_add_to_worklist (con, insn, insn->next);
3663 }
3664 }
3665 }
3666
3667 /* Give INSN a higher priority than any current instruction, then give
3668 unscheduled predecessors of INSN a higher priority still. If any of
3669 those predecessors are not on the model worklist, do the same for their
3670 predecessors, and so on. */
3671
3672 static void
3673 model_promote_predecessors (struct model_insn_info *insn)
3674 {
3675 struct model_insn_info *pro, *first;
3676 sd_iterator_def sd_it;
3677 dep_t dep;
3678
3679 if (sched_verbose >= 7)
3680 fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3681 INSN_UID (insn->insn), model_next_priority);
3682 insn->model_priority = model_next_priority++;
3683 model_remove_from_worklist (insn);
3684 model_add_to_worklist_at (insn, NULL);
3685
3686 first = NULL;
3687 for (;;)
3688 {
3689 FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3690 {
3691 pro = MODEL_INSN_INFO (DEP_PRO (dep));
3692 /* The first test is to ignore debug instructions, and instructions
3693 from other blocks. */
3694 if (pro->insn
3695 && pro->model_priority != model_next_priority
3696 && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3697 {
3698 pro->model_priority = model_next_priority;
3699 if (sched_verbose >= 7)
3700 fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3701 if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3702 {
3703 /* PRO is already in the worklist, but it now has
3704 a higher priority than before. Move it to the
3705 appropriate place. */
3706 model_remove_from_worklist (pro);
3707 model_add_to_worklist (pro, NULL, model_worklist);
3708 }
3709 else
3710 {
3711 /* PRO isn't in the worklist. Recursively process
3712 its predecessors until we find one that is. */
3713 pro->next = first;
3714 first = pro;
3715 }
3716 }
3717 }
3718 if (!first)
3719 break;
3720 insn = first;
3721 first = insn->next;
3722 }
3723 if (sched_verbose >= 7)
3724 fprintf (sched_dump, " = %d\n", model_next_priority);
3725 model_next_priority++;
3726 }
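
/* Note the traversal trick above: a predecessor that is not yet on the
   worklist is pushed onto an intrusive stack by reusing its NEXT field
   (dead while the insn is off the worklist), so the walk needs neither
   recursion nor a separately allocated stack.  */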
3727
3728 /* Pick one instruction from model_worklist and process it. */
3729
3730 static void
3731 model_choose_insn (void)
3732 {
3733 struct model_insn_info *insn, *fallback;
3734 int count;
3735
3736 if (sched_verbose >= 7)
3737 {
3738 fprintf (sched_dump, ";;\t+--- worklist:\n");
3739 insn = model_worklist;
3740 count = param_max_sched_ready_insns;
3741 while (count > 0 && insn)
3742 {
3743 fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
3744 INSN_UID (insn->insn), insn->model_priority,
3745 insn->depth + insn->alap, insn->depth,
3746 INSN_PRIORITY (insn->insn));
3747 count--;
3748 insn = insn->next;
3749 }
3750 }
3751
3752 /* Look for a ready instruction whose model_classify_pressure is zero
3753 or negative, picking the highest-priority one. Adding such an
3754 instruction to the schedule now should do no harm, and may actually
3755 do some good.
3756
3757 Failing that, see whether there is an instruction with the highest
3758 extant model_priority that is not yet ready, but which would reduce
3759 pressure if it became ready. This is designed to catch cases like:
3760
3761 (set (mem (reg R1)) (reg R2))
3762
3763 where the instruction is the last remaining use of R1 and where the
3764 value of R2 is not yet available (or vice versa). The death of R1
3765 means that this instruction already reduces pressure. It is of
3766 course possible that the computation of R2 involves other registers
3767 that are hard to kill, but such cases are rare enough for this
3768 heuristic to be a win in general.
3769
3770 Failing that, just pick the highest-priority instruction in the
3771 worklist. */
3772 count = param_max_sched_ready_insns;
3773 insn = model_worklist;
3774 fallback = 0;
3775 for (;;)
3776 {
3777 if (count == 0 || !insn)
3778 {
3779 insn = fallback ? fallback : model_worklist;
3780 break;
3781 }
3782 if (insn->unscheduled_preds)
3783 {
3784 if (model_worklist->model_priority == insn->model_priority
3785 && !fallback
3786 && model_classify_pressure (insn) < 0)
3787 fallback = insn;
3788 }
3789 else
3790 {
3791 if (model_classify_pressure (insn) <= 0)
3792 break;
3793 }
3794 count--;
3795 insn = insn->next;
3796 }
3797
3798 if (sched_verbose >= 7 && insn != model_worklist)
3799 {
3800 if (insn->unscheduled_preds)
3801 fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3802 INSN_UID (insn->insn));
3803 else
3804 fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3805 INSN_UID (insn->insn));
3806 }
3807 if (insn->unscheduled_preds)
3808 /* INSN isn't yet ready to issue. Give all its predecessors the
3809 highest priority. */
3810 model_promote_predecessors (insn);
3811 else
3812 {
3813 /* INSN is ready. Add it to the end of model_schedule and
3814 process its successors. */
3815 model_add_successors_to_worklist (insn);
3816 model_remove_from_worklist (insn);
3817 model_add_to_schedule (insn->insn);
3818 model_record_pressures (insn);
3819 update_register_pressure (insn->insn);
3820 }
3821 }
3822
3823 /* Restore all QUEUE_INDEXs to the values that they had before
3824 model_start_schedule was called. */
3825
3826 static void
3827 model_reset_queue_indices (void)
3828 {
3829 unsigned int i;
3830 rtx_insn *insn;
3831
3832 FOR_EACH_VEC_ELT (model_schedule, i, insn)
3833 QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3834 }
3835
3836 /* We have calculated the model schedule and spill costs. Print a summary
3837 to sched_dump. */
3838
3839 static void
3840 model_dump_pressure_summary (void)
3841 {
3842 int pci, cl;
3843
3844 fprintf (sched_dump, ";; Pressure summary:");
3845 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3846 {
3847 cl = ira_pressure_classes[pci];
3848 fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3849 model_before_pressure.limits[pci].pressure);
3850 }
3851 fprintf (sched_dump, "\n\n");
3852 }
3853
3854 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3855 scheduling region. */
3856
3857 static void
3858 model_start_schedule (basic_block bb)
3859 {
3860 model_next_priority = 1;
3861 model_schedule.create (sched_max_luid);
3862 model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3863
3864 gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
3865 initiate_reg_pressure_info (df_get_live_in (bb));
3866
3867 model_analyze_insns ();
3868 model_init_pressure_group (&model_before_pressure);
3869 while (model_worklist)
3870 model_choose_insn ();
3871 gcc_assert (model_num_insns == (int) model_schedule.length ());
3872 if (sched_verbose >= 2)
3873 fprintf (sched_dump, "\n");
3874
3875 model_record_final_pressures (&model_before_pressure);
3876 model_reset_queue_indices ();
3877
3878 XDELETEVEC (model_insns);
3879
3880 model_curr_point = 0;
3881 initiate_reg_pressure_info (df_get_live_in (bb));
3882 if (sched_verbose >= 1)
3883 model_dump_pressure_summary ();
3884 }
3885
3886 /* Free the information associated with GROUP. */
3887
3888 static void
3889 model_finalize_pressure_group (struct model_pressure_group *group)
3890 {
3891 XDELETEVEC (group->model);
3892 }
3893
3894 /* Free the information created by model_start_schedule. */
3895
3896 static void
3897 model_end_schedule (void)
3898 {
3899 model_finalize_pressure_group (&model_before_pressure);
3900 model_schedule.release ();
3901 }
3902
3903 /* Prepare reg pressure scheduling for basic block BB. */
3904 static void
3905 sched_pressure_start_bb (basic_block bb)
3906 {
3907 /* Set the number of available registers for each class, taking into
3908 account the relative probability of the current basic block versus the
3909 function prologue and epilogue.
3910 * If the basic block executes much more often than the prologue/epilogue
3911 (e.g., inside a hot loop), then the cost of a spill in the prologue is
3912 close to nil, so the effective number of available registers is
3913 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl] - 0).
3914 * If the basic block executes as often as the prologue/epilogue, then a
3915 spill in the block is as costly as in the prologue, so the effective
3916 number of available registers is
3917 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3918 - call_saved_regs_num[cl]).
3919 Note that, all else being equal, we prefer to spill in the prologue, since
3920 that allows "extra" registers for other basic blocks of the function.
3921 * If the basic block is on the cold path of the function and executes
3922 rarely, then we should always prefer to spill in the block rather than
3923 in the prologue/epilogue. The effective number of available registers is
3924 (ira_class_hard_regs_num[cl] - fixed_regs_num[cl]
3925 - call_saved_regs_num[cl]). */
3926 {
3927 int i;
3928 int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.to_frequency (cfun);
3929 int bb_freq = bb->count.to_frequency (cfun);
3930
3931 if (bb_freq == 0)
3932 {
3933 if (entry_freq == 0)
3934 entry_freq = bb_freq = 1;
3935 }
3936 if (bb_freq < entry_freq)
3937 bb_freq = entry_freq;
3938
3939 for (i = 0; i < ira_pressure_classes_num; ++i)
3940 {
3941 enum reg_class cl = ira_pressure_classes[i];
3942 sched_class_regs_num[cl] = ira_class_hard_regs_num[cl]
3943 - fixed_regs_num[cl];
3944 sched_class_regs_num[cl]
3945 -= (call_saved_regs_num[cl] * entry_freq) / bb_freq;
3946 }
3947 }
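
/* For example (illustrative numbers only): for a class with 32 hard
   registers of which 2 are fixed and 10 call-saved, a block four times as
   frequent as the entry block gets 30 - (10 * 1) / 4 = 28 effective
   registers, while a block no hotter than the entry block (including any
   cold block, since bb_freq is clamped to entry_freq above) gets
   30 - 10 = 20.  */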
3948
3949 if (sched_pressure == SCHED_PRESSURE_MODEL)
3950 model_start_schedule (bb);
3951 }
3952 \f
3953 /* A structure that holds local state for the loop in schedule_block. */
3954 struct sched_block_state
3955 {
3956 /* True if no real insns have been scheduled in the current cycle. */
3957 bool first_cycle_insn_p;
3958 /* True if a shadow insn has been scheduled in the current cycle, which
3959 means that no more normal insns can be issued. */
3960 bool shadows_only_p;
3961 /* True if we're winding down a modulo schedule, which means that we only
3962 issue insns with INSN_EXACT_TICK set. */
3963 bool modulo_epilogue;
3964 /* Initialized with the machine's issue rate every cycle, and updated
3965 by calls to the variable_issue hook. */
3966 int can_issue_more;
3967 };
3968
3969 /* INSN is the "currently executing insn". Launch each insn which was
3970 waiting on INSN. READY is the ready list which contains the insns
3971 that are ready to fire. CLOCK is the current cycle. The function
3972 returns necessary cycle advance after issuing the insn (it is not
3973 zero for insns in a schedule group). */
3974
3975 static int
3976 schedule_insn (rtx_insn *insn)
3977 {
3978 sd_iterator_def sd_it;
3979 dep_t dep;
3980 int i;
3981 int advance = 0;
3982
3983 if (sched_verbose >= 1)
3984 {
3985 struct reg_pressure_data *pressure_info;
3986 fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3987 clock_var, (*current_sched_info->print_insn) (insn, 1),
3988 str_pattern_slim (PATTERN (insn)));
3989
3990 if (recog_memoized (insn) < 0)
3991 fprintf (sched_dump, "nothing");
3992 else
3993 print_reservation (sched_dump, insn);
3994 pressure_info = INSN_REG_PRESSURE (insn);
3995 if (pressure_info != NULL)
3996 {
3997 fputc (':', sched_dump);
3998 for (i = 0; i < ira_pressure_classes_num; i++)
3999 fprintf (sched_dump, "%s%s%+d(%d)",
4000 scheduled_insns.length () > 1
4001 && INSN_LUID (insn)
4002 < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
4003 reg_class_names[ira_pressure_classes[i]],
4004 pressure_info[i].set_increase, pressure_info[i].change);
4005 }
4006 if (sched_pressure == SCHED_PRESSURE_MODEL
4007 && model_curr_point < model_num_insns
4008 && model_index (insn) == model_curr_point)
4009 fprintf (sched_dump, ":model %d", model_curr_point);
4010 fputc ('\n', sched_dump);
4011 }
4012
4013 if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
4014 update_reg_and_insn_max_reg_pressure (insn);
4015
4016 /* The insn being scheduled should have all its dependencies resolved
4017 and should already have been removed from the ready list. */
4018 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
4019
4020 /* Reset debug insns invalidated by moving this insn. */
4021 if (MAY_HAVE_DEBUG_BIND_INSNS && !DEBUG_INSN_P (insn))
4022 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
4023 sd_iterator_cond (&sd_it, &dep);)
4024 {
4025 rtx_insn *dbg = DEP_PRO (dep);
4026 struct reg_use_data *use, *next;
4027
4028 if (DEP_STATUS (dep) & DEP_CANCELLED)
4029 {
4030 sd_iterator_next (&sd_it);
4031 continue;
4032 }
4033
4034 gcc_assert (DEBUG_BIND_INSN_P (dbg));
4035
4036 if (sched_verbose >= 6)
4037 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
4038 INSN_UID (dbg));
4039
4040 /* ??? Rather than resetting the debug insn, we might be able
4041 to emit a debug temp before the just-scheduled insn, but
4042 this would involve checking that the expression at the
4043 point of the debug insn is equivalent to the expression
4044 before the just-scheduled insn. They might not be: the
4045 expression in the debug insn may depend on other insns not
4046 yet scheduled that set MEMs, REGs or even other debug
4047 insns. It's not clear that attempting to preserve debug
4048 information in these cases is worth the effort, given how
4049 uncommon these resets are and the likelihood that the debug
4050 temps introduced won't survive the schedule change. */
4051 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
4052 df_insn_rescan (dbg);
4053
4054 /* Unknown location doesn't use any registers. */
4055 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
4056 {
4057 struct reg_use_data *prev = use;
4058
4059 /* Remove use from the cyclic next_regno_use chain first. */
4060 while (prev->next_regno_use != use)
4061 prev = prev->next_regno_use;
4062 prev->next_regno_use = use->next_regno_use;
4063 next = use->next_insn_use;
4064 free (use);
4065 }
4066 INSN_REG_USE_LIST (dbg) = NULL;
4067
4068 /* We delete rather than resolve these deps, otherwise we
4069 crash in sched_free_deps(), because forward deps are
4070 expected to be released before backward deps. */
4071 sd_delete_dep (sd_it);
4072 }
4073
4074 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
4075 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
4076
4077 if (sched_pressure == SCHED_PRESSURE_MODEL
4078 && model_curr_point < model_num_insns
4079 && NONDEBUG_INSN_P (insn))
4080 {
4081 if (model_index (insn) == model_curr_point)
4082 do
4083 model_curr_point++;
4084 while (model_curr_point < model_num_insns
4085 && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
4086 == QUEUE_SCHEDULED));
4087 else
4088 model_recompute (insn);
4089 model_update_limit_points ();
4090 update_register_pressure (insn);
4091 if (sched_verbose >= 2)
4092 print_curr_reg_pressure ();
4093 }
4094
4095 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
4096 if (INSN_TICK (insn) > clock_var)
4097 /* INSN has been prematurely moved from the queue to the ready list.
4098 This is possible only if the following flags are set. */
4099 gcc_assert (flag_sched_stalled_insns || sched_fusion);
4100
4101 /* ??? Probably, if INSN is scheduled prematurely, we should leave
4102 INSN_TICK untouched. This is a machine-dependent issue, actually. */
4103 INSN_TICK (insn) = clock_var;
4104
4105 check_clobbered_conditions (insn);
4106
4107 /* Update dependent instructions. First, see if by scheduling this insn
4108 now we broke a dependence in a way that requires us to change another
4109 insn. */
4110 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
4111 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4112 {
4113 struct dep_replacement *desc = DEP_REPLACE (dep);
4114 rtx_insn *pro = DEP_PRO (dep);
4115 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
4116 && desc != NULL && desc->insn == pro)
4117 apply_replacement (dep, false);
4118 }
4119
4120 /* Go through and resolve forward dependencies. */
4121 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4122 sd_iterator_cond (&sd_it, &dep);)
4123 {
4124 rtx_insn *next = DEP_CON (dep);
4125 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4126
4127 /* Resolve the dependence between INSN and NEXT.
4128 sd_resolve_dep () moves current dep to another list thus
4129 advancing the iterator. */
4130 sd_resolve_dep (sd_it);
4131
4132 if (cancelled)
4133 {
4134 if (must_restore_pattern_p (next, dep))
4135 restore_pattern (dep, false);
4136 continue;
4137 }
4138
4139 /* Don't bother trying to mark next as ready if insn is a debug
4140 insn. If insn is the last hard dependency, it will have
4141 already been discounted. */
4142 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4143 continue;
4144
4145 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4146 {
4147 int effective_cost;
4148
4149 effective_cost = try_ready (next);
4150
4151 if (effective_cost >= 0
4152 && SCHED_GROUP_P (next)
4153 && advance < effective_cost)
4154 advance = effective_cost;
4155 }
4156 else
4157 /* A check insn always has only one forward dependence (to the first insn
4158 in the recovery block); therefore, this will be executed only once. */
4159 {
4160 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4161 fix_recovery_deps (RECOVERY_BLOCK (insn));
4162 }
4163 }
4164
4165 /* Annotate the instruction with issue information -- TImode
4166 indicates that the instruction is expected not to be able
4167 to issue on the same cycle as the previous insn. A machine
4168 may use this information to decide how the instruction should
4169 be aligned. */
4170 if (issue_rate > 1
4171 && GET_CODE (PATTERN (insn)) != USE
4172 && GET_CODE (PATTERN (insn)) != CLOBBER
4173 && !DEBUG_INSN_P (insn))
4174 {
4175 if (reload_completed)
4176 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4177 last_clock_var = clock_var;
4178 }
4179
4180 if (nonscheduled_insns_begin != NULL_RTX)
4181 /* Indicate to debug counters that INSN is scheduled. */
4182 nonscheduled_insns_begin = insn;
4183
4184 return advance;
4185 }
4186
4187 /* Functions for handling of notes. */
4188
4189 /* Append the note list ending at FROM_END to the list ending at *TO_ENDP. */
4190 void
4191 concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4192 {
4193 rtx_insn *from_start;
4194
4195 /* It's easy when we have nothing to concat. */
4196 if (from_end == NULL)
4197 return;
4198
4199 /* It's also easy when the destination is empty. */
4200 if (*to_endp == NULL)
4201 {
4202 *to_endp = from_end;
4203 return;
4204 }
4205
4206 from_start = from_end;
4207 while (PREV_INSN (from_start) != NULL)
4208 from_start = PREV_INSN (from_start);
4209
4210 SET_PREV_INSN (from_start) = *to_endp;
4211 SET_NEXT_INSN (*to_endp) = from_start;
4212 *to_endp = from_end;
4213 }
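
/* A minimal sketch of the same splice on a generic doubly-linked list,
   given only the two tail pointers (the type here is a hypothetical
   stand-in for the rtl insn chain, and the sketch is kept out of the
   build):  */
#if 0
struct dnode { struct dnode *prev, *next; };

static void
concat_by_tails (struct dnode *from_end, struct dnode **to_endp)
{
  if (!from_end)
    return;
  if (!*to_endp)
    {
      *to_endp = from_end;
      return;
    }
  struct dnode *from_start = from_end;
  while (from_start->prev)
    from_start = from_start->prev;	/* Rewind to the list head.  */
  from_start->prev = *to_endp;		/* Link the two lists.  */
  (*to_endp)->next = from_start;
  *to_endp = from_end;			/* New combined tail.  */
}
#endif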
4214
4215 /* Delete notes between HEAD and TAIL and put them in the chain
4216 of notes ended by NOTE_LIST. */
4217 void
4218 remove_notes (rtx_insn *head, rtx_insn *tail)
4219 {
4220 rtx_insn *next_tail, *insn, *next;
4221
4222 note_list = 0;
4223 if (head == tail && !INSN_P (head))
4224 return;
4225
4226 next_tail = NEXT_INSN (tail);
4227 for (insn = head; insn != next_tail; insn = next)
4228 {
4229 next = NEXT_INSN (insn);
4230 if (!NOTE_P (insn))
4231 continue;
4232
4233 switch (NOTE_KIND (insn))
4234 {
4235 case NOTE_INSN_BASIC_BLOCK:
4236 continue;
4237
4238 case NOTE_INSN_EPILOGUE_BEG:
4239 if (insn != tail)
4240 {
4241 remove_insn (insn);
4242 add_reg_note (next, REG_SAVE_NOTE,
4243 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4244 break;
4245 }
4246 /* FALLTHRU */
4247
4248 default:
4249 remove_insn (insn);
4250
4251 /* Add the note to list that ends at NOTE_LIST. */
4252 SET_PREV_INSN (insn) = note_list;
4253 SET_NEXT_INSN (insn) = NULL_RTX;
4254 if (note_list)
4255 SET_NEXT_INSN (note_list) = insn;
4256 note_list = insn;
4257 break;
4258 }
4259
4260 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4261 }
4262 }
4263
4264 /* A structure to record enough data to allow us to backtrack the scheduler to
4265 a previous state. */
4266 struct haifa_saved_data
4267 {
4268 /* Next entry on the list. */
4269 struct haifa_saved_data *next;
4270
4271 /* Backtracking is associated with scheduling insns that have delay slots.
4272 DELAY_PAIR points to the structure that contains the insns involved, and
4273 the number of cycles between them. */
4274 struct delay_pair *delay_pair;
4275
4276 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
4277 void *fe_saved_data;
4278 /* Data used by the backend. */
4279 void *be_saved_data;
4280
4281 /* Copies of global state. */
4282 int clock_var, last_clock_var;
4283 struct ready_list ready;
4284 state_t curr_state;
4285
4286 rtx_insn *last_scheduled_insn;
4287 rtx_insn *last_nondebug_scheduled_insn;
4288 rtx_insn *nonscheduled_insns_begin;
4289 int cycle_issued_insns;
4290
4291 /* Copies of state used in the inner loop of schedule_block. */
4292 struct sched_block_state sched_block;
4293
4294 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4295 to 0 when restoring. */
4296 int q_size;
4297 rtx_insn_list **insn_queue;
4298
4299 /* Describe pattern replacements that occurred since this backtrack point
4300 was queued. */
4301 vec<dep_t> replacement_deps;
4302 vec<int> replace_apply;
4303
4304 /* A copy of the next-cycle replacement vectors at the time of the backtrack
4305 point. */
4306 vec<dep_t> next_cycle_deps;
4307 vec<int> next_cycle_apply;
4308 };
4309
4310 /* A record, in reverse order, of all scheduled insns which have delay slots
4311 and may require backtracking. */
4312 static struct haifa_saved_data *backtrack_queue;
4313
4314 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4315 to SET_P. */
4316 static void
4317 mark_backtrack_feeds (rtx_insn *insn, int set_p)
4318 {
4319 sd_iterator_def sd_it;
4320 dep_t dep;
4321 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4322 {
4323 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4324 }
4325 }
4326
4327 /* Save the current scheduler state so that we can backtrack to it
4328 later if necessary. PAIR gives the insns that make it necessary to
4329 save this point. SCHED_BLOCK is the local state of schedule_block
4330 that needs to be saved. */
4331 static void
4332 save_backtrack_point (struct delay_pair *pair,
4333 struct sched_block_state sched_block)
4334 {
4335 int i;
4336 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4337
4338 save->curr_state = xmalloc (dfa_state_size);
4339 memcpy (save->curr_state, curr_state, dfa_state_size);
4340
4341 save->ready.first = ready.first;
4342 save->ready.n_ready = ready.n_ready;
4343 save->ready.n_debug = ready.n_debug;
4344 save->ready.veclen = ready.veclen;
4345 save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4346 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4347
4348 save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4349 save->q_size = q_size;
4350 for (i = 0; i <= max_insn_queue_index; i++)
4351 {
4352 int q = NEXT_Q_AFTER (q_ptr, i);
4353 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4354 }
4355
4356 save->clock_var = clock_var;
4357 save->last_clock_var = last_clock_var;
4358 save->cycle_issued_insns = cycle_issued_insns;
4359 save->last_scheduled_insn = last_scheduled_insn;
4360 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4361 save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4362
4363 save->sched_block = sched_block;
4364
4365 save->replacement_deps.create (0);
4366 save->replace_apply.create (0);
4367 save->next_cycle_deps = next_cycle_replace_deps.copy ();
4368 save->next_cycle_apply = next_cycle_apply.copy ();
4369
4370 if (current_sched_info->save_state)
4371 save->fe_saved_data = (*current_sched_info->save_state) ();
4372
4373 if (targetm.sched.alloc_sched_context)
4374 {
4375 save->be_saved_data = targetm.sched.alloc_sched_context ();
4376 targetm.sched.init_sched_context (save->be_saved_data, false);
4377 }
4378 else
4379 save->be_saved_data = NULL;
4380
4381 save->delay_pair = pair;
4382
4383 save->next = backtrack_queue;
4384 backtrack_queue = save;
4385
4386 while (pair)
4387 {
4388 mark_backtrack_feeds (pair->i2, 1);
4389 INSN_TICK (pair->i2) = INVALID_TICK;
4390 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4391 SHADOW_P (pair->i2) = pair->stages == 0;
4392 pair = pair->next_same_i1;
4393 }
4394 }
4395
4396 /* Walk the ready list and all queues. If any insns have unresolved backwards
4397 dependencies, these must be cancelled deps, broken by predication. Set or
4398 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4399
4400 static void
4401 toggle_cancelled_flags (bool set)
4402 {
4403 int i;
4404 sd_iterator_def sd_it;
4405 dep_t dep;
4406
4407 if (ready.n_ready > 0)
4408 {
4409 rtx_insn **first = ready_lastpos (&ready);
4410 for (i = 0; i < ready.n_ready; i++)
4411 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4412 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4413 {
4414 if (set)
4415 DEP_STATUS (dep) |= DEP_CANCELLED;
4416 else
4417 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4418 }
4419 }
4420 for (i = 0; i <= max_insn_queue_index; i++)
4421 {
4422 int q = NEXT_Q_AFTER (q_ptr, i);
4423 rtx_insn_list *link;
4424 for (link = insn_queue[q]; link; link = link->next ())
4425 {
4426 rtx_insn *insn = link->insn ();
4427 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4428 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4429 {
4430 if (set)
4431 DEP_STATUS (dep) |= DEP_CANCELLED;
4432 else
4433 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4434 }
4435 }
4436 }
4437 }
4438
4439 /* Undo the replacements that have occurred after backtrack point SAVE
4440 was placed. */
4441 static void
4442 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4443 {
4444 while (!save->replacement_deps.is_empty ())
4445 {
4446 dep_t dep = save->replacement_deps.pop ();
4447 int apply_p = save->replace_apply.pop ();
4448
4449 if (apply_p)
4450 restore_pattern (dep, true);
4451 else
4452 apply_replacement (dep, true);
4453 }
4454 save->replacement_deps.release ();
4455 save->replace_apply.release ();
4456 }
4457
4458 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4459 Restore their dependencies to an unresolved state, and mark them as
4460 queued nowhere. */
4461
4462 static void
4463 unschedule_insns_until (rtx_insn *insn)
4464 {
4465 auto_vec<rtx_insn *> recompute_vec;
4466
4467 /* Make two passes over the insns to be unscheduled. First, we clear out
4468 dependencies and other trivial bookkeeping. */
4469 for (;;)
4470 {
4471 rtx_insn *last;
4472 sd_iterator_def sd_it;
4473 dep_t dep;
4474
4475 last = scheduled_insns.pop ();
4476
4477 /* This will be changed by restore_backtrack_point if the insn is in
4478 any queue. */
4479 QUEUE_INDEX (last) = QUEUE_NOWHERE;
4480 if (last != insn)
4481 INSN_TICK (last) = INVALID_TICK;
4482
4483 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4484 modulo_insns_scheduled--;
4485
4486 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4487 sd_iterator_cond (&sd_it, &dep);)
4488 {
4489 rtx_insn *con = DEP_CON (dep);
4490 sd_unresolve_dep (sd_it);
4491 if (!MUST_RECOMPUTE_SPEC_P (con))
4492 {
4493 MUST_RECOMPUTE_SPEC_P (con) = 1;
4494 recompute_vec.safe_push (con);
4495 }
4496 }
4497
4498 if (last == insn)
4499 break;
4500 }
4501
4502 /* A second pass, to update ready and speculation status for insns
4503 depending on the unscheduled ones. The first pass must have
4504 popped the scheduled_insns vector up to the point where we
4505 restart scheduling, as recompute_todo_spec requires it to be
4506 up-to-date. */
4507 while (!recompute_vec.is_empty ())
4508 {
4509 rtx_insn *con;
4510
4511 con = recompute_vec.pop ();
4512 MUST_RECOMPUTE_SPEC_P (con) = 0;
4513 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4514 {
4515 TODO_SPEC (con) = HARD_DEP;
4516 INSN_TICK (con) = INVALID_TICK;
4517 if (PREDICATED_PAT (con) != NULL_RTX)
4518 haifa_change_pattern (con, ORIG_PAT (con));
4519 }
4520 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4521 TODO_SPEC (con) = recompute_todo_spec (con, true);
4522 }
4523 }
4524
4525 /* Restore scheduler state from the topmost entry on the backtracking queue.
4526 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4527 overwrite with the saved data.
4528 The caller must already have called unschedule_insns_until. */
4529
4530 static void
4531 restore_last_backtrack_point (struct sched_block_state *psched_block)
4532 {
4533 int i;
4534 struct haifa_saved_data *save = backtrack_queue;
4535
4536 backtrack_queue = save->next;
4537
4538 if (current_sched_info->restore_state)
4539 (*current_sched_info->restore_state) (save->fe_saved_data);
4540
4541 if (targetm.sched.alloc_sched_context)
4542 {
4543 targetm.sched.set_sched_context (save->be_saved_data);
4544 targetm.sched.free_sched_context (save->be_saved_data);
4545 }
4546
4547 /* Do this first since it clobbers INSN_TICK of the involved
4548 instructions. */
4549 undo_replacements_for_backtrack (save);
4550
4551 /* Clear the QUEUE_INDEX of everything in the ready list or one
4552 of the queues. */
4553 if (ready.n_ready > 0)
4554 {
4555 rtx_insn **first = ready_lastpos (&ready);
4556 for (i = 0; i < ready.n_ready; i++)
4557 {
4558 rtx_insn *insn = first[i];
4559 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4560 INSN_TICK (insn) = INVALID_TICK;
4561 }
4562 }
4563 for (i = 0; i <= max_insn_queue_index; i++)
4564 {
4565 int q = NEXT_Q_AFTER (q_ptr, i);
4566
4567 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4568 {
4569 rtx_insn *x = link->insn ();
4570 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4571 INSN_TICK (x) = INVALID_TICK;
4572 }
4573 free_INSN_LIST_list (&insn_queue[q]);
4574 }
4575
4576 free (ready.vec);
4577 ready = save->ready;
4578
4579 if (ready.n_ready > 0)
4580 {
4581 rtx_insn **first = ready_lastpos (&ready);
4582 for (i = 0; i < ready.n_ready; i++)
4583 {
4584 rtx_insn *insn = first[i];
4585 QUEUE_INDEX (insn) = QUEUE_READY;
4586 TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4587 INSN_TICK (insn) = save->clock_var;
4588 }
4589 }
4590
4591 q_ptr = 0;
4592 q_size = save->q_size;
4593 for (i = 0; i <= max_insn_queue_index; i++)
4594 {
4595 int q = NEXT_Q_AFTER (q_ptr, i);
4596
4597 insn_queue[q] = save->insn_queue[q];
4598
4599 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4600 {
4601 rtx_insn *x = link->insn ();
4602 QUEUE_INDEX (x) = i;
4603 TODO_SPEC (x) = recompute_todo_spec (x, true);
4604 INSN_TICK (x) = save->clock_var + i;
4605 }
4606 }
4607 free (save->insn_queue);
4608
4609 toggle_cancelled_flags (true);
4610
4611 clock_var = save->clock_var;
4612 last_clock_var = save->last_clock_var;
4613 cycle_issued_insns = save->cycle_issued_insns;
4614 last_scheduled_insn = save->last_scheduled_insn;
4615 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4616 nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4617
4618 *psched_block = save->sched_block;
4619
4620 memcpy (curr_state, save->curr_state, dfa_state_size);
4621 free (save->curr_state);
4622
4623 mark_backtrack_feeds (save->delay_pair->i2, 0);
4624
4625 gcc_assert (next_cycle_replace_deps.is_empty ());
4626 next_cycle_replace_deps = save->next_cycle_deps.copy ();
4627 next_cycle_apply = save->next_cycle_apply.copy ();
4628
4629 free (save);
4630
4631 for (save = backtrack_queue; save; save = save->next)
4632 {
4633 mark_backtrack_feeds (save->delay_pair->i2, 1);
4634 }
4635 }
4636
4637 /* Discard all data associated with the topmost entry in the backtrack
4638 queue. If RESET_TICK is false, we just want to free the data. If true,
4639 we are doing this because we discovered a reason to backtrack. In the
4640 latter case, also reset the INSN_TICK for the shadow insn. */
4641 static void
4642 free_topmost_backtrack_point (bool reset_tick)
4643 {
4644 struct haifa_saved_data *save = backtrack_queue;
4645 int i;
4646
4647 backtrack_queue = save->next;
4648
4649 if (reset_tick)
4650 {
4651 struct delay_pair *pair = save->delay_pair;
4652 while (pair)
4653 {
4654 INSN_TICK (pair->i2) = INVALID_TICK;
4655 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4656 pair = pair->next_same_i1;
4657 }
4658 undo_replacements_for_backtrack (save);
4659 }
4660 else
4661 {
4662 save->replacement_deps.release ();
4663 save->replace_apply.release ();
4664 }
4665
4666 if (targetm.sched.free_sched_context)
4667 targetm.sched.free_sched_context (save->be_saved_data);
4668 if (current_sched_info->restore_state)
4669 free (save->fe_saved_data);
4670 for (i = 0; i <= max_insn_queue_index; i++)
4671 free_INSN_LIST_list (&save->insn_queue[i]);
4672 free (save->insn_queue);
4673 free (save->curr_state);
4674 free (save->ready.vec);
4675 free (save);
4676 }
4677
4678 /* Free the entire backtrack queue. */
4679 static void
4680 free_backtrack_queue (void)
4681 {
4682 while (backtrack_queue)
4683 free_topmost_backtrack_point (false);
4684 }
4685
4686 /* Apply the replacement described by the DEP_REPLACE information of
4687 DEP. If IMMEDIATELY is false, we may have to postpone the replacement
4688 until the start of the next cycle, at which point we will be called
4689 again with IMMEDIATELY true. However, this is only done for machines
4690 which have instruction packets with explicit parallelism. */
4691 static void
4692 apply_replacement (dep_t dep, bool immediately)
4693 {
4694 struct dep_replacement *desc = DEP_REPLACE (dep);
4695 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4696 {
4697 next_cycle_replace_deps.safe_push (dep);
4698 next_cycle_apply.safe_push (1);
4699 }
4700 else
4701 {
4702 bool success;
4703
4704 if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4705 return;
4706
4707 if (sched_verbose >= 5)
4708 fprintf (sched_dump, "applying replacement for insn %d\n",
4709 INSN_UID (desc->insn));
4710
4711 success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4712 gcc_assert (success);
4713
4714 rtx_insn *insn = DEP_PRO (dep);
4715
4716 /* Recompute priority since dependent priorities may have changed. */
4717 priority (insn, true);
4718 update_insn_after_change (desc->insn);
4719
4720 if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4721 fix_tick_ready (desc->insn);
4722
4723 if (backtrack_queue != NULL)
4724 {
4725 backtrack_queue->replacement_deps.safe_push (dep);
4726 backtrack_queue->replace_apply.safe_push (1);
4727 }
4728 }
4729 }
4730
4731 /* We have determined that a pattern involved in DEP must be restored.
4732 If IMMEDIATELY is false, we may have to postpone the replacement
4733 until the start of the next cycle, at which point we will be called
4734 again with IMMEDIATELY true. */
4735 static void
4736 restore_pattern (dep_t dep, bool immediately)
4737 {
4738 rtx_insn *next = DEP_CON (dep);
4739 int tick = INSN_TICK (next);
4740
4741 /* If we already scheduled the insn, the modified version is
4742 correct. */
4743 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4744 return;
4745
4746 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4747 {
4748 next_cycle_replace_deps.safe_push (dep);
4749 next_cycle_apply.safe_push (0);
4750 return;
4751 }
4752
4754 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4755 {
4756 if (sched_verbose >= 5)
4757 fprintf (sched_dump, "restoring pattern for insn %d\n",
4758 INSN_UID (next));
4759 haifa_change_pattern (next, ORIG_PAT (next));
4760 }
4761 else
4762 {
4763 struct dep_replacement *desc = DEP_REPLACE (dep);
4764 bool success;
4765
4766 if (sched_verbose >= 5)
4767 fprintf (sched_dump, "restoring pattern for insn %d\n",
4768 INSN_UID (desc->insn));
4769 tick = INSN_TICK (desc->insn);
4770
4771 success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4772 gcc_assert (success);
4773
4774 rtx_insn *insn = DEP_PRO (dep);
4775
4776 if (QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
4777 {
4778 /* Recompute priority since dependent priorities may have changed. */
4779 priority (insn, true);
4780 }
4781
4782 update_insn_after_change (desc->insn);
4783
4784 if (backtrack_queue != NULL)
4785 {
4786 backtrack_queue->replacement_deps.safe_push (dep);
4787 backtrack_queue->replace_apply.safe_push (0);
4788 }
4789 }
4790 INSN_TICK (next) = tick;
4791 if (TODO_SPEC (next) == DEP_POSTPONED)
4792 return;
4793
4794 if (sd_lists_empty_p (next, SD_LIST_BACK))
4795 TODO_SPEC (next) = 0;
4796 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4797 TODO_SPEC (next) = HARD_DEP;
4798 }
4799
4800 /* Perform pattern replacements that were queued up until the next
4801 cycle. */
4802 static void
4803 perform_replacements_new_cycle (void)
4804 {
4805 int i;
4806 dep_t dep;
4807 FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4808 {
4809 int apply_p = next_cycle_apply[i];
4810 if (apply_p)
4811 apply_replacement (dep, true);
4812 else
4813 restore_pattern (dep, true);
4814 }
4815 next_cycle_replace_deps.truncate (0);
4816 next_cycle_apply.truncate (0);
4817 }
4818
4819 /* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
4820 instructions we've previously encountered; a set bit prevents
4821 recursion. BUDGET is a limit on how far ahead we look; it is
4822 reduced on recursive calls. Return true if we produced a good
4823 estimate, or false if we exceeded the budget. */
4824 static bool
4825 estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4826 {
4827 sd_iterator_def sd_it;
4828 dep_t dep;
4829 int earliest = INSN_TICK (insn);
4830
4831 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4832 {
4833 rtx_insn *pro = DEP_PRO (dep);
4834 int t;
4835
4836 if (DEP_STATUS (dep) & DEP_CANCELLED)
4837 continue;
4838
4839 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4840 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4841 else
4842 {
4843 int cost = dep_cost (dep);
4844 if (cost >= budget)
4845 return false;
4846 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4847 {
4848 if (!estimate_insn_tick (processed, pro, budget - cost))
4849 return false;
4850 }
4851 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4852 t = INSN_TICK_ESTIMATE (pro) + cost;
4853 if (earliest == INVALID_TICK || t > earliest)
4854 earliest = t;
4855 }
4856 }
4857 bitmap_set_bit (processed, INSN_LUID (insn));
4858 INSN_TICK_ESTIMATE (insn) = earliest;
4859 return true;
4860 }
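
/* Stripped of scheduler detail, the function above is a budgeted
   longest-path walk over the dependence DAG, with PROCESSED memoizing
   nodes so that each insn is estimated at most once.  The core recursion
   has this shape (a self-contained sketch with hypothetical node and edge
   types, kept out of the build; cost plays the role of dep_cost):  */
#if 0
struct xedge { struct xnode *src; struct xedge *next; int cost; };
struct xnode { struct xedge *preds; };

static bool
longest_path (struct xnode *n, int budget, int *out)
{
  int best = 0;
  for (struct xedge *e = n->preds; e; e = e->next)
    {
      int sub;
      if (e->cost >= budget
	  || !longest_path (e->src, budget - e->cost, &sub))
	return false;		/* Budget exhausted; abandon the estimate.  */
      if (sub + e->cost > best)
	best = sub + e->cost;
    }
  *out = best;
  return true;
}
#endif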
4861
4862 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4863 infinite resources) the cycle in which the delayed shadow can be issued.
4864 Return the number of cycles that must pass before the real insn can be
4865 issued in order to meet this constraint. */
4866 static int
4867 estimate_shadow_tick (struct delay_pair *p)
4868 {
4869 auto_bitmap processed;
4870 int t;
4871 bool cutoff;
4872
4873 cutoff = !estimate_insn_tick (processed, p->i2,
4874 max_insn_queue_index + pair_delay (p));
4875 if (cutoff)
4876 return max_insn_queue_index;
4877 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4878 if (t > 0)
4879 return t;
4880 return 0;
4881 }
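
/* Example with made-up numbers: if the shadow's estimated tick is 12, the
   current cycle is 4 and the pair delay is 5, the real insn must wait
   12 - (4 + 5 + 1) = 2 more cycles before it can issue.  */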
4882
4883 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4884 recursively resolve all its forward dependencies. */
4885 static void
4886 resolve_dependencies (rtx_insn *insn)
4887 {
4888 sd_iterator_def sd_it;
4889 dep_t dep;
4890
4891 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4892 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4893 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4894 return;
4895
4896 if (sched_verbose >= 4)
4897 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4898
4899 if (QUEUE_INDEX (insn) >= 0)
4900 queue_remove (insn);
4901
4902 scheduled_insns.safe_push (insn);
4903
4904 /* Update dependent instructions. */
4905 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4906 sd_iterator_cond (&sd_it, &dep);)
4907 {
4908 rtx_insn *next = DEP_CON (dep);
4909
4910 if (sched_verbose >= 4)
4911 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4912 INSN_UID (next));
4913
4914 /* Resolve the dependence between INSN and NEXT.
4915 sd_resolve_dep () moves current dep to another list thus
4916 advancing the iterator. */
4917 sd_resolve_dep (sd_it);
4918
4919 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4920 {
4921 resolve_dependencies (next);
4922 }
4923 else
4924 /* A check insn always has only one forward dependence (to the first insn
4925 in the recovery block); therefore, this will be executed only once. */
4926 {
4927 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4928 }
4929 }
4930 }
4931
4933 /* Return the head and tail pointers of the ebb starting at BEG and ending
4934 at END. */
4935 void
4936 get_ebb_head_tail (basic_block beg, basic_block end,
4937 rtx_insn **headp, rtx_insn **tailp)
4938 {
4939 rtx_insn *beg_head = BB_HEAD (beg);
4940 rtx_insn *beg_tail = BB_END (beg);
4941 rtx_insn *end_head = BB_HEAD (end);
4942 rtx_insn *end_tail = BB_END (end);
4943
4944 /* Don't include any notes or labels at the beginning of the BEG
4945 basic block, or notes at the end of the END basic block. */
4946
4947 if (LABEL_P (beg_head))
4948 beg_head = NEXT_INSN (beg_head);
4949
4950 while (beg_head != beg_tail)
4951 if (NOTE_P (beg_head))
4952 beg_head = NEXT_INSN (beg_head);
4953 else if (DEBUG_INSN_P (beg_head))
4954 {
4955 rtx_insn *note, *next;
4956
4957 for (note = NEXT_INSN (beg_head);
4958 note != beg_tail;
4959 note = next)
4960 {
4961 next = NEXT_INSN (note);
4962 if (NOTE_P (note))
4963 {
4964 if (sched_verbose >= 9)
4965 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4966
4967 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4968
4969 if (BLOCK_FOR_INSN (note) != beg)
4970 df_insn_change_bb (note, beg);
4971 }
4972 else if (!DEBUG_INSN_P (note))
4973 break;
4974 }
4975
4976 break;
4977 }
4978 else
4979 break;
4980
4981 *headp = beg_head;
4982
4983 if (beg == end)
4984 end_head = beg_head;
4985 else if (LABEL_P (end_head))
4986 end_head = NEXT_INSN (end_head);
4987
4988 while (end_head != end_tail)
4989 if (NOTE_P (end_tail))
4990 end_tail = PREV_INSN (end_tail);
4991 else if (DEBUG_INSN_P (end_tail))
4992 {
4993 rtx_insn *note, *prev;
4994
4995 for (note = PREV_INSN (end_tail);
4996 note != end_head;
4997 note = prev)
4998 {
4999 prev = PREV_INSN (note);
5000 if (NOTE_P (note))
5001 {
5002 if (sched_verbose >= 9)
5003 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
5004
5005 reorder_insns_nobb (note, note, end_tail);
5006
5007 if (end_tail == BB_END (end))
5008 BB_END (end) = note;
5009
5010 if (BLOCK_FOR_INSN (note) != end)
5011 df_insn_change_bb (note, end);
5012 }
5013 else if (!DEBUG_INSN_P (note))
5014 break;
5015 }
5016
5017 break;
5018 }
5019 else
5020 break;
5021
5022 *tailp = end_tail;
5023 }
5024
5025 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
5026
5027 int
5028 no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
5029 {
5030 while (head != NEXT_INSN (tail))
5031 {
5032 if (!NOTE_P (head) && !LABEL_P (head))
5033 return 0;
5034 head = NEXT_INSN (head);
5035 }
5036 return 1;
5037 }
5038
5039 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
5040 previously found among the insns. Insert them just before HEAD. */
5041 rtx_insn *
5042 restore_other_notes (rtx_insn *head, basic_block head_bb)
5043 {
5044 if (note_list != 0)
5045 {
5046 rtx_insn *note_head = note_list;
5047
5048 if (head)
5049 head_bb = BLOCK_FOR_INSN (head);
5050 else
5051 head = NEXT_INSN (bb_note (head_bb));
5052
5053 while (PREV_INSN (note_head))
5054 {
5055 set_block_for_insn (note_head, head_bb);
5056 note_head = PREV_INSN (note_head);
5057 }
5058 /* In the above loop we've missed this note. */
5059 set_block_for_insn (note_head, head_bb);
5060
5061 SET_PREV_INSN (note_head) = PREV_INSN (head);
5062 SET_NEXT_INSN (PREV_INSN (head)) = note_head;
5063 SET_PREV_INSN (head) = note_list;
5064 SET_NEXT_INSN (note_list) = head;
5065
5066 if (BLOCK_FOR_INSN (head) != head_bb)
5067 BB_END (head_bb) = note_list;
5068
5069 head = note_head;
5070 }
5071
5072 return head;
5073 }
5074
5075 /* When we know we are going to discard the schedule due to a failed attempt
5076 at modulo scheduling, undo all replacements. */
5077 static void
5078 undo_all_replacements (void)
5079 {
5080 rtx_insn *insn;
5081 int i;
5082
5083 FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
5084 {
5085 sd_iterator_def sd_it;
5086 dep_t dep;
5087
5088 /* See if we must undo a replacement. */
5089 for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
5090 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
5091 {
5092 struct dep_replacement *desc = DEP_REPLACE (dep);
5093 if (desc != NULL)
5094 validate_change (desc->insn, desc->loc, desc->orig, 0);
5095 }
5096 }
5097 }
5098
5099 /* Return the first nonscheduled insn in the current scheduling block.
5100 This is mostly used for debug-counter purposes. */
5101 static rtx_insn *
5102 first_nonscheduled_insn (void)
5103 {
5104 rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
5105 ? nonscheduled_insns_begin
5106 : current_sched_info->prev_head);
5107
5108 do
5109 {
5110 insn = next_nonnote_nondebug_insn (insn);
5111 }
5112 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
5113
5114 return insn;
5115 }
5116
5117 /* Move insns that became ready to fire from the queue to the ready list. */
5118
5119 static void
5120 queue_to_ready (struct ready_list *ready)
5121 {
5122 rtx_insn *insn;
5123 rtx_insn_list *link;
5124 rtx_insn *skip_insn;
5125
5126 q_ptr = NEXT_Q (q_ptr);
5127
5128 if (dbg_cnt (sched_insn) == false)
5129 /* If the debug counter is activated, do not requeue the first
5130 nonscheduled insn. */
5131 skip_insn = first_nonscheduled_insn ();
5132 else
5133 skip_insn = NULL;
5134
5135 /* Add all pending insns that can be scheduled without stalls to the
5136 ready list. */
5137 for (link = insn_queue[q_ptr]; link; link = link->next ())
5138 {
5139 insn = link->insn ();
5140 q_size -= 1;
5141
5142 if (sched_verbose >= 2)
5143 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5144 (*current_sched_info->print_insn) (insn, 0));
5145
5146 /* If the ready list is full, delay the insn for 1 cycle.
5147 See the comment in schedule_block for the rationale. */
5148 if (!reload_completed
5149 && (ready->n_ready - ready->n_debug > param_max_sched_ready_insns
5150 || (sched_pressure == SCHED_PRESSURE_MODEL
5151 /* Limit pressure recalculations to
5152 param_max_sched_ready_insns instructions too. */
5153 && model_index (insn) > (model_curr_point
5154 + param_max_sched_ready_insns)))
5155 && !(sched_pressure == SCHED_PRESSURE_MODEL
5156 && model_curr_point < model_num_insns
5157 /* Always allow the next model instruction to issue. */
5158 && model_index (insn) == model_curr_point)
5159 && !SCHED_GROUP_P (insn)
5160 && insn != skip_insn)
5161 {
5162 if (sched_verbose >= 2)
5163 fprintf (sched_dump, "keeping in queue, ready full\n");
5164 queue_insn (insn, 1, "ready full");
5165 }
5166 else
5167 {
5168 ready_add (ready, insn, false);
5169 if (sched_verbose >= 2)
5170 fprintf (sched_dump, "moving to ready without stalls\n");
5171 }
5172 }
5173 free_INSN_LIST_list (&insn_queue[q_ptr]);
5174
5175 /* If there are no ready insns, stall until one is ready and add all
5176 of the pending insns at that point to the ready list. */
5177 if (ready->n_ready == 0)
5178 {
5179 int stalls;
5180
5181 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5182 {
5183 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5184 {
5185 for (; link; link = link->next ())
5186 {
5187 insn = link->insn ();
5188 q_size -= 1;
5189
5190 if (sched_verbose >= 2)
5191 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5192 (*current_sched_info->print_insn) (insn, 0));
5193
5194 ready_add (ready, insn, false);
5195 if (sched_verbose >= 2)
5196 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5197 }
5198 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5199
5200 advance_one_cycle ();
5201
5202 break;
5203 }
5204
5205 advance_one_cycle ();
5206 }
5207
5208 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5209 clock_var += stalls;
5210 if (sched_verbose >= 2)
5211 fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5212 stalls, clock_var);
5213 }
5214 }
5215
5216 /* Used by early_queue_to_ready. Determines whether it is "ok" to
5217 prematurely move INSN from the queue to the ready list. Currently,
5218 if a target defines the hook 'is_costly_dependence', this function
5219 uses the hook to check whether there exist any dependences which are
5220 considered costly by the target, between INSN and other insns that
5221 have already been scheduled. Dependences are checked up to Y cycles
5222 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
5223 controlling this value.
5224 (Other considerations could be taken into account instead (or in
5225 addition), depending on user flags and target hooks.) */
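/* For example (an illustrative scenario, not a target guarantee): with
   -fsched-stalled-insns-dep=2, the already-scheduled insns of the last two
   dispatch groups are scanned, and if is_costly_dependence reports a costly
   dependence between any of them and INSN, INSN stays in the queue. */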
5226
5227 static bool
5228 ok_for_early_queue_removal (rtx_insn *insn)
5229 {
5230 if (targetm.sched.is_costly_dependence)
5231 {
5232 int n_cycles;
5233 int i = scheduled_insns.length ();
5234 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5235 {
5236 while (i-- > 0)
5237 {
5238 int cost;
5239
5240 rtx_insn *prev_insn = scheduled_insns[i];
5241
5242 if (!NOTE_P (prev_insn))
5243 {
5244 dep_t dep;
5245
5246 dep = sd_find_dep_between (prev_insn, insn, true);
5247
5248 if (dep != NULL)
5249 {
5250 cost = dep_cost (dep);
5251
5252 if (targetm.sched.is_costly_dependence (dep, cost,
5253 flag_sched_stalled_insns_dep - n_cycles))
5254 return false;
5255 }
5256 }
5257
5258 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5259 break;
5260 }
5261
5262 if (i == 0)
5263 break;
5264 }
5265 }
5266
5267 return true;
5268 }
5269
5270
5271 /* Remove insns from the queue before they become "ready" with respect
5272 to FU latency considerations. */
5273
5274 static int
5275 early_queue_to_ready (state_t state, struct ready_list *ready)
5276 {
5277 rtx_insn *insn;
5278 rtx_insn_list *link;
5279 rtx_insn_list *next_link;
5280 rtx_insn_list *prev_link;
5281 bool move_to_ready;
5282 int cost;
5283 state_t temp_state = alloca (dfa_state_size);
5284 int stalls;
5285 int insns_removed = 0;
5286
5287 /*
5288 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5289 function:
5290
5291 X == 0: There is no limit on how many queued insns can be removed
5292 prematurely. (flag_sched_stalled_insns = -1).
5293
5294 X >= 1: Only X queued insns can be removed prematurely in each
5295 invocation. (flag_sched_stalled_insns = X).
5296
5297 Otherwise: Early queue removal is disabled.
5298 (flag_sched_stalled_insns = 0)
5299 */
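/* Illustrative mapping (deduced from the description above, not from the
   option-handling code):

     -fsched-stalled-insns=0  =>  flag_sched_stalled_insns = -1 (no limit)
     -fsched-stalled-insns=3  =>  flag_sched_stalled_insns =  3 (at most 3
                                  insns removed per invocation)
     flag left at its default =>  flag_sched_stalled_insns =  0 (disabled) */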
5300
5301 if (! flag_sched_stalled_insns)
5302 return 0;
5303
5304 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5305 {
5306 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5307 {
5308 if (sched_verbose > 6)
5309 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5310
5311 prev_link = 0;
5312 while (link)
5313 {
5314 next_link = link->next ();
5315 insn = link->insn ();
5316 if (insn && sched_verbose > 6)
5317 print_rtl_single (sched_dump, insn);
5318
5319 memcpy (temp_state, state, dfa_state_size);
5320 if (recog_memoized (insn) < 0)
5321 /* Use a non-negative cost to mark the insn as not ready,
5322 to avoid an infinite Q->R->Q->R... cycle. */
5323 cost = 0;
5324 else
5325 cost = state_transition (temp_state, insn);
5326
5327 if (sched_verbose >= 6)
5328 fprintf (sched_dump, "transition cost = %d\n", cost);
5329
5330 move_to_ready = false;
5331 if (cost < 0)
5332 {
5333 move_to_ready = ok_for_early_queue_removal (insn);
5334 if (move_to_ready == true)
5335 {
5336 /* move from Q to R */
5337 q_size -= 1;
5338 ready_add (ready, insn, false);
5339
5340 if (prev_link)
5341 XEXP (prev_link, 1) = next_link;
5342 else
5343 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5344
5345 free_INSN_LIST_node (link);
5346
5347 if (sched_verbose >= 2)
5348 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5349 (*current_sched_info->print_insn) (insn, 0));
5350
5351 insns_removed++;
5352 if (insns_removed == flag_sched_stalled_insns)
5353 /* Remove no more than flag_sched_stalled_insns insns
5354 from Q at a time. */
5355 return insns_removed;
5356 }
5357 }
5358
5359 if (move_to_ready == false)
5360 prev_link = link;
5361
5362 link = next_link;
5363 } /* while link */
5364 } /* if link */
5365
5366 } /* for stalls.. */
5367
5368 return insns_removed;
5369 }
5370
5371
5372 /* Print the ready list for debugging purposes.
5373 If READY_TRY is non-null, then only print insns that max_issue
5374 will consider. */
5375 static void
5376 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5377 {
5378 rtx_insn **p;
5379 int i;
5380
5381 if (ready->n_ready == 0)
5382 {
5383 fprintf (sched_dump, "\n");
5384 return;
5385 }
5386
5387 p = ready_lastpos (ready);
5388 for (i = 0; i < ready->n_ready; i++)
5389 {
5390 if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5391 continue;
5392
5393 fprintf (sched_dump, " %s:%d",
5394 (*current_sched_info->print_insn) (p[i], 0),
5395 INSN_LUID (p[i]));
5396 if (sched_pressure != SCHED_PRESSURE_NONE)
5397 fprintf (sched_dump, "(cost=%d",
5398 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5399 fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5400 if (INSN_TICK (p[i]) > clock_var)
5401 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5402 if (sched_pressure == SCHED_PRESSURE_MODEL)
5403 fprintf (sched_dump, ":idx=%d",
5404 model_index (p[i]));
5405 if (sched_pressure != SCHED_PRESSURE_NONE)
5406 fprintf (sched_dump, ")");
5407 }
5408 fprintf (sched_dump, "\n");
5409 }
5410
5411 /* Print the ready list. Callable from debugger. */
5412 static void
5413 debug_ready_list (struct ready_list *ready)
5414 {
5415 debug_ready_list_1 (ready, NULL);
5416 }
5417
5418 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5419 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5420 replaces the epilogue note in the correct basic block. */
5421 void
5422 reemit_notes (rtx_insn *insn)
5423 {
5424 rtx note;
5425 rtx_insn *last = insn;
5426
5427 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5428 {
5429 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5430 {
5431 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5432
5433 last = emit_note_before (note_type, last);
5434 remove_note (insn, note);
5435 }
5436 }
5437 }
5438
5439 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5440 static void
5441 move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5442 {
5443 if (PREV_INSN (insn) != last)
5444 {
5445 basic_block bb;
5446 rtx_insn *note;
5447 int jump_p = 0;
5448
5449 bb = BLOCK_FOR_INSN (insn);
5450
5451 /* BB_HEAD is either LABEL or NOTE. */
5452 gcc_assert (BB_HEAD (bb) != insn);
5453
5454 if (BB_END (bb) == insn)
5455 /* If this is the last instruction in the BB, move the end
5456 marker one instruction up. */
5457 {
5458 /* Jumps are always placed at the end of a basic block. */
5459 jump_p = control_flow_insn_p (insn);
5460
5461 gcc_assert (!jump_p
5462 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5463 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5464 || (common_sched_info->sched_pass_id
5465 == SCHED_EBB_PASS));
5466
5467 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5468
5469 BB_END (bb) = PREV_INSN (insn);
5470 }
5471
5472 gcc_assert (BB_END (bb) != last);
5473
5474 if (jump_p)
5475 /* We move the block note along with jump. */
5476 {
5477 gcc_assert (nt);
5478
5479 note = NEXT_INSN (insn);
5480 while (NOTE_NOT_BB_P (note) && note != nt)
5481 note = NEXT_INSN (note);
5482
5483 if (note != nt
5484 && (LABEL_P (note)
5485 || BARRIER_P (note)))
5486 note = NEXT_INSN (note);
5487
5488 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5489 }
5490 else
5491 note = insn;
5492
5493 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5494 SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5495
5496 SET_NEXT_INSN (note) = NEXT_INSN (last);
5497 SET_PREV_INSN (NEXT_INSN (last)) = note;
5498
5499 SET_NEXT_INSN (last) = insn;
5500 SET_PREV_INSN (insn) = last;
5501
5502 bb = BLOCK_FOR_INSN (last);
5503
5504 if (jump_p)
5505 {
5506 fix_jump_move (insn);
5507
5508 if (BLOCK_FOR_INSN (insn) != bb)
5509 move_block_after_check (insn);
5510
5511 gcc_assert (BB_END (bb) == last);
5512 }
5513
5514 df_insn_change_bb (insn, bb);
5515
5516 /* Update BB_END, if needed. */
5517 if (BB_END (bb) == last)
5518 BB_END (bb) = insn;
5519 }
5520
5521 SCHED_GROUP_P (insn) = 0;
5522 }
5523
5524 /* Return true if scheduling INSN will finish current clock cycle. */
5525 static bool
5526 insn_finishes_cycle_p (rtx_insn *insn)
5527 {
5528 if (SCHED_GROUP_P (insn))
5529 /* After issuing INSN, the rest of the sched_group will be forced to issue
5530 in order. Don't make any plans for the rest of the cycle. */
5531 return true;
5532
5533 /* Finishing the block will, apparently, finish the cycle. */
5534 if (current_sched_info->insn_finishes_block_p
5535 && current_sched_info->insn_finishes_block_p (insn))
5536 return true;
5537
5538 return false;
5539 }
5540
5541 /* Helper for autopref_multipass_init. Given a SET in PAT and whether
5542 we're expecting a memory WRITE or not, check that the insn is relevant to
5543 the autoprefetcher modelling code. Return true iff that is the case.
5544 If it is relevant, record the base register of the memory op in BASE and
5545 the offset in OFFSET. */
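/* For example (illustrative only, with hypothetical register numbers): with
   WRITE false and
     PAT = (set (reg:SI r0) (mem:SI (plus:SI (reg:SI r1) (const_int 8))))
   the function stores r1 in *BASE and 8 in *OFFSET and returns true;
   a (base+index) address would make it return false. */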
5546
5547 static bool
5548 analyze_set_insn_for_autopref (rtx pat, bool write, rtx *base, int *offset)
5549 {
5550 if (GET_CODE (pat) != SET)
5551 return false;
5552
5553 rtx mem = write ? SET_DEST (pat) : SET_SRC (pat);
5554 if (!MEM_P (mem))
5555 return false;
5556
5557 struct address_info info;
5558 decompose_mem_address (&info, mem);
5559
5560 /* TODO: Currently only (base+const) addressing is supported. */
5561 if (info.base == NULL || !REG_P (*info.base)
5562 || (info.disp != NULL && !CONST_INT_P (*info.disp)))
5563 return false;
5564
5565 *base = *info.base;
5566 *offset = info.disp ? INTVAL (*info.disp) : 0;
5567 return true;
5568 }
5569
5570 /* Functions to model the cache auto-prefetcher.
5571
5572 Some CPUs have a cache auto-prefetcher, which /seems/ to initiate
5573 memory prefetches if it sees instructions with consecutive memory accesses
5574 in the instruction stream. Details of such hardware units are not published,
5575 so we can only guess what exactly is going on there.
5576 In the scheduler, we model an abstract auto-prefetcher. If there are memory
5577 insns in the ready list (or the queue) that have the same memory base, but
5578 different offsets, then we delay the insns with larger offsets until insns
5579 with smaller offsets get scheduled. If param_sched_autopref_queue_depth
5580 is "1", then we look at the ready list; if it is N>1, then we also look
5581 through N-1 queue entries.
5582 If the param is N>=0, then rank_for_schedule will consider auto-prefetching
5583 among its heuristics.
5584 A param value of "-1" disables modelling of the auto-prefetcher. */
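/* A worked example (register names and offsets are illustrative only):
   suppose the ready list contains three stores with a common base,

     [r1+8]=r2    [r1+0]=r3    [r1+4]=r4

   With param_sched_autopref_queue_depth >= 1, the model steers the sort so
   they issue in offset order -- [r1+0], [r1+4], [r1+8] -- making the access
   stream look sequential to a hardware prefetcher. */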
5585
5586 /* Initialize autoprefetcher model data for INSN. */
5587 static void
5588 autopref_multipass_init (const rtx_insn *insn, int write)
5589 {
5590 autopref_multipass_data_t data = &INSN_AUTOPREF_MULTIPASS_DATA (insn)[write];
5591
5592 gcc_assert (data->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED);
5593 data->base = NULL_RTX;
5594 data->offset = 0;
5595 /* Set insn entry initialized, but not relevant for auto-prefetcher. */
5596 data->status = AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5597
5598 rtx pat = PATTERN (insn);
5599
5600 /* We have a multi-set insn like a load-multiple or store-multiple.
5601 We care about these as long as all the memory ops inside the PARALLEL
5602 have the same base register. We care about the minimum and maximum
5603 offsets from that base but don't check for the order of those offsets
5604 within the PARALLEL insn itself. */
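/* E.g. (illustrative) a store-multiple such as
     (parallel [(set (mem (plus r1 (const_int 4))) r2)
                (set (mem (plus r1 (const_int 0))) r3)])
   is recorded with base r1 and offset 0 -- the minimum over the elements;
   a PARALLEL mixing bases r1 and r2 is rejected by the loop below. */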
5605 if (GET_CODE (pat) == PARALLEL)
5606 {
5607 int n_elems = XVECLEN (pat, 0);
5608
5609 int i, offset;
5610 rtx base, prev_base = NULL_RTX;
5611 int min_offset = INT_MAX;
5612
5613 for (i = 0; i < n_elems; i++)
5614 {
5615 rtx set = XVECEXP (pat, 0, i);
5616 if (GET_CODE (set) != SET)
5617 return;
5618
5619 if (!analyze_set_insn_for_autopref (set, write, &base, &offset))
5620 return;
5621
5622 /* Ensure that all memory operations in the PARALLEL use the same
5623 base register. */
5624 if (i > 0 && REGNO (base) != REGNO (prev_base))
5625 return;
5626 prev_base = base;
5627 min_offset = MIN (min_offset, offset);
5628 }
5629
5630 /* If we reached here then we have a valid PARALLEL of multiple memory ops
5631 with prev_base as the base and min_offset containing the offset. */
5632 gcc_assert (prev_base);
5633 data->base = prev_base;
5634 data->offset = min_offset;
5635 data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5636 return;
5637 }
5638
5639 /* Otherwise this is a single set memory operation. */
5640 rtx set = single_set (insn);
5641 if (set == NULL_RTX)
5642 return;
5643
5644 if (!analyze_set_insn_for_autopref (set, write, &data->base,
5645 &data->offset))
5646 return;
5647
5648 /* This insn is relevant for the auto-prefetcher.
5649 The base and offset fields will have been filled in the
5650 analyze_set_insn_for_autopref call above. */
5651 data->status = AUTOPREF_MULTIPASS_DATA_NORMAL;
5652 }
5653
5654 /* Helper function for rank_for_schedule sorting. */
5655 static int
5656 autopref_rank_for_schedule (const rtx_insn *insn1, const rtx_insn *insn2)
5657 {
5658 int r = 0;
5659 for (int write = 0; write < 2 && !r; ++write)
5660 {
5661 autopref_multipass_data_t data1
5662 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5663 autopref_multipass_data_t data2
5664 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5665
5666 if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5667 autopref_multipass_init (insn1, write);
5668
5669 if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5670 autopref_multipass_init (insn2, write);
5671
5672 int irrel1 = data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5673 int irrel2 = data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT;
5674
5675 if (!irrel1 && !irrel2)
5676 r = data1->offset - data2->offset;
5677 else
5678 r = irrel2 - irrel1;
5679 }
5680
5681 return r;
5682 }
5683
5684 /* True if header of debug dump was printed. */
5685 static bool autopref_multipass_dfa_lookahead_guard_started_dump_p;
5686
5687 /* Helper for autopref_multipass_dfa_lookahead_guard.
5688 Return "1" if INSN1 should be delayed in favor of INSN2. */
5689 static int
5690 autopref_multipass_dfa_lookahead_guard_1 (const rtx_insn *insn1,
5691 const rtx_insn *insn2, int write)
5692 {
5693 autopref_multipass_data_t data1
5694 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5695 autopref_multipass_data_t data2
5696 = &INSN_AUTOPREF_MULTIPASS_DATA (insn2)[write];
5697
5698 if (data2->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5699 autopref_multipass_init (insn2, write);
5700 if (data2->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5701 return 0;
5702
5703 if (rtx_equal_p (data1->base, data2->base)
5704 && data1->offset > data2->offset)
5705 {
5706 if (sched_verbose >= 2)
5707 {
5708 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5709 {
5710 fprintf (sched_dump,
5711 ";;\t\tnot trying in max_issue due to autoprefetch "
5712 "model: ");
5713 autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5714 }
5715
5716 fprintf (sched_dump, " %d(%d)", INSN_UID (insn1), INSN_UID (insn2));
5717 }
5718
5719 return 1;
5720 }
5721
5722 return 0;
5723 }
5724
5725 /* General note:
5726
5727 We could have also hooked autoprefetcher model into
5728 first_cycle_multipass_backtrack / first_cycle_multipass_issue hooks
5729 to enable intelligent selection of "[r1+0]=r2; [r1+4]=r3" on the same cycle
5730 (e.g., once "[r1+0]=r2" is issued in max_issue(), "[r1+4]=r3" gets
5731 unblocked). We don't bother about this yet because the target of
5732 interest (ARM Cortex-A15) can issue only 1 memory operation per cycle. */
5733
5734 /* Implementation of first_cycle_multipass_dfa_lookahead_guard hook.
5735 Return "1" if INSN1 should not be considered in max_issue due to
5736 auto-prefetcher considerations. */
5737 int
5738 autopref_multipass_dfa_lookahead_guard (rtx_insn *insn1, int ready_index)
5739 {
5740 int r = 0;
5741
5742 /* Exit early if the param forbids this or if we're not entering here through
5743 normal haifa scheduling. This can happen if selective scheduling is
5744 explicitly enabled. */
5745 if (!insn_queue || param_sched_autopref_queue_depth <= 0)
5746 return 0;
5747
5748 if (sched_verbose >= 2 && ready_index == 0)
5749 autopref_multipass_dfa_lookahead_guard_started_dump_p = false;
5750
5751 for (int write = 0; write < 2; ++write)
5752 {
5753 autopref_multipass_data_t data1
5754 = &INSN_AUTOPREF_MULTIPASS_DATA (insn1)[write];
5755
5756 if (data1->status == AUTOPREF_MULTIPASS_DATA_UNINITIALIZED)
5757 autopref_multipass_init (insn1, write);
5758 if (data1->status == AUTOPREF_MULTIPASS_DATA_IRRELEVANT)
5759 continue;
5760
5761 if (ready_index == 0
5762 && data1->status == AUTOPREF_MULTIPASS_DATA_DONT_DELAY)
5763 /* We allow only a single delay on privileged instructions.
5764 Doing otherwise would cause an infinite loop. */
5765 {
5766 if (sched_verbose >= 2)
5767 {
5768 if (!autopref_multipass_dfa_lookahead_guard_started_dump_p)
5769 {
5770 fprintf (sched_dump,
5771 ";;\t\tnot trying in max_issue due to autoprefetch "
5772 "model: ");
5773 autopref_multipass_dfa_lookahead_guard_started_dump_p = true;
5774 }
5775
5776 fprintf (sched_dump, " *%d*", INSN_UID (insn1));
5777 }
5778 continue;
5779 }
5780
5781 for (int i2 = 0; i2 < ready.n_ready; ++i2)
5782 {
5783 rtx_insn *insn2 = get_ready_element (i2);
5784 if (insn1 == insn2)
5785 continue;
5786 r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2, write);
5787 if (r)
5788 {
5789 if (ready_index == 0)
5790 {
5791 r = -1;
5792 data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5793 }
5794 goto finish;
5795 }
5796 }
5797
5798 if (param_sched_autopref_queue_depth == 1)
5799 continue;
5800
5801 /* Everything from the current queue slot should have been moved to
5802 the ready list. */
5803 gcc_assert (insn_queue[NEXT_Q_AFTER (q_ptr, 0)] == NULL_RTX);
5804
5805 int n_stalls = param_sched_autopref_queue_depth - 1;
5806 if (n_stalls > max_insn_queue_index)
5807 n_stalls = max_insn_queue_index;
5808
5809 for (int stalls = 1; stalls <= n_stalls; ++stalls)
5810 {
5811 for (rtx_insn_list *link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)];
5812 link != NULL_RTX;
5813 link = link->next ())
5814 {
5815 rtx_insn *insn2 = link->insn ();
5816 r = autopref_multipass_dfa_lookahead_guard_1 (insn1, insn2,
5817 write);
5818 if (r)
5819 {
5820 /* Queue INSN1 until INSN2 can issue. */
5821 r = -stalls;
5822 if (ready_index == 0)
5823 data1->status = AUTOPREF_MULTIPASS_DATA_DONT_DELAY;
5824 goto finish;
5825 }
5826 }
5827 }
5828 }
5829
5830 finish:
5831 if (sched_verbose >= 2
5832 && autopref_multipass_dfa_lookahead_guard_started_dump_p
5833 && (ready_index == ready.n_ready - 1 || r < 0))
5834 /* This does not /always/ trigger. We don't output EOL if the last
5835 insn is not recognized (INSN_CODE < 0) and lookahead_guard is not
5836 called. We can live with this. */
5837 fprintf (sched_dump, "\n");
5838
5839 return r;
5840 }
5841
5842 /* Define type for target data used in multipass scheduling. */
5843 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5844 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5845 #endif
5846 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5847
5848 /* The following structure describes an entry of the stack of choices. */
5849 struct choice_entry
5850 {
5851 /* Ordinal number of the issued insn in the ready queue. */
5852 int index;
5853 /* The number of remaining insns whose issue we should still try. */
5854 int rest;
5855 /* The number of issued essential insns. */
5856 int n;
5857 /* State after issuing the insn. */
5858 state_t state;
5859 /* Target-specific data. */
5860 first_cycle_multipass_data_t target_data;
5861 };
5862
5863 /* The following array is used to implement a stack of choices used in
5864 function max_issue. */
5865 static struct choice_entry *choice_stack;
5866
5867 /* This holds the value of the target dfa_lookahead hook. */
5868 int dfa_lookahead;
5869
5870 /* The following variable holds the maximal number of tries of issuing
5871 insns for the first cycle multipass insn scheduling. We define
5872 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
5873 need this constraint if all real insns (with non-negative codes)
5874 had reservations, because in this case the algorithm complexity is
5875 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the DFA descriptions
5876 might be incomplete and such insns might occur. For such
5877 descriptions, the complexity of the algorithm (without the constraint)
5878 could reach DFA_LOOKAHEAD**N, where N is the queue length. */
5879 static int max_lookahead_tries;
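/* For example (values illustrative, not from any particular target): with
   dfa_lookahead == 4 and issue_rate == 2, the initialization in max_issue
   below yields max_lookahead_tries == 100 * 4 * 4 == 1600. */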
5880
5881 /* The following function returns the maximal (or close to maximal)
5882 number of insns which can be issued on the same cycle, one of which
5883 is the insn with the best rank (the first insn in READY). To find
5884 this, the function tries different samples of ready insns. READY
5885 is the current queue `ready'. The global array READY_TRY reflects
5886 which insns are already issued in this try. The function stops
5887 immediately if it reaches a solution where all instructions can be
5888 issued. INDEX will contain the index of the best insn in READY.
5889 This function is used only for first cycle multipass scheduling.
5890
5891 PRIVILEGED_N >= 0
5892
5893 This function expects recognized insns only. All USEs,
5894 CLOBBERs, etc. must be filtered elsewhere. */
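/* Sketch of the search (illustrative): with READY = {A, B, C} and
   dfa_lookahead == 2, max_issue explores issue sequences such as
   A; A,B; A,C; B; B,A; ... advancing a copy of the DFA state for each and
   backtracking via choice_stack, and returns the length of the best
   sequence found, with *INDEX set to its first insn. */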
5895 int
5896 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5897 bool first_cycle_insn_p, int *index)
5898 {
5899 int n, i, all, n_ready, best, delay, tries_num;
5900 int more_issue;
5901 struct choice_entry *top;
5902 rtx_insn *insn;
5903
5904 if (sched_fusion)
5905 return 0;
5906
5907 n_ready = ready->n_ready;
5908 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5909 && privileged_n <= n_ready);
5910
5911 /* Init MAX_LOOKAHEAD_TRIES. */
5912 if (max_lookahead_tries == 0)
5913 {
5914 max_lookahead_tries = 100;
5915 for (i = 0; i < issue_rate; i++)
5916 max_lookahead_tries *= dfa_lookahead;
5917 }
5918
5919 /* Init MORE_ISSUE (the number of insns we can still issue this cycle). */
5920 more_issue = issue_rate - cycle_issued_insns;
5921 gcc_assert (more_issue >= 0);
5922
5923 /* The number of the issued insns in the best solution. */
5924 best = 0;
5925
5926 top = choice_stack;
5927
5928 /* Set initial state of the search. */
5929 memcpy (top->state, state, dfa_state_size);
5930 top->rest = dfa_lookahead;
5931 top->n = 0;
5932 if (targetm.sched.first_cycle_multipass_begin)
5933 targetm.sched.first_cycle_multipass_begin (&top->target_data,
5934 ready_try, n_ready,
5935 first_cycle_insn_p);
5936
5937 /* Count the number of the insns to search among. */
5938 for (all = i = 0; i < n_ready; i++)
5939 if (!ready_try [i])
5940 all++;
5941
5942 if (sched_verbose >= 2)
5943 {
5944 fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5945 debug_ready_list_1 (ready, ready_try);
5946 }
5947
5948 /* I is the index of the insn to try next. */
5949 i = 0;
5950 tries_num = 0;
5951 for (;;)
5952 {
5953 if (/* If we've reached a dead end or searched enough of what we have
5954 been asked... */
5955 top->rest == 0
5956 /* or have nothing else to try... */
5957 || i >= n_ready
5958 /* or should not issue more. */
5959 || top->n >= more_issue)
5960 {
5961 /* ??? (... || i == n_ready). */
5962 gcc_assert (i <= n_ready);
5963
5964 /* We should not issue more than issue_rate instructions. */
5965 gcc_assert (top->n <= more_issue);
5966
5967 if (top == choice_stack)
5968 break;
5969
5970 if (best < top - choice_stack)
5971 {
5972 if (privileged_n)
5973 {
5974 n = privileged_n;
5975 /* Try to find issued privileged insn. */
5976 while (n && !ready_try[--n])
5977 ;
5978 }
5979
5980 if (/* If all insns are equally good... */
5981 privileged_n == 0
5982 /* Or a privileged insn will be issued. */
5983 || ready_try[n])
5984 /* Then we have a solution. */
5985 {
5986 best = top - choice_stack;
5987 /* This is the index of the insn issued first in this
5988 solution. */
5989 *index = choice_stack [1].index;
5990 if (top->n == more_issue || best == all)
5991 break;
5992 }
5993 }
5994
5995 /* Set ready-list index to point to the last insn
5996 ('i++' below will advance it to the next insn). */
5997 i = top->index;
5998
5999 /* Backtrack. */
6000 ready_try [i] = 0;
6001
6002 if (targetm.sched.first_cycle_multipass_backtrack)
6003 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
6004 ready_try, n_ready);
6005
6006 top--;
6007 memcpy (state, top->state, dfa_state_size);
6008 }
6009 else if (!ready_try [i])
6010 {
6011 tries_num++;
6012 if (tries_num > max_lookahead_tries)
6013 break;
6014 insn = ready_element (ready, i);
6015 delay = state_transition (state, insn);
6016 if (delay < 0)
6017 {
6018 if (state_dead_lock_p (state)
6019 || insn_finishes_cycle_p (insn))
6020 /* We won't issue any more instructions in the next
6021 choice_state. */
6022 top->rest = 0;
6023 else
6024 top->rest--;
6025
6026 n = top->n;
6027 if (memcmp (top->state, state, dfa_state_size) != 0)
6028 n++;
6029
6030 /* Advance to the next choice_entry. */
6031 top++;
6032 /* Initialize it. */
6033 top->rest = dfa_lookahead;
6034 top->index = i;
6035 top->n = n;
6036 memcpy (top->state, state, dfa_state_size);
6037 ready_try [i] = 1;
6038
6039 if (targetm.sched.first_cycle_multipass_issue)
6040 targetm.sched.first_cycle_multipass_issue (&top->target_data,
6041 ready_try, n_ready,
6042 insn,
6043 &((top - 1)
6044 ->target_data));
6045
6046 i = -1;
6047 }
6048 }
6049
6050 /* Increase ready-list index. */
6051 i++;
6052 }
6053
6054 if (targetm.sched.first_cycle_multipass_end)
6055 targetm.sched.first_cycle_multipass_end (best != 0
6056 ? &choice_stack[1].target_data
6057 : NULL);
6058
6059 /* Restore the original state of the DFA. */
6060 memcpy (state, choice_stack->state, dfa_state_size);
6061
6062 return best;
6063 }
6064
6065 /* The following function chooses an insn from READY and modifies
6066 READY. It is used only for first cycle
6067 multipass scheduling.
6068 Return:
6069 -1 if cycle should be advanced,
6070 0 if INSN_PTR is set to point to the desirable insn,
6071 1 if choose_ready () should be restarted without advancing the cycle. */
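/* A typical caller (see the main scheduling loop in schedule_block below)
   acts on the result like this:

     res = choose_ready (&ready, first_cycle_insn_p, &insn);
     if (res < 0)
       break;                      -- finish the current cycle
     if (res > 0)
       goto restart_choose_ready;  -- ready list changed, retry
     ... res == 0: INSN is the chosen insn; schedule it.  */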
6072 static int
6073 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
6074 rtx_insn **insn_ptr)
6075 {
6076 if (dbg_cnt (sched_insn) == false)
6077 {
6078 if (nonscheduled_insns_begin == NULL_RTX)
6079 nonscheduled_insns_begin = current_sched_info->prev_head;
6080
6081 rtx_insn *insn = first_nonscheduled_insn ();
6082
6083 if (QUEUE_INDEX (insn) == QUEUE_READY)
6084 /* INSN is in the ready_list. */
6085 {
6086 ready_remove_insn (insn);
6087 *insn_ptr = insn;
6088 return 0;
6089 }
6090
6091 /* INSN is in the queue. Advance cycle to move it to the ready list. */
6092 gcc_assert (QUEUE_INDEX (insn) >= 0);
6093 return -1;
6094 }
6095
6096 if (dfa_lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
6097 || DEBUG_INSN_P (ready_element (ready, 0)))
6098 {
6099 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6100 *insn_ptr = ready_remove_first_dispatch (ready);
6101 else
6102 *insn_ptr = ready_remove_first (ready);
6103
6104 return 0;
6105 }
6106 else
6107 {
6108 /* Try to choose the best insn. */
6109 int index = 0, i;
6110 rtx_insn *insn;
6111
6112 insn = ready_element (ready, 0);
6113 if (INSN_CODE (insn) < 0)
6114 {
6115 *insn_ptr = ready_remove_first (ready);
6116 return 0;
6117 }
6118
6119 /* Filter the search space. */
6120 for (i = 0; i < ready->n_ready; i++)
6121 {
6122 ready_try[i] = 0;
6123
6124 insn = ready_element (ready, i);
6125
6126 /* If this insn is recognizable we should have already
6127 recognized it earlier.
6128 ??? Not very clear where this is supposed to be done.
6129 See dep_cost_1. */
6130 gcc_checking_assert (INSN_CODE (insn) >= 0
6131 || recog_memoized (insn) < 0);
6132 if (INSN_CODE (insn) < 0)
6133 {
6134 /* Non-recognized insns at position 0 are handled above. */
6135 gcc_assert (i > 0);
6136 ready_try[i] = 1;
6137 continue;
6138 }
6139
6140 if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
6141 {
6142 ready_try[i]
6143 = (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
6144 (insn, i));
6145
6146 if (ready_try[i] < 0)
6147 /* Queue instruction for several cycles.
6148 We need to restart choose_ready as we have changed
6149 the ready list. */
6150 {
6151 change_queue_index (insn, -ready_try[i]);
6152 return 1;
6153 }
6154
6155 /* Make sure that we didn't end up with 0'th insn filtered out.
6156 Don't be tempted to make life easier for backends and just
6157 requeue 0'th insn if (ready_try[0] == 0) and restart
6158 choose_ready. Backends should be very considerate about
6159 requeueing instructions -- especially the highest priority
6160 one at position 0. */
6161 gcc_assert (ready_try[i] == 0 || i > 0);
6162 if (ready_try[i])
6163 continue;
6164 }
6165
6166 gcc_assert (ready_try[i] == 0);
6167 /* INSN made it through the scrutiny of filters! */
6168 }
6169
6170 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
6171 {
6172 *insn_ptr = ready_remove_first (ready);
6173 if (sched_verbose >= 4)
6174 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
6175 (*current_sched_info->print_insn) (*insn_ptr, 0));
6176 return 0;
6177 }
6178 else
6179 {
6180 if (sched_verbose >= 4)
6181 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
6182 (*current_sched_info->print_insn)
6183 (ready_element (ready, index), 0));
6184
6185 *insn_ptr = ready_remove (ready, index);
6186 return 0;
6187 }
6188 }
6189 }
6190
6191 /* This function is called when we have successfully scheduled a
6192 block. It uses the schedule stored in the scheduled_insns vector
6193 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
6194 append the scheduled insns; TAIL is the insn after the scheduled
6195 block. TARGET_BB is the argument passed to schedule_block. */
6196
6197 static void
6198 commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
6199 {
6200 unsigned int i;
6201 rtx_insn *insn;
6202
6203 last_scheduled_insn = prev_head;
6204 for (i = 0;
6205 scheduled_insns.iterate (i, &insn);
6206 i++)
6207 {
6208 if (control_flow_insn_p (last_scheduled_insn)
6209 || current_sched_info->advance_target_bb (*target_bb, insn))
6210 {
6211 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
6212
6213 if (sched_verbose)
6214 {
6215 rtx_insn *x;
6216
6217 x = next_real_insn (last_scheduled_insn);
6218 gcc_assert (x);
6219 dump_new_block_header (1, *target_bb, x, tail);
6220 }
6221
6222 last_scheduled_insn = bb_note (*target_bb);
6223 }
6224
6225 if (current_sched_info->begin_move_insn)
6226 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
6227 move_insn (insn, last_scheduled_insn,
6228 current_sched_info->next_tail);
6229 if (!DEBUG_INSN_P (insn))
6230 reemit_notes (insn);
6231 last_scheduled_insn = insn;
6232 }
6233
6234 scheduled_insns.truncate (0);
6235 }
6236
6237 /* Examine all insns on the ready list and queue those which can't be
6238 issued in this cycle. TEMP_STATE is temporary scheduler state we
6239 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
6240 have been issued for the current cycle, which means it is valid to
6241 issue an asm statement.
6242
6243 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
6244 leave those for which SHADOW_P is true. If MODULO_EPILOGUE is true,
6245 we only leave insns which have an INSN_EXACT_TICK. */
6246
6247 static void
6248 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
6249 bool shadows_only_p, bool modulo_epilogue_p)
6250 {
6251 int i, pass;
6252 bool sched_group_found = false;
6253 int min_cost_group = 0;
6254
6255 if (sched_fusion)
6256 return;
6257
6258 for (i = 0; i < ready.n_ready; i++)
6259 {
6260 rtx_insn *insn = ready_element (&ready, i);
6261 if (SCHED_GROUP_P (insn))
6262 {
6263 sched_group_found = true;
6264 break;
6265 }
6266 }
6267
6268 /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
6269 such an insn first and note its cost. If at least one SCHED_GROUP_P insn
6270 gets queued, then all other insns get queued for one cycle later. */
6271 for (pass = sched_group_found ? 0 : 1; pass < 2; )
6272 {
6273 int n = ready.n_ready;
6274 for (i = 0; i < n; i++)
6275 {
6276 rtx_insn *insn = ready_element (&ready, i);
6277 int cost = 0;
6278 const char *reason = "resource conflict";
6279
6280 if (DEBUG_INSN_P (insn))
6281 continue;
6282
6283 if (sched_group_found && !SCHED_GROUP_P (insn)
6284 && ((pass == 0) || (min_cost_group >= 1)))
6285 {
6286 if (pass == 0)
6287 continue;
6288 cost = min_cost_group;
6289 reason = "not in sched group";
6290 }
6291 else if (modulo_epilogue_p
6292 && INSN_EXACT_TICK (insn) == INVALID_TICK)
6293 {
6294 cost = max_insn_queue_index;
6295 reason = "not an epilogue insn";
6296 }
6297 else if (shadows_only_p && !SHADOW_P (insn))
6298 {
6299 cost = 1;
6300 reason = "not a shadow";
6301 }
6302 else if (recog_memoized (insn) < 0)
6303 {
6304 if (!first_cycle_insn_p
6305 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
6306 || asm_noperands (PATTERN (insn)) >= 0))
6307 cost = 1;
6308 reason = "asm";
6309 }
6310 else if (sched_pressure != SCHED_PRESSURE_NONE)
6311 {
6312 if (sched_pressure == SCHED_PRESSURE_MODEL
6313 && INSN_TICK (insn) <= clock_var)
6314 {
6315 memcpy (temp_state, curr_state, dfa_state_size);
6316 if (state_transition (temp_state, insn) >= 0)
6317 INSN_TICK (insn) = clock_var + 1;
6318 }
6319 cost = 0;
6320 }
6321 else
6322 {
6323 int delay_cost = 0;
6324
6325 if (delay_htab)
6326 {
6327 struct delay_pair *delay_entry;
6328 delay_entry
6329 = delay_htab->find_with_hash (insn,
6330 htab_hash_pointer (insn));
6331 while (delay_entry && delay_cost == 0)
6332 {
6333 delay_cost = estimate_shadow_tick (delay_entry);
6334 if (delay_cost > max_insn_queue_index)
6335 delay_cost = max_insn_queue_index;
6336 delay_entry = delay_entry->next_same_i1;
6337 }
6338 }
6339
6340 memcpy (temp_state, curr_state, dfa_state_size);
6341 cost = state_transition (temp_state, insn);
6342 if (cost < 0)
6343 cost = 0;
6344 else if (cost == 0)
6345 cost = 1;
6346 if (cost < delay_cost)
6347 {
6348 cost = delay_cost;
6349 reason = "shadow tick";
6350 }
6351 }
6352 if (cost >= 1)
6353 {
6354 if (SCHED_GROUP_P (insn) && cost > min_cost_group)
6355 min_cost_group = cost;
6356 ready_remove (&ready, i);
6357 /* Normally we'd want to queue INSN for COST cycles. However,
6358 if SCHED_GROUP_P is set, then we must ensure that nothing
6359 else comes between INSN and its predecessor. If there is
6360 some other insn ready to fire on the next cycle, then that
6361 invariant would be broken.
6362
6363 So when SCHED_GROUP_P is set, just queue this insn for a
6364 single cycle. */
6365 queue_insn (insn, SCHED_GROUP_P (insn) ? 1 : cost, reason);
6366 if (i + 1 < n)
6367 break;
6368 }
6369 }
6370 if (i == n)
6371 pass++;
6372 }
6373 }
6374
6375 /* Called when we detect that the schedule is impossible. We examine the
6376 backtrack queue to find the earliest insn that caused this condition. */
6377
6378 static struct haifa_saved_data *
6379 verify_shadows (void)
6380 {
6381 struct haifa_saved_data *save, *earliest_fail = NULL;
6382 for (save = backtrack_queue; save; save = save->next)
6383 {
6384 int t;
6385 struct delay_pair *pair = save->delay_pair;
6386 rtx_insn *i1 = pair->i1;
6387
6388 for (; pair; pair = pair->next_same_i1)
6389 {
6390 rtx_insn *i2 = pair->i2;
6391
6392 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
6393 continue;
6394
6395 t = INSN_TICK (i1) + pair_delay (pair);
6396 if (t < clock_var)
6397 {
6398 if (sched_verbose >= 2)
6399 fprintf (sched_dump,
6400 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
6401 ", not ready\n",
6402 INSN_UID (pair->i1), INSN_UID (pair->i2),
6403 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6404 earliest_fail = save;
6405 break;
6406 }
6407 if (QUEUE_INDEX (i2) >= 0)
6408 {
6409 int queued_for = INSN_TICK (i2);
6410
6411 if (t < queued_for)
6412 {
6413 if (sched_verbose >= 2)
6414 fprintf (sched_dump,
6415 ";;\t\tfailed delay requirements for %d/%d"
6416 " (%d->%d), queued too late\n",
6417 INSN_UID (pair->i1), INSN_UID (pair->i2),
6418 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
6419 earliest_fail = save;
6420 break;
6421 }
6422 }
6423 }
6424 }
6425
6426 return earliest_fail;
6427 }
6428
6429 /* Print instructions together with useful scheduling information between
6430 HEAD and TAIL (inclusive). */
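/* The dump produced looks roughly like this (contents illustrative):

   ;;   | insn | prio |
   ;;   |   42 |    3 | (set (reg:SI 0) (mem:SI (reg:SI 1)))
   ;;   |   43 |    0 | note */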
6431 static void
6432 dump_insn_stream (rtx_insn *head, rtx_insn *tail)
6433 {
6434 fprintf (sched_dump, ";;\t| insn | prio |\n");
6435
6436 rtx_insn *next_tail = NEXT_INSN (tail);
6437 for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6438 {
6439 int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
6440 const char *pattern = (NOTE_P (insn)
6441 ? "note"
6442 : str_pattern_slim (PATTERN (insn)));
6443
6444 fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
6445 INSN_UID (insn), priority, pattern);
6446
6447 if (sched_verbose >= 4)
6448 {
6449 if (NOTE_P (insn) || LABEL_P (insn) || recog_memoized (insn) < 0)
6450 fprintf (sched_dump, "nothing");
6451 else
6452 print_reservation (sched_dump, insn);
6453 }
6454 fprintf (sched_dump, "\n");
6455 }
6456 }
6457
6458 /* Use forward list scheduling to rearrange insns of block pointed to by
6459 TARGET_BB, possibly bringing insns from subsequent blocks in the same
6460 region. */
6461
6462 bool
6463 schedule_block (basic_block *target_bb, state_t init_state)
6464 {
6465 int i;
6466 bool success = modulo_ii == 0;
6467 struct sched_block_state ls;
6468 state_t temp_state = NULL; /* It is used for multipass scheduling. */
6469 int sort_p, advance, start_clock_var;
6470
6471 /* Head/tail info for this block. */
6472 rtx_insn *prev_head = current_sched_info->prev_head;
6473 rtx_insn *next_tail = current_sched_info->next_tail;
6474 rtx_insn *head = NEXT_INSN (prev_head);
6475 rtx_insn *tail = PREV_INSN (next_tail);
6476
6477 if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
6478 && sched_pressure != SCHED_PRESSURE_MODEL && !sched_fusion)
6479 find_modifiable_mems (head, tail);
6480
6481 /* We used to have code to avoid getting parameters moved from hard
6482 argument registers into pseudos.
6483
6484 However, it was removed when it proved to be of marginal benefit
6485 and caused problems because schedule_block and compute_forward_dependences
6486 had different notions of what the "head" insn was. */
6487
6488 gcc_assert (head != tail || INSN_P (head));
6489
6490 haifa_recovery_bb_recently_added_p = false;
6491
6492 backtrack_queue = NULL;
6493
6494 /* Debug info. */
6495 if (sched_verbose)
6496 {
6497 dump_new_block_header (0, *target_bb, head, tail);
6498
6499 if (sched_verbose >= 2)
6500 {
6501 dump_insn_stream (head, tail);
6502 memset (&rank_for_schedule_stats, 0,
6503 sizeof (rank_for_schedule_stats));
6504 }
6505 }
6506
6507 if (init_state == NULL)
6508 state_reset (curr_state);
6509 else
6510 memcpy (curr_state, init_state, dfa_state_size);
6511
6512 /* Clear the ready list. */
6513 ready.first = ready.veclen - 1;
6514 ready.n_ready = 0;
6515 ready.n_debug = 0;
6516
6517 /* It is used for first cycle multipass scheduling. */
6518 temp_state = alloca (dfa_state_size);
6519
6520 if (targetm.sched.init)
6521 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
6522
6523 /* We start inserting insns after PREV_HEAD. */
6524 last_scheduled_insn = prev_head;
6525 last_nondebug_scheduled_insn = NULL;
6526 nonscheduled_insns_begin = NULL;
6527
6528 gcc_assert ((NOTE_P (last_scheduled_insn)
6529 || DEBUG_INSN_P (last_scheduled_insn))
6530 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
6531
6532 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
6533 queue. */
6534 q_ptr = 0;
6535 q_size = 0;
6536
6537 insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
6538 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
6539
6540 /* Start just before the beginning of time. */
6541 clock_var = -1;
6542
6543 /* We need the queue and ready lists and clock_var to be initialized
6544 in try_ready () (which is called through init_ready_list ()). */
6545 (*current_sched_info->init_ready_list) ();
6546
6547 if (sched_pressure)
6548 sched_pressure_start_bb (*target_bb);
6549
6550 /* The algorithm is O(n^2) in the number of ready insns at any given
6551 time in the worst case. Before reload we are more likely to have
6552 big lists, so truncate them to a reasonable size. */
6553 if (!reload_completed
6554 && ready.n_ready - ready.n_debug > param_max_sched_ready_insns)
6555 {
6556 ready_sort_debug (&ready);
6557 ready_sort_real (&ready);
6558
6559 /* Find first free-standing insn past param_max_sched_ready_insns.
6560 If there are debug insns, we know they're first. */
6561 for (i = param_max_sched_ready_insns + ready.n_debug; i < ready.n_ready;
6562 i++)
6563 if (!SCHED_GROUP_P (ready_element (&ready, i)))
6564 break;
6565
6566 if (sched_verbose >= 2)
6567 {
6568 fprintf (sched_dump,
6569 ";;\t\tReady list on entry: %d insns: ", ready.n_ready);
6570 debug_ready_list (&ready);
6571 fprintf (sched_dump,
6572 ";;\t\t before reload => truncated to %d insns\n", i);
6573 }
6574
6575 /* Delay all insns past it for 1 cycle. If the debug counter is
6576 activated, make an exception for the insn right after
6577 nonscheduled_insns_begin. */
6578 {
6579 rtx_insn *skip_insn;
6580
6581 if (dbg_cnt (sched_insn) == false)
6582 skip_insn = first_nonscheduled_insn ();
6583 else
6584 skip_insn = NULL;
6585
6586 while (i < ready.n_ready)
6587 {
6588 rtx_insn *insn;
6589
6590 insn = ready_remove (&ready, i);
6591
6592 if (insn != skip_insn)
6593 queue_insn (insn, 1, "list truncated");
6594 }
6595 if (skip_insn)
6596 ready_add (&ready, skip_insn, true);
6597 }
6598 }
6599
6600 /* Now we can restore basic block notes and maintain precise cfg. */
6601 restore_bb_notes (*target_bb);
6602
6603 last_clock_var = -1;
6604
6605 advance = 0;
6606
6607 gcc_assert (scheduled_insns.length () == 0);
6608 sort_p = TRUE;
6609 must_backtrack = false;
6610 modulo_insns_scheduled = 0;
6611
6612 ls.modulo_epilogue = false;
6613 ls.first_cycle_insn_p = true;
6614
6615 /* Loop until all the insns in BB are scheduled. */
6616 while ((*current_sched_info->schedule_more_p) ())
6617 {
6618 perform_replacements_new_cycle ();
6619 do
6620 {
6621 start_clock_var = clock_var;
6622
6623 clock_var++;
6624
6625 advance_one_cycle ();
6626
6627 /* Add to the ready list all pending insns that can be issued now.
6628 If there are no ready insns, increment clock until one
6629 is ready and add all pending insns at that point to the ready
6630 list. */
6631 queue_to_ready (&ready);
6632
6633 gcc_assert (ready.n_ready);
6634
6635 if (sched_verbose >= 2)
6636 {
6637 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6638 debug_ready_list (&ready);
6639 }
6640 advance -= clock_var - start_clock_var;
6641 }
6642 while (advance > 0);
6643
6644 if (ls.modulo_epilogue)
6645 {
6646 int stage = clock_var / modulo_ii;
6647 if (stage > modulo_last_stage * 2 + 2)
6648 {
6649 if (sched_verbose >= 2)
6650 fprintf (sched_dump,
6651 ";;\t\tmodulo scheduled succeeded at II %d\n",
6652 modulo_ii);
6653 success = true;
6654 goto end_schedule;
6655 }
6656 }
6657 else if (modulo_ii > 0)
6658 {
6659 int stage = clock_var / modulo_ii;
6660 if (stage > modulo_max_stages)
6661 {
6662 if (sched_verbose >= 2)
6663 fprintf (sched_dump,
6664 ";;\t\tfailing schedule due to excessive stages\n");
6665 goto end_schedule;
6666 }
6667 if (modulo_n_insns == modulo_insns_scheduled
6668 && stage > modulo_last_stage)
6669 {
6670 if (sched_verbose >= 2)
6671 fprintf (sched_dump,
6672 ";;\t\tfound kernel after %d stages, II %d\n",
6673 stage, modulo_ii);
6674 ls.modulo_epilogue = true;
6675 }
6676 }
6677
6678 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6679 if (ready.n_ready == 0)
6680 continue;
6681 if (must_backtrack)
6682 goto do_backtrack;
6683
6684 ls.shadows_only_p = false;
6685 cycle_issued_insns = 0;
6686 ls.can_issue_more = issue_rate;
6687 for (;;)
6688 {
6689 rtx_insn *insn;
6690 int cost;
6691 bool asm_p;
6692
6693 if (sort_p && ready.n_ready > 0)
6694 {
6695 /* Sort the ready list based on priority. This must be
6696 done every iteration through the loop, as schedule_insn
6697 may have readied additional insns that will not be
6698 sorted correctly. */
6699 ready_sort (&ready);
6700
6701 if (sched_verbose >= 2)
6702 {
6703 fprintf (sched_dump,
6704 ";;\t\tReady list after ready_sort: ");
6705 debug_ready_list (&ready);
6706 }
6707 }
6708
6709 /* We don't want md sched reorder to even see debug insns, so put
6710 them out right away. */
6711 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6712 && (*current_sched_info->schedule_more_p) ())
6713 {
6714 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6715 {
6716 rtx_insn *insn = ready_remove_first (&ready);
6717 gcc_assert (DEBUG_INSN_P (insn));
6718 (*current_sched_info->begin_schedule_ready) (insn);
6719 scheduled_insns.safe_push (insn);
6720 last_scheduled_insn = insn;
6721 advance = schedule_insn (insn);
6722 gcc_assert (advance == 0);
6723 if (ready.n_ready > 0)
6724 ready_sort (&ready);
6725 }
6726 }
6727
6728 if (ls.first_cycle_insn_p && !ready.n_ready)
6729 break;
6730
6731 resume_after_backtrack:
6732 /* Allow the target to reorder the list, typically for
6733 better instruction bundling. */
6734 if (sort_p
6735 && (ready.n_ready == 0
6736 || !SCHED_GROUP_P (ready_element (&ready, 0))))
6737 {
6738 if (ls.first_cycle_insn_p && targetm.sched.reorder)
6739 ls.can_issue_more
6740 = targetm.sched.reorder (sched_dump, sched_verbose,
6741 ready_lastpos (&ready),
6742 &ready.n_ready, clock_var);
6743 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6744 ls.can_issue_more
6745 = targetm.sched.reorder2 (sched_dump, sched_verbose,
6746 ready.n_ready
6747 ? ready_lastpos (&ready) : NULL,
6748 &ready.n_ready, clock_var);
6749 }
6750
6751 restart_choose_ready:
6752 if (sched_verbose >= 2)
6753 {
6754 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
6755 clock_var);
6756 debug_ready_list (&ready);
6757 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6758 print_curr_reg_pressure ();
6759 }
6760
6761 if (ready.n_ready == 0
6762 && ls.can_issue_more
6763 && reload_completed)
6764 {
6765 /* Allow scheduling insns directly from the queue in case
6766 there's nothing better to do (ready list is empty) but
6767 there are still vacant dispatch slots in the current cycle. */
6768 if (sched_verbose >= 6)
6769 fprintf (sched_dump,";;\t\tSecond chance\n");
6770 memcpy (temp_state, curr_state, dfa_state_size);
6771 if (early_queue_to_ready (temp_state, &ready))
6772 ready_sort (&ready);
6773 }
6774
6775 if (ready.n_ready == 0
6776 || !ls.can_issue_more
6777 || state_dead_lock_p (curr_state)
6778 || !(*current_sched_info->schedule_more_p) ())
6779 break;
6780
6781 /* Select and remove the insn from the ready list. */
6782 if (sort_p)
6783 {
6784 int res;
6785
6786 insn = NULL;
6787 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6788
6789 if (res < 0)
6790 /* Finish cycle. */
6791 break;
6792 if (res > 0)
6793 goto restart_choose_ready;
6794
6795 gcc_assert (insn != NULL_RTX);
6796 }
6797 else
6798 insn = ready_remove_first (&ready);
6799
6800 if (sched_pressure != SCHED_PRESSURE_NONE
6801 && INSN_TICK (insn) > clock_var)
6802 {
6803 ready_add (&ready, insn, true);
6804 advance = 1;
6805 break;
6806 }
6807
6808 if (targetm.sched.dfa_new_cycle
6809 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6810 insn, last_clock_var,
6811 clock_var, &sort_p))
6812 /* SORT_P is used by the target to override sorting
6813 of the ready list. This is needed when the target
6814 has modified its internal structures expecting that
6815 the insn will be issued next. As we need the insn
6816 to have the highest priority (so it will be returned by
6817 the ready_remove_first call above), we invoke
6818 ready_add (&ready, insn, true).
6819 But, still, there is one issue: INSN can be later
6820 discarded by scheduler's front end through
6821 current_sched_info->can_schedule_ready_p, hence, won't
6822 be issued next. */
6823 {
6824 ready_add (&ready, insn, true);
6825 break;
6826 }
6827
6828 sort_p = TRUE;
6829
6830 if (current_sched_info->can_schedule_ready_p
6831 && ! (*current_sched_info->can_schedule_ready_p) (insn))
6832 /* We normally get here only if we don't want to move
6833 the insn from the split block. */
6834 {
6835 TODO_SPEC (insn) = DEP_POSTPONED;
6836 goto restart_choose_ready;
6837 }
6838
6839 if (delay_htab)
6840 {
6841 /* If this insn is the first part of a delay-slot pair, record a
6842 backtrack point. */
6843 struct delay_pair *delay_entry;
6844 delay_entry
6845 = delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
6846 if (delay_entry)
6847 {
6848 save_backtrack_point (delay_entry, ls);
6849 if (sched_verbose >= 2)
6850 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6851 }
6852 }
6853
6854 /* DECISION is made. */
6855
6856 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6857 {
6858 modulo_insns_scheduled++;
6859 modulo_last_stage = clock_var / modulo_ii;
6860 }
6861 if (TODO_SPEC (insn) & SPECULATIVE)
6862 generate_recovery_code (insn);
6863
6864 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6865 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6866
6867 /* Update counters, etc in the scheduler's front end. */
6868 (*current_sched_info->begin_schedule_ready) (insn);
6869 scheduled_insns.safe_push (insn);
6870 gcc_assert (NONDEBUG_INSN_P (insn));
6871 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6872
6873 if (recog_memoized (insn) >= 0)
6874 {
6875 memcpy (temp_state, curr_state, dfa_state_size);
6876 cost = state_transition (curr_state, insn);
6877 if (sched_pressure != SCHED_PRESSURE_WEIGHTED && !sched_fusion)
6878 gcc_assert (cost < 0);
6879 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6880 cycle_issued_insns++;
6881 asm_p = false;
6882 }
6883 else
6884 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6885 || asm_noperands (PATTERN (insn)) >= 0);
6886
6887 if (targetm.sched.variable_issue)
6888 ls.can_issue_more =
6889 targetm.sched.variable_issue (sched_dump, sched_verbose,
6890 insn, ls.can_issue_more);
6891 /* A naked CLOBBER or USE generates no instruction, so do
6892 not count them against the issue rate. */
6893 else if (GET_CODE (PATTERN (insn)) != USE
6894 && GET_CODE (PATTERN (insn)) != CLOBBER)
6895 ls.can_issue_more--;
6896 advance = schedule_insn (insn);
6897
6898 if (SHADOW_P (insn))
6899 ls.shadows_only_p = true;
6900
6901 /* After issuing an asm insn we should start a new cycle. */
6902 if (advance == 0 && asm_p)
6903 advance = 1;
6904
6905 if (must_backtrack)
6906 break;
6907
6908 if (advance != 0)
6909 break;
6910
6911 ls.first_cycle_insn_p = false;
6912 if (ready.n_ready > 0)
6913 prune_ready_list (temp_state, false, ls.shadows_only_p,
6914 ls.modulo_epilogue);
6915 }
6916
6917 do_backtrack:
6918 if (!must_backtrack)
6919 for (i = 0; i < ready.n_ready; i++)
6920 {
6921 rtx_insn *insn = ready_element (&ready, i);
6922 if (INSN_EXACT_TICK (insn) == clock_var)
6923 {
6924 must_backtrack = true;
6925 clock_var++;
6926 break;
6927 }
6928 }
6929 if (must_backtrack && modulo_ii > 0)
6930 {
6931 if (modulo_backtracks_left == 0)
6932 goto end_schedule;
6933 modulo_backtracks_left--;
6934 }
6935 while (must_backtrack)
6936 {
6937 struct haifa_saved_data *failed;
6938 rtx_insn *failed_insn;
6939
6940 must_backtrack = false;
6941 failed = verify_shadows ();
6942 gcc_assert (failed);
6943
6944 failed_insn = failed->delay_pair->i1;
6945 /* Clear these queues. */
6946 perform_replacements_new_cycle ();
6947 toggle_cancelled_flags (false);
6948 unschedule_insns_until (failed_insn);
6949 while (failed != backtrack_queue)
6950 free_topmost_backtrack_point (true);
6951 restore_last_backtrack_point (&ls);
6952 if (sched_verbose >= 2)
6953 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6954 /* Delay by at least a cycle. This could cause additional
6955 backtracking. */
6956 queue_insn (failed_insn, 1, "backtracked");
6957 advance = 0;
6958 if (must_backtrack)
6959 continue;
6960 if (ready.n_ready > 0)
6961 goto resume_after_backtrack;
6962 else
6963 {
6964 if (clock_var == 0 && ls.first_cycle_insn_p)
6965 goto end_schedule;
6966 advance = 1;
6967 break;
6968 }
6969 }
6970 ls.first_cycle_insn_p = true;
6971 }
6972 if (ls.modulo_epilogue)
6973 success = true;
6974 end_schedule:
6975 if (!ls.first_cycle_insn_p || advance)
6976 advance_one_cycle ();
6977 perform_replacements_new_cycle ();
6978 if (modulo_ii > 0)
6979 {
6980 /* Once again, debug insn suckiness: they can be on the ready list
6981 even if they have unresolved dependencies. To make our view
6982 of the world consistent, remove such "ready" insns. */
6983 restart_debug_insn_loop:
6984 for (i = ready.n_ready - 1; i >= 0; i--)
6985 {
6986 rtx_insn *x;
6987
6988 x = ready_element (&ready, i);
6989 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
6990 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
6991 {
6992 ready_remove (&ready, i);
6993 goto restart_debug_insn_loop;
6994 }
6995 }
6996 for (i = ready.n_ready - 1; i >= 0; i--)
6997 {
6998 rtx_insn *x;
6999
7000 x = ready_element (&ready, i);
7001 resolve_dependencies (x);
7002 }
7003 for (i = 0; i <= max_insn_queue_index; i++)
7004 {
7005 rtx_insn_list *link;
7006 while ((link = insn_queue[i]) != NULL)
7007 {
7008 rtx_insn *x = link->insn ();
7009 insn_queue[i] = link->next ();
7010 QUEUE_INDEX (x) = QUEUE_NOWHERE;
7011 free_INSN_LIST_node (link);
7012 resolve_dependencies (x);
7013 }
7014 }
7015 }
7016
7017 if (!success)
7018 undo_all_replacements ();
7019
7020 /* Debug info. */
7021 if (sched_verbose)
7022 {
7023 fprintf (sched_dump, ";;\tReady list (final): ");
7024 debug_ready_list (&ready);
7025 }
7026
7027 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
7028 /* Sanity check -- queue must be empty now. Meaningless if region has
7029 multiple bbs. */
7030 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
7031 else if (modulo_ii == 0)
7032 {
7033 /* We must maintain QUEUE_INDEX between blocks in region. */
7034 for (i = ready.n_ready - 1; i >= 0; i--)
7035 {
7036 rtx_insn *x;
7037
7038 x = ready_element (&ready, i);
7039 QUEUE_INDEX (x) = QUEUE_NOWHERE;
7040 TODO_SPEC (x) = HARD_DEP;
7041 }
7042
7043 if (q_size)
7044 for (i = 0; i <= max_insn_queue_index; i++)
7045 {
7046 rtx_insn_list *link;
7047 for (link = insn_queue[i]; link; link = link->next ())
7048 {
7049 rtx_insn *x;
7050
7051 x = link->insn ();
7052 QUEUE_INDEX (x) = QUEUE_NOWHERE;
7053 TODO_SPEC (x) = HARD_DEP;
7054 }
7055 free_INSN_LIST_list (&insn_queue[i]);
7056 }
7057 }
7058
7059 if (sched_pressure == SCHED_PRESSURE_MODEL)
7060 model_end_schedule ();
7061
7062 if (success)
7063 {
7064 commit_schedule (prev_head, tail, target_bb);
7065 if (sched_verbose)
7066 fprintf (sched_dump, ";; total time = %d\n", clock_var);
7067 }
7068 else
7069 last_scheduled_insn = tail;
7070
7071 scheduled_insns.truncate (0);
7072
7073 if (!current_sched_info->queue_must_finish_empty
7074 || haifa_recovery_bb_recently_added_p)
7075 {
7076 /* INSN_TICK (minimum clock tick at which the insn becomes
7077 ready) may not be correct for the insns in the subsequent
7078 blocks of the region. We should use a correct value of
7079 `clock_var' or modify INSN_TICK. It is better to keep
7080 the clock_var value equal to 0 at the start of a basic block.
7081 Therefore we modify INSN_TICK here. */
7082 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
7083 }
7084
7085 if (targetm.sched.finish)
7086 {
7087 targetm.sched.finish (sched_dump, sched_verbose);
7088 /* The target might have added some instructions to the scheduled block
7089 in its md_finish () hook. These new insns don't have any data
7090 initialized, and to identify them we extend h_i_d so that they'll
7091 get zero luids. */
7092 sched_extend_luids ();
7093 }
7094
7095 /* Update head/tail boundaries. */
7096 head = NEXT_INSN (prev_head);
7097 tail = last_scheduled_insn;
7098
7099 if (sched_verbose)
7100 {
7101 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n",
7102 INSN_UID (head), INSN_UID (tail));
7103
7104 if (sched_verbose >= 2)
7105 {
7106 dump_insn_stream (head, tail);
7107 print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats,
7108 NULL);
7109 }
7110
7111 fprintf (sched_dump, "\n");
7112 }
7113
7114 head = restore_other_notes (head, NULL);
7115
7116 current_sched_info->head = head;
7117 current_sched_info->tail = tail;
7118
7119 free_backtrack_queue ();
7120
7121 return success;
7122 }
7123 \f
7124 /* Set_priorities: compute priority of each insn in the block. */
7125
7126 int
7127 set_priorities (rtx_insn *head, rtx_insn *tail)
7128 {
7129 rtx_insn *insn;
7130 int n_insn;
7131 int sched_max_insns_priority =
7132 current_sched_info->sched_max_insns_priority;
7133 rtx_insn *prev_head;
7134
7135 if (head == tail && ! INSN_P (head))
7136 gcc_unreachable ();
7137
7138 n_insn = 0;
7139
7140 prev_head = PREV_INSN (head);
7141 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
7142 {
7143 if (!INSN_P (insn))
7144 continue;
7145
7146 n_insn++;
7147 (void) priority (insn);
7148
7149 gcc_assert (INSN_PRIORITY_KNOWN (insn));
7150
7151 sched_max_insns_priority = MAX (sched_max_insns_priority,
7152 INSN_PRIORITY (insn));
7153 }
7154
7155 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
7156
7157 return n_insn;
7158 }
7159
7160 /* Set sched_dump and sched_verbose for the desired debugging output. */
7161 void
7162 setup_sched_dump (void)
7163 {
7164 sched_verbose = sched_verbose_param;
7165 sched_dump = dump_file;
7166 if (!dump_file)
7167 sched_verbose = 0;
7168 }
7169
7170 /* Allocate data for register pressure sensitive scheduling. */
7171 static void
7172 alloc_global_sched_pressure_data (void)
7173 {
7174 if (sched_pressure != SCHED_PRESSURE_NONE)
7175 {
7176 int i, max_regno = max_reg_num ();
7177
7178 if (sched_dump != NULL)
7179 /* We need info about pseudos for rtl dumps about pseudo
7180 classes and costs. */
7181 regstat_init_n_sets_and_refs ();
7182 ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
7183 sched_regno_pressure_class
7184 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
7185 for (i = 0; i < max_regno; i++)
7186 sched_regno_pressure_class[i]
7187 = (i < FIRST_PSEUDO_REGISTER
7188 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
7189 : ira_pressure_class_translate[reg_allocno_class (i)]);
7190 curr_reg_live = BITMAP_ALLOC (NULL);
7191 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7192 {
7193 saved_reg_live = BITMAP_ALLOC (NULL);
7194 region_ref_regs = BITMAP_ALLOC (NULL);
7195 }
7196 if (sched_pressure == SCHED_PRESSURE_MODEL)
7197 tmp_bitmap = BITMAP_ALLOC (NULL);
7198
7199 /* Calculate the number of call-saved and fixed registers in each of the
7200 register classes that we calculate register pressure for. */
7201 for (int c = 0; c < ira_pressure_classes_num; ++c)
7202 {
7203 enum reg_class cl = ira_pressure_classes[c];
7204
7205 call_saved_regs_num[cl] = 0;
7206 fixed_regs_num[cl] = 0;
7207
7208 for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
7209 {
7210 unsigned int regno = ira_class_hard_regs[cl][i];
7211 if (fixed_regs[regno])
7212 ++fixed_regs_num[cl];
7213 else if (!crtl->abi->clobbers_full_reg_p (regno))
7214 ++call_saved_regs_num[cl];
7215 }
7216 }
7217 }
7218 }
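/* Illustrative note (hypothetical target): if pressure class GENERAL_REGS
   has 16 hard registers of which 2 are fixed (say, the stack and frame
   pointers) and, of the remaining 14, 6 are not clobbered by the current
   function's ABI, the loop above ends with
   fixed_regs_num[GENERAL_REGS] == 2 and
   call_saved_regs_num[GENERAL_REGS] == 6.  These counts are presumably
   consulted by the pressure heuristics to estimate how many registers of
   a class are genuinely available.  */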
7219
7220 /* Free data for register pressure sensitive scheduling. Also called
7221 from schedule_region when stopping sched-pressure early. */
7222 void
7223 free_global_sched_pressure_data (void)
7224 {
7225 if (sched_pressure != SCHED_PRESSURE_NONE)
7226 {
7227 if (regstat_n_sets_and_refs != NULL)
7228 regstat_free_n_sets_and_refs ();
7229 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
7230 {
7231 BITMAP_FREE (region_ref_regs);
7232 BITMAP_FREE (saved_reg_live);
7233 }
7234 if (sched_pressure == SCHED_PRESSURE_MODEL)
7235 BITMAP_FREE (tmp_bitmap);
7236 BITMAP_FREE (curr_reg_live);
7237 free (sched_regno_pressure_class);
7238 }
7239 }
7240
7241 /* Initialize some global state for the scheduler. This function works
7242 with the common data shared between all the schedulers. It is called
7243 from the scheduler specific initialization routine. */
7244
7245 void
7246 sched_init (void)
7247 {
7248 /* Disable speculative loads if cc0 is defined. */
7249 if (HAVE_cc0)
7250 flag_schedule_speculative_load = 0;
7251
7252 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
7253 targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
7254
7255 if (live_range_shrinkage_p)
7256 sched_pressure = SCHED_PRESSURE_WEIGHTED;
7257 else if (flag_sched_pressure
7258 && !reload_completed
7259 && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
7260 sched_pressure = ((enum sched_pressure_algorithm)
7261 param_sched_pressure_algorithm);
7262 else
7263 sched_pressure = SCHED_PRESSURE_NONE;
7264
7265 if (sched_pressure != SCHED_PRESSURE_NONE)
7266 ira_setup_eliminable_regset ();
7267
7268 /* Initialize SPEC_INFO. */
7269 if (targetm.sched.set_sched_flags)
7270 {
7271 spec_info = &spec_info_var;
7272 targetm.sched.set_sched_flags (spec_info);
7273
7274 if (spec_info->mask != 0)
7275 {
7276 spec_info->data_weakness_cutoff
7277 = (param_sched_spec_prob_cutoff * MAX_DEP_WEAK) / 100;
7278 spec_info->control_weakness_cutoff
7279 = (param_sched_spec_prob_cutoff * REG_BR_PROB_BASE) / 100;
7280 }
7281 else
7282 /* So we won't read anything accidentally. */
7283 spec_info = NULL;
7284
7285 }
7286 else
7287 /* So we won't read anything accidentally. */
7288 spec_info = 0;
7289
7290 /* Initialize issue_rate. */
7291 if (targetm.sched.issue_rate)
7292 issue_rate = targetm.sched.issue_rate ();
7293 else
7294 issue_rate = 1;
7295
7296 if (targetm.sched.first_cycle_multipass_dfa_lookahead
7297 /* Don't use max_issue with reg_pressure scheduling. Multipass
7298 scheduling and reg_pressure scheduling undo each other's decisions. */
7299 && sched_pressure == SCHED_PRESSURE_NONE)
7300 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
7301 else
7302 dfa_lookahead = 0;
7303
7304 /* Set to "0" so that we recalculate. */
7305 max_lookahead_tries = 0;
7306
7307 if (targetm.sched.init_dfa_pre_cycle_insn)
7308 targetm.sched.init_dfa_pre_cycle_insn ();
7309
7310 if (targetm.sched.init_dfa_post_cycle_insn)
7311 targetm.sched.init_dfa_post_cycle_insn ();
7312
7313 dfa_start ();
7314 dfa_state_size = state_size ();
7315
7316 init_alias_analysis ();
7317
7318 if (!sched_no_dce)
7319 df_set_flags (DF_LR_RUN_DCE);
7320 df_note_add_problem ();
7321
7322 /* More problems are needed for interloop dependence calculation in SMS. */
7323 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
7324 {
7325 df_rd_add_problem ();
7326 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
7327 }
7328
7329 df_analyze ();
7330
7331 /* Do not run DCE after reload, as this can kill nops inserted
7332 by bundling. */
7333 if (reload_completed)
7334 df_clear_flags (DF_LR_RUN_DCE);
7335
7336 regstat_compute_calls_crossed ();
7337
7338 if (targetm.sched.init_global)
7339 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
7340
7341 alloc_global_sched_pressure_data ();
7342
7343 curr_state = xmalloc (dfa_state_size);
7344 }
7345
7346 static void haifa_init_only_bb (basic_block, basic_block);
7347
7348 /* Initialize data structures specific to the Haifa scheduler. */
7349 void
7350 haifa_sched_init (void)
7351 {
7352 setup_sched_dump ();
7353 sched_init ();
7354
7355 scheduled_insns.create (0);
7356
7357 if (spec_info != NULL)
7358 {
7359 sched_deps_info->use_deps_list = 1;
7360 sched_deps_info->generate_spec_deps = 1;
7361 }
7362
7363 /* Initialize luids, dependency caches, target and h_i_d for the
7364 whole function. */
7365 {
7366 sched_init_bbs ();
7367
7368 auto_vec<basic_block> bbs (n_basic_blocks_for_fn (cfun));
7369 basic_block bb;
7370 FOR_EACH_BB_FN (bb, cfun)
7371 bbs.quick_push (bb);
7372 sched_init_luids (bbs);
7373 sched_deps_init (true);
7374 sched_extend_target ();
7375 haifa_init_h_i_d (bbs);
7376 }
7377
7378 sched_init_only_bb = haifa_init_only_bb;
7379 sched_split_block = sched_split_block_1;
7380 sched_create_empty_bb = sched_create_empty_bb_1;
7381 haifa_recovery_bb_ever_added_p = false;
7382
7383 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
7384 before_recovery = 0;
7385 after_recovery = 0;
7386
7387 modulo_ii = 0;
7388 }
7389
7390 /* Finish work with the data specific to the Haifa scheduler. */
7391 void
7392 haifa_sched_finish (void)
7393 {
7394 sched_create_empty_bb = NULL;
7395 sched_split_block = NULL;
7396 sched_init_only_bb = NULL;
7397
7398 if (spec_info && spec_info->dump)
7399 {
7400 char c = reload_completed ? 'a' : 'b';
7401
7402 fprintf (spec_info->dump,
7403 ";; %s:\n", current_function_name ());
7404
7405 fprintf (spec_info->dump,
7406 ";; Procedure %cr-begin-data-spec motions == %d\n",
7407 c, nr_begin_data);
7408 fprintf (spec_info->dump,
7409 ";; Procedure %cr-be-in-data-spec motions == %d\n",
7410 c, nr_be_in_data);
7411 fprintf (spec_info->dump,
7412 ";; Procedure %cr-begin-control-spec motions == %d\n",
7413 c, nr_begin_control);
7414 fprintf (spec_info->dump,
7415 ";; Procedure %cr-be-in-control-spec motions == %d\n",
7416 c, nr_be_in_control);
7417 }
7418
7419 scheduled_insns.release ();
7420
7421 /* Finalize h_i_d, dependency caches, and luids for the whole
7422 function. Target will be finalized in md_global_finish (). */
7423 sched_deps_finish ();
7424 sched_finish_luids ();
7425 current_sched_info = NULL;
7426 insn_queue = NULL;
7427 sched_finish ();
7428 }
7429
7430 /* Free global data used during insn scheduling. This function works with
7431 the common data shared between the schedulers. */
7432
7433 void
7434 sched_finish (void)
7435 {
7436 haifa_finish_h_i_d ();
7437 free_global_sched_pressure_data ();
7438 free (curr_state);
7439
7440 if (targetm.sched.finish_global)
7441 targetm.sched.finish_global (sched_dump, sched_verbose);
7442
7443 end_alias_analysis ();
7444
7445 regstat_free_calls_crossed ();
7446
7447 dfa_finish ();
7448 }
7449
7450 /* Free all delay_pair structures that were recorded. */
7451 void
7452 free_delay_pairs (void)
7453 {
7454 if (delay_htab)
7455 {
7456 delay_htab->empty ();
7457 delay_htab_i2->empty ();
7458 }
7459 }
7460
7461 /* Fix INSN_TICKs of the instructions in the current block as well as
7462 INSN_TICKs of their dependents.
7463 HEAD and TAIL are the beginning and the end of the currently scheduled block. */
7464 static void
7465 fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7466 {
7467 /* Set of instructions with corrected INSN_TICK. */
7468 auto_bitmap processed;
7469 /* ??? It is doubtful whether we should assume that a cycle advance
7470 happens on basic block boundaries. Basically, insns that are
7471 unconditionally ready at the start of the block are preferable to
7472 those which have a one-cycle dependency on an insn from the previous block. */
7473 int next_clock = clock_var + 1;
7474
7475 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
7476 the INSN_TICKs of their dependent instructions, so that INSN_TICKs are
7477 consistent across different blocks. */
7478 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
7479 {
7480 if (INSN_P (head))
7481 {
7482 int tick;
7483 sd_iterator_def sd_it;
7484 dep_t dep;
7485
7486 tick = INSN_TICK (head);
7487 gcc_assert (tick >= MIN_TICK);
7488
7489 /* Fix the INSN_TICK of an instruction from the just-scheduled block. */
7490 if (bitmap_set_bit (processed, INSN_LUID (head)))
7491 {
7492 tick -= next_clock;
7493
7494 if (tick < MIN_TICK)
7495 tick = MIN_TICK;
7496
7497 INSN_TICK (head) = tick;
7498 }
7499
7500 if (DEBUG_INSN_P (head))
7501 continue;
7502
7503 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
7504 {
7505 rtx_insn *next;
7506
7507 next = DEP_CON (dep);
7508 tick = INSN_TICK (next);
7509
7510 if (tick != INVALID_TICK
7511 /* If NEXT has its INSN_TICK calculated, fix it.
7512 If not - it will be properly calculated from
7513 scratch later in fix_tick_ready. */
7514 && bitmap_set_bit (processed, INSN_LUID (next)))
7515 {
7516 tick -= next_clock;
7517
7518 if (tick < MIN_TICK)
7519 tick = MIN_TICK;
7520
7521 if (tick > INTER_TICK (next))
7522 INTER_TICK (next) = tick;
7523 else
7524 tick = INTER_TICK (next);
7525
7526 INSN_TICK (next) = tick;
7527 }
7528 }
7529 }
7530 }
7531 }
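/* Worked example (assumed numbers): suppose the just-scheduled block
   finished with clock_var == 9, so next_clock == 10.  An insn from that
   block with INSN_TICK == 12 is rebased to 12 - 10 == 2, i.e. it becomes
   ready two cycles into the next block, which starts over from
   clock_var == 0.  */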
7532
7533 /* Check if NEXT is ready to be added to the ready or queue list.
7534 If "yes", add it to the proper list.
7535 Returns:
7536 -1 - is not ready yet,
7537 0 - added to the ready list,
7538 0 < N - queued for N cycles. */
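/* A minimal caller sketch (hypothetical), showing how the return value
   convention above is typically consumed:

     int delay = try_ready (next);
     if (delay < 0)
       ... NEXT still has unresolved hard dependencies ...
     else if (delay == 0)
       ... NEXT went onto the ready list ...
     else
       ... NEXT was queued for DELAY cycles ...  */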
7539 int
7540 try_ready (rtx_insn *next)
7541 {
7542 ds_t old_ts, new_ts;
7543
7544 old_ts = TODO_SPEC (next);
7545
7546 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
7547 && (old_ts == HARD_DEP
7548 || old_ts == DEP_POSTPONED
7549 || (old_ts & SPECULATIVE)
7550 || old_ts == DEP_CONTROL));
7551
7552 new_ts = recompute_todo_spec (next, false);
7553
7554 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7555 gcc_assert (new_ts == old_ts
7556 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
7557 else if (current_sched_info->new_ready)
7558 new_ts = current_sched_info->new_ready (next, new_ts);
7559
7560 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
7561 have either its original pattern or a changed (speculative) one. This
7562 is due to changing the ebb in region scheduling.
7563 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
7564 has a speculative pattern.
7565
7566 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
7567 control-speculative NEXT could have been discarded by sched-rgn.c
7568 (the same case as when discarded by can_schedule_ready_p ()). */
7569
7570 if ((new_ts & SPECULATIVE)
7571 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7572 need to change anything. */
7573 && new_ts != old_ts)
7574 {
7575 int res;
7576 rtx new_pat;
7577
7578 gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7579
7580 res = haifa_speculate_insn (next, new_ts, &new_pat);
7581
7582 switch (res)
7583 {
7584 case -1:
7585 /* It would be nice to change DEP_STATUS of all dependences,
7586 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7587 so we won't reanalyze anything. */
7588 new_ts = HARD_DEP;
7589 break;
7590
7591 case 0:
7592 /* We follow the rule that every speculative insn
7593 has a non-null ORIG_PAT. */
7594 if (!ORIG_PAT (next))
7595 ORIG_PAT (next) = PATTERN (next);
7596 break;
7597
7598 case 1:
7599 if (!ORIG_PAT (next))
7600 /* If we are going to overwrite the original pattern of the insn,
7601 save it. */
7602 ORIG_PAT (next) = PATTERN (next);
7603
7604 res = haifa_change_pattern (next, new_pat);
7605 gcc_assert (res);
7606 break;
7607
7608 default:
7609 gcc_unreachable ();
7610 }
7611 }
7612
7613 /* We need to restore the pattern only if (new_ts == 0), because otherwise
7614 it is either correct (new_ts & SPECULATIVE),
7615 or we simply don't care (new_ts & HARD_DEP). */
7616
7617 gcc_assert (!ORIG_PAT (next)
7618 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7619
7620 TODO_SPEC (next) = new_ts;
7621
7622 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7623 {
7624 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7625 control-speculative NEXT could have been discarded by sched-rgn.c
7626 (the same case as when discarded by can_schedule_ready_p ()). */
7627 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7628
7629 change_queue_index (next, QUEUE_NOWHERE);
7630
7631 return -1;
7632 }
7633 else if (!(new_ts & BEGIN_SPEC)
7634 && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7635 && !IS_SPECULATION_CHECK_P (next))
7636 /* We should change the pattern of every previously speculative
7637 instruction -- and we determine whether NEXT was speculative by using
7638 the ORIG_PAT field. There is one exception: speculation checks have
7639 an ORIG_PAT too, so skip them. */
7640 {
7641 bool success = haifa_change_pattern (next, ORIG_PAT (next));
7642 gcc_assert (success);
7643 ORIG_PAT (next) = 0;
7644 }
7645
7646 if (sched_verbose >= 2)
7647 {
7648 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7649 (*current_sched_info->print_insn) (next, 0));
7650
7651 if (spec_info && spec_info->dump)
7652 {
7653 if (new_ts & BEGIN_DATA)
7654 fprintf (spec_info->dump, "; data-spec;");
7655 if (new_ts & BEGIN_CONTROL)
7656 fprintf (spec_info->dump, "; control-spec;");
7657 if (new_ts & BE_IN_CONTROL)
7658 fprintf (spec_info->dump, "; in-control-spec;");
7659 }
7660 if (TODO_SPEC (next) & DEP_CONTROL)
7661 fprintf (sched_dump, " predicated");
7662 fprintf (sched_dump, "\n");
7663 }
7664
7665 adjust_priority (next);
7666
7667 return fix_tick_ready (next);
7668 }
7669
7670 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
7671 static int
7672 fix_tick_ready (rtx_insn *next)
7673 {
7674 int tick, delay;
7675
7676 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7677 {
7678 int full_p;
7679 sd_iterator_def sd_it;
7680 dep_t dep;
7681
7682 tick = INSN_TICK (next);
7683 /* If TICK is not equal to INVALID_TICK, then update
7684 INSN_TICK of NEXT with the cost of the most recently resolved
7685 dependence. Otherwise, recalculate it from scratch. */
7686 full_p = (tick == INVALID_TICK);
7687
7688 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7689 {
7690 rtx_insn *pro = DEP_PRO (dep);
7691 int tick1;
7692
7693 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7694
7695 tick1 = INSN_TICK (pro) + dep_cost (dep);
7696 if (tick1 > tick)
7697 tick = tick1;
7698
7699 if (!full_p)
7700 break;
7701 }
7702 }
7703 else
7704 tick = -1;
7705
7706 INSN_TICK (next) = tick;
7707
7708 delay = tick - clock_var;
7709 if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE || sched_fusion)
7710 delay = QUEUE_READY;
7711
7712 change_queue_index (next, delay);
7713
7714 return delay;
7715 }
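/* Illustrative arithmetic (assumed numbers): if NEXT has two resolved
   producers with INSN_TICK 3 and 5 and dependence costs 1 and 2, the
   full recalculation above yields INSN_TICK (NEXT) = MAX (3 + 1, 5 + 2)
   = 7.  With clock_var == 4 that gives delay == 3, so NEXT is placed
   three slots ahead in the insn queue rather than on the ready list.  */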
7716
7717 /* Move NEXT to the proper queue list with (DELAY >= 1),
7718 or add it to the ready list (DELAY == QUEUE_READY),
7719 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
7720 static void
7721 change_queue_index (rtx_insn *next, int delay)
7722 {
7723 int i = QUEUE_INDEX (next);
7724
7725 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7726 && delay != 0);
7727 gcc_assert (i != QUEUE_SCHEDULED);
7728
7729 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7730 || (delay < 0 && delay == i))
7731 /* We have nothing to do. */
7732 return;
7733
7734 /* Remove NEXT from wherever it is now. */
7735 if (i == QUEUE_READY)
7736 ready_remove_insn (next);
7737 else if (i >= 0)
7738 queue_remove (next);
7739
7740 /* Add it to the proper place. */
7741 if (delay == QUEUE_READY)
7742 ready_add (readyp, next, false);
7743 else if (delay >= 1)
7744 queue_insn (next, delay, "change queue index");
7745
7746 if (sched_verbose >= 2)
7747 {
7748 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7749 (*current_sched_info->print_insn) (next, 0));
7750
7751 if (delay == QUEUE_READY)
7752 fprintf (sched_dump, " into ready\n");
7753 else if (delay >= 1)
7754 fprintf (sched_dump, " into queue with cost=%d\n", delay);
7755 else
7756 fprintf (sched_dump, " removed from ready or queue lists\n");
7757 }
7758 }
7759
7760 static int sched_ready_n_insns = -1;
7761
7762 /* Initialize per region data structures. */
7763 void
7764 sched_extend_ready_list (int new_sched_ready_n_insns)
7765 {
7766 int i;
7767
7768 if (sched_ready_n_insns == -1)
7769 /* At the first call we need to initialize one more choice_stack
7770 entry. */
7771 {
7772 i = 0;
7773 sched_ready_n_insns = 0;
7774 scheduled_insns.reserve (new_sched_ready_n_insns);
7775 }
7776 else
7777 i = sched_ready_n_insns + 1;
7778
7779 ready.veclen = new_sched_ready_n_insns + issue_rate;
7780 ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
7781
7782 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7783
7784 ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7785 sched_ready_n_insns,
7786 sizeof (*ready_try));
7787
7788 /* We allocate +1 element to save initial state in the choice_stack[0]
7789 entry. */
7790 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7791 new_sched_ready_n_insns + 1);
7792
7793 for (; i <= new_sched_ready_n_insns; i++)
7794 {
7795 choice_stack[i].state = xmalloc (dfa_state_size);
7796
7797 if (targetm.sched.first_cycle_multipass_init)
7798 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7799 .target_data));
7800 }
7801
7802 sched_ready_n_insns = new_sched_ready_n_insns;
7803 }
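/* Sizing note (numbers purely illustrative): with
   new_sched_ready_n_insns == 10 and issue_rate == 4, ready.vec gets
   14 slots while choice_stack gets 11 entries, choice_stack[0] being
   reserved for the initial DFA state as noted above.  */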
7804
7805 /* Free per region data structures. */
7806 void
7807 sched_finish_ready_list (void)
7808 {
7809 int i;
7810
7811 free (ready.vec);
7812 ready.vec = NULL;
7813 ready.veclen = 0;
7814
7815 free (ready_try);
7816 ready_try = NULL;
7817
7818 for (i = 0; i <= sched_ready_n_insns; i++)
7819 {
7820 if (targetm.sched.first_cycle_multipass_fini)
7821 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7822 .target_data));
7823
7824 free (choice_stack [i].state);
7825 }
7826 free (choice_stack);
7827 choice_stack = NULL;
7828
7829 sched_ready_n_insns = -1;
7830 }
7831
7832 static int
7833 haifa_luid_for_non_insn (rtx x)
7834 {
7835 gcc_assert (NOTE_P (x) || LABEL_P (x));
7836
7837 return 0;
7838 }
7839
7840 /* Generates recovery code for INSN. */
7841 static void
7842 generate_recovery_code (rtx_insn *insn)
7843 {
7844 if (TODO_SPEC (insn) & BEGIN_SPEC)
7845 begin_speculative_block (insn);
7846
7847 /* Here we have an insn with no dependencies on
7848 instructions other than CHECK_SPEC ones. */
7849
7850 if (TODO_SPEC (insn) & BE_IN_SPEC)
7851 add_to_speculative_block (insn);
7852 }
7853
7854 /* Helper function.
7855 Try to add speculative dependencies of type FS between TWIN and the
7856 consumers of INSN's forward dependencies. */
7857 static void
7858 process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
7859 {
7860 sd_iterator_def sd_it;
7861 dep_t dep;
7862
7863 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7864 {
7865 ds_t ds;
7866 rtx_insn *consumer;
7867
7868 consumer = DEP_CON (dep);
7869
7870 ds = DEP_STATUS (dep);
7871
7872 if (/* If we want to create speculative dep. */
7873 fs
7874 /* And we can do that because this is a true dep. */
7875 && (ds & DEP_TYPES) == DEP_TRUE)
7876 {
7877 gcc_assert (!(ds & BE_IN_SPEC));
7878
7879 if (/* If this dep can be overcome with 'begin speculation'. */
7880 ds & BEGIN_SPEC)
7881 /* Then we have a choice: keep the dep 'begin speculative'
7882 or transform it into 'be in speculative'. */
7883 {
7884 if (/* In try_ready we assert that once an insn has become ready
7885 it can be removed from the ready (or queue) list only
7886 due to a backend decision. Hence we can't let the
7887 probability of the speculative dep decrease. */
7888 ds_weak (ds) <= ds_weak (fs))
7889 {
7890 ds_t new_ds;
7891
7892 new_ds = (ds & ~BEGIN_SPEC) | fs;
7893
7894 if (/* consumer can 'be in speculative'. */
7895 sched_insn_is_legitimate_for_speculation_p (consumer,
7896 new_ds))
7897 /* Transform it to be in speculative. */
7898 ds = new_ds;
7899 }
7900 }
7901 else
7902 /* Mark the dep as 'be in speculative'. */
7903 ds |= fs;
7904 }
7905
7906 {
7907 dep_def _new_dep, *new_dep = &_new_dep;
7908
7909 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7910 sd_add_dep (new_dep, false);
7911 }
7912 }
7913 }
7914
7915 /* Generates recovery code for BEGIN speculative INSN. */
7916 static void
7917 begin_speculative_block (rtx_insn *insn)
7918 {
7919 if (TODO_SPEC (insn) & BEGIN_DATA)
7920 nr_begin_data++;
7921 if (TODO_SPEC (insn) & BEGIN_CONTROL)
7922 nr_begin_control++;
7923
7924 create_check_block_twin (insn, false);
7925
7926 TODO_SPEC (insn) &= ~BEGIN_SPEC;
7927 }
7928
7929 static void haifa_init_insn (rtx_insn *);
7930
7931 /* Generates recovery code for BE_IN speculative INSN. */
7932 static void
7933 add_to_speculative_block (rtx_insn *insn)
7934 {
7935 ds_t ts;
7936 sd_iterator_def sd_it;
7937 dep_t dep;
7938 auto_vec<rtx_insn *, 10> twins;
7939
7940 ts = TODO_SPEC (insn);
7941 gcc_assert (!(ts & ~BE_IN_SPEC));
7942
7943 if (ts & BE_IN_DATA)
7944 nr_be_in_data++;
7945 if (ts & BE_IN_CONTROL)
7946 nr_be_in_control++;
7947
7948 TODO_SPEC (insn) &= ~BE_IN_SPEC;
7949 gcc_assert (!TODO_SPEC (insn));
7950
7951 DONE_SPEC (insn) |= ts;
7952
7953 /* First we convert all simple checks to branchy ones. */
7954 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7955 sd_iterator_cond (&sd_it, &dep);)
7956 {
7957 rtx_insn *check = DEP_PRO (dep);
7958
7959 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7960 {
7961 create_check_block_twin (check, true);
7962
7963 /* Restart search. */
7964 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7965 }
7966 else
7967 /* Continue search. */
7968 sd_iterator_next (&sd_it);
7969 }
7970
7971 auto_vec<rtx_insn *> priorities_roots;
7972 clear_priorities (insn, &priorities_roots);
7973
7974 while (1)
7975 {
7976 rtx_insn *check, *twin;
7977 basic_block rec;
7978
7979 /* Get the first backward dependency of INSN. */
7980 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7981 if (!sd_iterator_cond (&sd_it, &dep))
7982 /* INSN has no backward dependencies left. */
7983 break;
7984
7985 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7986 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7987 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7988
7989 check = DEP_PRO (dep);
7990
7991 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
7992 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
7993
7994 rec = BLOCK_FOR_INSN (check);
7995
7996 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
7997 haifa_init_insn (twin);
7998
7999 sd_copy_back_deps (twin, insn, true);
8000
8001 if (sched_verbose && spec_info->dump)
8002 /* INSN_BB (insn) isn't determined for twin insns yet.
8003 So we can't use current_sched_info->print_insn. */
8004 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
8005 INSN_UID (twin), rec->index);
8006
8007 twins.safe_push (twin);
8008
8009 /* Add dependences between TWIN and all appropriate
8010 instructions from REC. */
8011 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
8012 {
8013 rtx_insn *pro = DEP_PRO (dep);
8014
8015 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
8016
8017 /* INSN might have dependencies on instructions from
8018 several recovery blocks. In this iteration we process those
8019 producers that reside in REC. */
8020 if (BLOCK_FOR_INSN (pro) == rec)
8021 {
8022 dep_def _new_dep, *new_dep = &_new_dep;
8023
8024 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
8025 sd_add_dep (new_dep, false);
8026 }
8027 }
8028
8029 process_insn_forw_deps_be_in_spec (insn, twin, ts);
8030
8031 /* Remove all dependencies between INSN and insns in REC. */
8032 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8033 sd_iterator_cond (&sd_it, &dep);)
8034 {
8035 rtx_insn *pro = DEP_PRO (dep);
8036
8037 if (BLOCK_FOR_INSN (pro) == rec)
8038 sd_delete_dep (sd_it);
8039 else
8040 sd_iterator_next (&sd_it);
8041 }
8042 }
8043
8044 /* We couldn't have added the dependencies between INSN and TWINS earlier
8045 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
8046 unsigned int i;
8047 rtx_insn *twin;
8048 FOR_EACH_VEC_ELT_REVERSE (twins, i, twin)
8049 {
8050 dep_def _new_dep, *new_dep = &_new_dep;
8051
8052 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8053 sd_add_dep (new_dep, false);
8054 }
8055
8056 calc_priorities (priorities_roots);
8057 }
8058
8059 /* Extend the array pointed to by P and zero-fill only the new part. */
8060 void *
8061 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
8062 {
8063 gcc_assert (new_nmemb >= old_nmemb);
8064 p = XRESIZEVAR (void, p, new_nmemb * size);
8065 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
8066 return p;
8067 }
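/* Usage sketch (hypothetical values): growing a cleared array from 4 to
   8 elements keeps the old contents and zeroes only the new tail:

     int *v = XCNEWVEC (int, 4);
     v = (int *) xrecalloc (v, 8, 4, sizeof (int));
     ... v[0..3] are unchanged, v[4..7] are now zero ...  */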
8068
8069 /* Helper function.
8070 Find fallthru edge from PRED. */
8071 edge
8072 find_fallthru_edge_from (basic_block pred)
8073 {
8074 edge e;
8075 basic_block succ;
8076
8077 succ = pred->next_bb;
8078 gcc_assert (succ->prev_bb == pred);
8079
8080 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
8081 {
8082 e = find_fallthru_edge (pred->succs);
8083
8084 if (e)
8085 {
8086 gcc_assert (e->dest == succ || e->dest->index == EXIT_BLOCK);
8087 return e;
8088 }
8089 }
8090 else
8091 {
8092 e = find_fallthru_edge (succ->preds);
8093
8094 if (e)
8095 {
8096 gcc_assert (e->src == pred);
8097 return e;
8098 }
8099 }
8100
8101 return NULL;
8102 }
8103
8104 /* Extend per basic block data structures. */
8105 static void
8106 sched_extend_bb (void)
8107 {
8108 /* The following is done to keep current_sched_info->next_tail non-null. */
8109 rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
8110 rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
8111 if (NEXT_INSN (end) == 0
8112 || (!NOTE_P (insn)
8113 && !LABEL_P (insn)
8114 /* Don't emit a NOTE if it would end up before a BARRIER. */
8115 && !BARRIER_P (next_nondebug_insn (end))))
8116 {
8117 rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
8118 /* Make note appear outside BB. */
8119 set_block_for_insn (note, NULL);
8120 BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
8121 }
8122 }
8123
8124 /* Init per basic block data structures. */
8125 void
8126 sched_init_bbs (void)
8127 {
8128 sched_extend_bb ();
8129 }
8130
8131 /* Initialize BEFORE_RECOVERY variable. */
8132 static void
8133 init_before_recovery (basic_block *before_recovery_ptr)
8134 {
8135 basic_block last;
8136 edge e;
8137
8138 last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
8139 e = find_fallthru_edge_from (last);
8140
8141 if (e)
8142 {
8143 /* We create two basic blocks:
8144 1. A single-instruction block that is inserted right after E->SRC
8145 and jumps to
8146 2. An empty block right before EXIT_BLOCK.
8147 Recovery blocks will be emitted between these two blocks. */
8148
8149 basic_block single, empty;
8150
8151 /* If the fallthrough edge to exit we've found is from the block we've
8152 created before, don't do anything more. */
8153 if (last == after_recovery)
8154 return;
8155
8156 adding_bb_to_current_region_p = false;
8157
8158 single = sched_create_empty_bb (last);
8159 empty = sched_create_empty_bb (single);
8160
8161 /* Add new blocks to the root loop. */
8162 if (current_loops != NULL)
8163 {
8164 add_bb_to_loop (single, (*current_loops->larray)[0]);
8165 add_bb_to_loop (empty, (*current_loops->larray)[0]);
8166 }
8167
8168 single->count = last->count;
8169 empty->count = last->count;
8170 BB_COPY_PARTITION (single, last);
8171 BB_COPY_PARTITION (empty, last);
8172
8173 redirect_edge_succ (e, single);
8174 make_single_succ_edge (single, empty, 0);
8175 make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
8176 EDGE_FALLTHRU);
8177
8178 rtx_code_label *label = block_label (empty);
8179 rtx_jump_insn *x = emit_jump_insn_after (targetm.gen_jump (label),
8180 BB_END (single));
8181 JUMP_LABEL (x) = label;
8182 LABEL_NUSES (label)++;
8183 haifa_init_insn (x);
8184
8185 emit_barrier_after (x);
8186
8187 sched_init_only_bb (empty, NULL);
8188 sched_init_only_bb (single, NULL);
8189 sched_extend_bb ();
8190
8191 adding_bb_to_current_region_p = true;
8192 before_recovery = single;
8193 after_recovery = empty;
8194
8195 if (before_recovery_ptr)
8196 *before_recovery_ptr = before_recovery;
8197
8198 if (sched_verbose >= 2 && spec_info->dump)
8199 fprintf (spec_info->dump,
8200 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
8201 last->index, single->index, empty->index);
8202 }
8203 else
8204 before_recovery = last;
8205 }
8206
8207 /* Return the new recovery block. */
8208 basic_block
8209 sched_create_recovery_block (basic_block *before_recovery_ptr)
8210 {
8211 rtx_insn *barrier;
8212 basic_block rec;
8213
8214 haifa_recovery_bb_recently_added_p = true;
8215 haifa_recovery_bb_ever_added_p = true;
8216
8217 init_before_recovery (before_recovery_ptr);
8218
8219 barrier = get_last_bb_insn (before_recovery);
8220 gcc_assert (BARRIER_P (barrier));
8221
8222 rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);
8223
8224 rec = create_basic_block (label, label, before_recovery);
8225
8226 /* A recovery block always ends with an unconditional jump. */
8227 emit_barrier_after (BB_END (rec));
8228
8229 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
8230 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
8231
8232 if (sched_verbose && spec_info->dump)
8233 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
8234 rec->index);
8235
8236 return rec;
8237 }
8238
8239 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
8240 and emit necessary jumps. */
8241 void
8242 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
8243 basic_block second_bb)
8244 {
8245 int edge_flags;
8246
8247 /* This fixes the incoming edge. */
8248 /* ??? Which other flags should be specified? */
8249 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
8250 /* The partition type is the same if it is "unpartitioned". */
8251 edge_flags = EDGE_CROSSING;
8252 else
8253 edge_flags = 0;
8254
8255 edge e2 = single_succ_edge (first_bb);
8256 edge e = make_edge (first_bb, rec, edge_flags);
8257
8258 /* TODO: The actual probability can be determined; it is computed as
8259 the 'todo_spec' variable in create_check_block_twin and as
8260 `check_ds' in create_speculation_check in sel-sched.c. */
8261 e->probability = profile_probability::very_unlikely ();
8262 rec->count = e->count ();
8263 e2->probability = e->probability.invert ();
8264
8265 rtx_code_label *label = block_label (second_bb);
8266 rtx_jump_insn *jump = emit_jump_insn_after (targetm.gen_jump (label),
8267 BB_END (rec));
8268 JUMP_LABEL (jump) = label;
8269 LABEL_NUSES (label)++;
8270
8271 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
8272 /* The partition type is the same if it is "unpartitioned". */
8273 {
8274 /* Rewritten from cfgrtl.c. */
8275 if (crtl->has_bb_partition && targetm_common.have_named_sections)
8276 {
8277 /* We don't need the same note for the check because
8278 any_condjump_p (check) == true. */
8279 CROSSING_JUMP_P (jump) = 1;
8280 }
8281 edge_flags = EDGE_CROSSING;
8282 }
8283 else
8284 edge_flags = 0;
8285
8286 make_single_succ_edge (rec, second_bb, edge_flags);
8287 if (dom_info_available_p (CDI_DOMINATORS))
8288 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
8289 }
8290
8291 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
8292 INSN is a simple check that should be converted to a branchy one. */
8293 static void
8294 create_check_block_twin (rtx_insn *insn, bool mutate_p)
8295 {
8296 basic_block rec;
8297 rtx_insn *label, *check, *twin;
8298 rtx check_pat;
8299 ds_t fs;
8300 sd_iterator_def sd_it;
8301 dep_t dep;
8302 dep_def _new_dep, *new_dep = &_new_dep;
8303 ds_t todo_spec;
8304
8305 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
8306
8307 if (!mutate_p)
8308 todo_spec = TODO_SPEC (insn);
8309 else
8310 {
8311 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
8312 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
8313
8314 todo_spec = CHECK_SPEC (insn);
8315 }
8316
8317 todo_spec &= SPECULATIVE;
8318
8319 /* Create recovery block. */
8320 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
8321 {
8322 rec = sched_create_recovery_block (NULL);
8323 label = BB_HEAD (rec);
8324 }
8325 else
8326 {
8327 rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
8328 label = NULL;
8329 }
8330
8331 /* Emit CHECK. */
8332 check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);
8333
8334 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8335 {
8336 /* To have mem_reg alive at the beginning of second_bb,
8337 we emit the check BEFORE the insn, so that after splitting
8338 the insn will be at the beginning of second_bb, which will
8339 provide us with the correct life information. */
8340 check = emit_jump_insn_before (check_pat, insn);
8341 JUMP_LABEL (check) = label;
8342 LABEL_NUSES (label)++;
8343 }
8344 else
8345 check = emit_insn_before (check_pat, insn);
8346
8347 /* Extend data structures. */
8348 haifa_init_insn (check);
8349
8350 /* CHECK is being added to current region. Extend ready list. */
8351 gcc_assert (sched_ready_n_insns != -1);
8352 sched_extend_ready_list (sched_ready_n_insns + 1);
8353
8354 if (current_sched_info->add_remove_insn)
8355 current_sched_info->add_remove_insn (insn, 0);
8356
8357 RECOVERY_BLOCK (check) = rec;
8358
8359 if (sched_verbose && spec_info->dump)
8360 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
8361 (*current_sched_info->print_insn) (check, 0));
8362
8363 gcc_assert (ORIG_PAT (insn));
8364
8365 /* Initialize TWIN (twin is a duplicate of original instruction
8366 in the recovery block). */
8367 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8368 {
8369 sd_iterator_def sd_it;
8370 dep_t dep;
8371
8372 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
8373 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
8374 {
8375 struct _dep _dep2, *dep2 = &_dep2;
8376
8377 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
8378
8379 sd_add_dep (dep2, true);
8380 }
8381
8382 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
8383 haifa_init_insn (twin);
8384
8385 if (sched_verbose && spec_info->dump)
8386 /* INSN_BB (insn) isn't determined for twin insns yet.
8387 So we can't use current_sched_info->print_insn. */
8388 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
8389 INSN_UID (twin), rec->index);
8390 }
8391 else
8392 {
8393 ORIG_PAT (check) = ORIG_PAT (insn);
8394 HAS_INTERNAL_DEP (check) = 1;
8395 twin = check;
8396 /* ??? We probably should change all OUTPUT dependencies to
8397 (TRUE | OUTPUT). */
8398 }
8399
8400 /* Copy all resolved back dependencies of INSN to TWIN. This will
8401 provide correct value for INSN_TICK (TWIN). */
8402 sd_copy_back_deps (twin, insn, true);
8403
8404 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8405 /* In case of branchy check, fix CFG. */
8406 {
8407 basic_block first_bb, second_bb;
8408 rtx_insn *jump;
8409
8410 first_bb = BLOCK_FOR_INSN (check);
8411 second_bb = sched_split_block (first_bb, check);
8412
8413 sched_create_recovery_edges (first_bb, rec, second_bb);
8414
8415 sched_init_only_bb (second_bb, first_bb);
8416 sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
8417
8418 jump = BB_END (rec);
8419 haifa_init_insn (jump);
8420 }
8421
8422 /* Move backward dependences from INSN to CHECK and
8423 move forward dependences from INSN to TWIN. */
8424
8425 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
8426 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8427 {
8428 rtx_insn *pro = DEP_PRO (dep);
8429 ds_t ds;
8430
8431 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
8432 check --TRUE--> producer ??? or ANTI ???
8433 twin --TRUE--> producer
8434 twin --ANTI--> check
8435
8436 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
8437 check --ANTI--> producer
8438 twin --ANTI--> producer
8439 twin --ANTI--> check
8440
8441 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
8442 check ~~TRUE~~> producer
8443 twin ~~TRUE~~> producer
8444 twin --ANTI--> check */
8445
8446 ds = DEP_STATUS (dep);
8447
8448 if (ds & BEGIN_SPEC)
8449 {
8450 gcc_assert (!mutate_p);
8451 ds &= ~BEGIN_SPEC;
8452 }
8453
8454 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
8455 sd_add_dep (new_dep, false);
8456
8457 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8458 {
8459 DEP_CON (new_dep) = twin;
8460 sd_add_dep (new_dep, false);
8461 }
8462 }
8463
8464 /* Second, remove backward dependencies of INSN. */
8465 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8466 sd_iterator_cond (&sd_it, &dep);)
8467 {
8468 if ((DEP_STATUS (dep) & BEGIN_SPEC)
8469 || mutate_p)
8470 /* We can delete this dep because we overcome it with
8471 BEGIN_SPECULATION. */
8472 sd_delete_dep (sd_it);
8473 else
8474 sd_iterator_next (&sd_it);
8475 }
8476
8477 /* Future Speculations. Determine what BE_IN speculations will be like. */
8478 fs = 0;
8479
8480 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
8481 here. */
8482
8483 gcc_assert (!DONE_SPEC (insn));
8484
8485 if (!mutate_p)
8486 {
8487 ds_t ts = TODO_SPEC (insn);
8488
8489 DONE_SPEC (insn) = ts & BEGIN_SPEC;
8490 CHECK_SPEC (check) = ts & BEGIN_SPEC;
8491
8492 /* The weakness (success probability) of future speculations depends
8493 solely upon the initial BEGIN speculation. */
8494 if (ts & BEGIN_DATA)
8495 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
8496 if (ts & BEGIN_CONTROL)
8497 fs = set_dep_weak (fs, BE_IN_CONTROL,
8498 get_dep_weak (ts, BEGIN_CONTROL));
8499 }
8500 else
8501 CHECK_SPEC (check) = CHECK_SPEC (insn);
8502
8503 /* Future speculations: call the helper. */
8504 process_insn_forw_deps_be_in_spec (insn, twin, fs);
8505
8506 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8507 {
8508 /* Which types of dependencies we should use here is,
8509 generally, a machine-dependent question... But, for now,
8510 it is not. */
8511
8512 if (!mutate_p)
8513 {
8514 init_dep (new_dep, insn, check, REG_DEP_TRUE);
8515 sd_add_dep (new_dep, false);
8516
8517 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8518 sd_add_dep (new_dep, false);
8519 }
8520 else
8521 {
8522 if (spec_info->dump)
8523 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
8524 (*current_sched_info->print_insn) (insn, 0));
8525
8526 /* Remove all dependencies of the INSN. */
8527 {
8528 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
8529 | SD_LIST_BACK
8530 | SD_LIST_RES_BACK));
8531 while (sd_iterator_cond (&sd_it, &dep))
8532 sd_delete_dep (sd_it);
8533 }
8534
8535 /* If the former check (INSN) has already been moved to the ready (or
8536 queue) list, add the new check (CHECK) there too. */
8537 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
8538 try_ready (check);
8539
8540 /* Remove old check from instruction stream and free its
8541 data. */
8542 sched_remove_insn (insn);
8543 }
8544
8545 init_dep (new_dep, check, twin, REG_DEP_ANTI);
8546 sd_add_dep (new_dep, false);
8547 }
8548 else
8549 {
8550 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
8551 sd_add_dep (new_dep, false);
8552 }
8553
8554 if (!mutate_p)
8555 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
8556 because it'll be done later in add_to_speculative_block. */
8557 {
8558 auto_vec<rtx_insn *> priorities_roots;
8559
8560 clear_priorities (twin, &priorities_roots);
8561 calc_priorities (priorities_roots);
8562 }
8563 }
8564
8565 /* Remove dependencies between instructions in the recovery block REC
8566 and the usual region instructions. Inner dependences are kept so
8567 that it won't be necessary to recompute them. */
8568 static void
8569 fix_recovery_deps (basic_block rec)
8570 {
8571 rtx_insn *note, *insn, *jump;
8572 auto_vec<rtx_insn *, 10> ready_list;
8573 auto_bitmap in_ready;
8574
8575 /* NOTE - a basic block note. */
8576 note = NEXT_INSN (BB_HEAD (rec));
8577 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8578 insn = BB_END (rec);
8579 gcc_assert (JUMP_P (insn));
8580 insn = PREV_INSN (insn);
8581
8582 do
8583 {
8584 sd_iterator_def sd_it;
8585 dep_t dep;
8586
8587 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
8588 sd_iterator_cond (&sd_it, &dep);)
8589 {
8590 rtx_insn *consumer = DEP_CON (dep);
8591
8592 if (BLOCK_FOR_INSN (consumer) != rec)
8593 {
8594 sd_delete_dep (sd_it);
8595
8596 if (bitmap_set_bit (in_ready, INSN_LUID (consumer)))
8597 ready_list.safe_push (consumer);
8598 }
8599 else
8600 {
8601 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
8602
8603 sd_iterator_next (&sd_it);
8604 }
8605 }
8606
8607 insn = PREV_INSN (insn);
8608 }
8609 while (insn != note);
8610
8611 /* Try to add instructions to the ready or queue list. */
8612 unsigned int i;
8613 rtx_insn *temp;
8614 FOR_EACH_VEC_ELT_REVERSE (ready_list, i, temp)
8615 try_ready (temp);
8616
8617 /* Fixing jump's dependences. */
8618 insn = BB_HEAD (rec);
8619 jump = BB_END (rec);
8620
8621 gcc_assert (LABEL_P (insn));
8622 insn = NEXT_INSN (insn);
8623
8624 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
8625 add_jump_dependencies (insn, jump);
8626 }
8627
8628 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
8629 instruction data. */
8630 static bool
8631 haifa_change_pattern (rtx_insn *insn, rtx new_pat)
8632 {
8633 int t;
8634
8635 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
8636 if (!t)
8637 return false;
8638
8639 update_insn_after_change (insn);
8640 return true;
8641 }
8642
8643 /* -1 - can't speculate,
8644 0 - for speculation with REQUEST mode it is OK to use
8645 current instruction pattern,
8646 1 - need to change pattern for *NEW_PAT to be speculative. */
8647 int
8648 sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8649 {
8650 gcc_assert (current_sched_info->flags & DO_SPECULATION
8651 && (request & SPECULATIVE)
8652 && sched_insn_is_legitimate_for_speculation_p (insn, request));
8653
8654 if ((request & spec_info->mask) != request)
8655 return -1;
8656
8657 if (request & BE_IN_SPEC
8658 && !(request & BEGIN_SPEC))
8659 return 0;
8660
8661 return targetm.sched.speculate_insn (insn, request, new_pat);
8662 }
8663
8664 static int
8665 haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8666 {
8667 gcc_assert (sched_deps_info->generate_spec_deps
8668 && !IS_SPECULATION_CHECK_P (insn));
8669
8670 if (HAS_INTERNAL_DEP (insn)
8671 || SCHED_GROUP_P (insn))
8672 return -1;
8673
8674 return sched_speculate_insn (insn, request, new_pat);
8675 }
8676
8677 /* Print some information about block BB, which starts with HEAD and
8678 ends with TAIL, before scheduling it.
8679 I is zero if the scheduler is about to start with a fresh ebb. */
8680 static void
8681 dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
8682 {
8683 if (!i)
8684 fprintf (sched_dump,
8685 ";; ======================================================\n");
8686 else
8687 fprintf (sched_dump,
8688 ";; =====================ADVANCING TO=====================\n");
8689 fprintf (sched_dump,
8690 ";; -- basic block %d from %d to %d -- %s reload\n",
8691 bb->index, INSN_UID (head), INSN_UID (tail),
8692 (reload_completed ? "after" : "before"));
8693 fprintf (sched_dump,
8694 ";; ======================================================\n");
8695 fprintf (sched_dump, "\n");
8696 }
8697
8698 /* Unlink basic block notes and labels and save them, so they
8699 can easily be restored. We unlink basic block notes in the EBB to
8700 provide backward compatibility with the previous code, as target backends
8701 assume that there'll be only instructions between
8702 current_sched_info->{head and tail}. We restore these notes as soon
8703 as we can.
8704 FIRST (LAST) is the first (last) basic block in the ebb.
8705 NB: In the usual case (FIRST == LAST) nothing is really done. */
8706 void
8707 unlink_bb_notes (basic_block first, basic_block last)
8708 {
8709 /* We DON'T unlink basic block notes of the first block in the ebb. */
8710 if (first == last)
8711 return;
8712
8713 bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
8714
8715 /* Make a sentinel. */
8716 if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
8717 bb_header[last->next_bb->index] = 0;
8718
8719 first = first->next_bb;
8720 do
8721 {
8722 rtx_insn *prev, *label, *note, *next;
8723
8724 label = BB_HEAD (last);
8725 if (LABEL_P (label))
8726 note = NEXT_INSN (label);
8727 else
8728 note = label;
8729 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8730
8731 prev = PREV_INSN (label);
8732 next = NEXT_INSN (note);
8733 gcc_assert (prev && next);
8734
8735 SET_NEXT_INSN (prev) = next;
8736 SET_PREV_INSN (next) = prev;
8737
8738 bb_header[last->index] = label;
8739
8740 if (last == first)
8741 break;
8742
8743 last = last->prev_bb;
8744 }
8745 while (1);
8746 }
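/* Sketch of the effect on a hypothetical ebb B1 -> B2 -> B3: the
   label/basic-block-note headers of B2 and B3 are spliced out of the
   insn stream and stashed in bb_header[B2->index] and
   bb_header[B3->index], while the entry for B3's successor is zeroed
   as the sentinel that restore_bb_notes stops at.  B1's header stays
   in place.  */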
8747
8748 /* Restore basic block notes.
8749 FIRST is the first basic block in the ebb. */
8750 static void
8751 restore_bb_notes (basic_block first)
8752 {
8753 if (!bb_header)
8754 return;
8755
8756 /* We DON'T unlink basic block notes of the first block in the ebb. */
8757 first = first->next_bb;
8758 /* Remember: FIRST is actually the second basic block in the ebb. */
8759
8760 while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
8761 && bb_header[first->index])
8762 {
8763 rtx_insn *prev, *label, *note, *next;
8764
8765 label = bb_header[first->index];
8766 prev = PREV_INSN (label);
8767 next = NEXT_INSN (prev);
8768
8769 if (LABEL_P (label))
8770 note = NEXT_INSN (label);
8771 else
8772 note = label;
8773 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8774
8775 bb_header[first->index] = 0;
8776
8777 SET_NEXT_INSN (prev) = label;
8778 SET_NEXT_INSN (note) = next;
8779 SET_PREV_INSN (next) = note;
8780
8781 first = first->next_bb;
8782 }
8783
8784 free (bb_header);
8785 bb_header = 0;
8786 }
8787
8788 /* Helper function.
8789 Fix CFG after both in- and inter-block movement of
8790 control_flow_insn_p JUMP. */
8791 static void
8792 fix_jump_move (rtx_insn *jump)
8793 {
8794 basic_block bb, jump_bb, jump_bb_next;
8795
8796 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8797 jump_bb = BLOCK_FOR_INSN (jump);
8798 jump_bb_next = jump_bb->next_bb;
8799
8800 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
8801 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
8802
8803 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
8804 /* If jump_bb_next is not empty. */
8805 BB_END (jump_bb) = BB_END (jump_bb_next);
8806
8807 if (BB_END (bb) != PREV_INSN (jump))
8808 /* Then there are instructions after the jump that should be moved
8809 to jump_bb_next. */
8810 BB_END (jump_bb_next) = BB_END (bb);
8811 else
8812 /* Otherwise jump_bb_next is empty. */
8813 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
8814
8815 /* To make assertion in move_insn happy. */
8816 BB_END (bb) = PREV_INSN (jump);
8817
8818 update_bb_for_insn (jump_bb_next);
8819 }
8820
8821 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
8822 static void
8823 move_block_after_check (rtx_insn *jump)
8824 {
8825 basic_block bb, jump_bb, jump_bb_next;
8826 vec<edge, va_gc> *t;
8827
8828 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8829 jump_bb = BLOCK_FOR_INSN (jump);
8830 jump_bb_next = jump_bb->next_bb;
8831
8832 update_bb_for_insn (jump_bb);
8833
8834 gcc_assert (IS_SPECULATION_CHECK_P (jump)
8835 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
8836
8837 unlink_block (jump_bb_next);
8838 link_block (jump_bb_next, bb);
8839
8840 t = bb->succs;
8841 bb->succs = 0;
8842 move_succs (&(jump_bb->succs), bb);
8843 move_succs (&(jump_bb_next->succs), jump_bb);
8844 move_succs (&t, jump_bb_next);
8845
8846 df_mark_solutions_dirty ();
8847
8848 common_sched_info->fix_recovery_cfg
8849 (bb->index, jump_bb->index, jump_bb_next->index);
8850 }
8851
8852 /* Helper function for move_block_after_check.
8853 This function attaches the edge vector pointed to by SUCCSP to
8854 block TO. */
8855 static void
8856 move_succs (vec<edge, va_gc> **succsp, basic_block to)
8857 {
8858 edge e;
8859 edge_iterator ei;
8860
8861 gcc_assert (to->succs == 0);
8862
8863 to->succs = *succsp;
8864
8865 FOR_EACH_EDGE (e, ei, to->succs)
8866 e->src = to;
8867
8868 *succsp = 0;
8869 }
8870
8871 /* Remove INSN from the instruction stream.
8872 INSN should not have any dependencies. */
8873 static void
8874 sched_remove_insn (rtx_insn *insn)
8875 {
8876 sd_finish_insn (insn);
8877
8878 change_queue_index (insn, QUEUE_NOWHERE);
8879 current_sched_info->add_remove_insn (insn, 1);
8880 delete_insn (insn);
8881 }
8882
8883 /* Clear the priorities of all instructions that are forward dependent
8884 on INSN. Store in the vector pointed to by ROOTS_PTR the insns on
8885 which priority () should be invoked to initialize all cleared priorities. */
8886 static void
8887 clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
8888 {
8889 sd_iterator_def sd_it;
8890 dep_t dep;
8891 bool insn_is_root_p = true;
8892
8893 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
8894
8895 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8896 {
8897 rtx_insn *pro = DEP_PRO (dep);
8898
8899 if (INSN_PRIORITY_STATUS (pro) >= 0
8900 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
8901 {
8902 /* If DEP doesn't contribute to priority then INSN itself should
8903 be added to priority roots. */
8904 if (contributes_to_priority_p (dep))
8905 insn_is_root_p = false;
8906
8907 INSN_PRIORITY_STATUS (pro) = -1;
8908 clear_priorities (pro, roots_ptr);
8909 }
8910 }
8911
8912 if (insn_is_root_p)
8913 roots_ptr->safe_push (insn);
8914 }
8915
8916 /* Recompute the priorities of instructions whose priorities might have been
8917 changed. ROOTS is a vector of instructions whose priority computation will
8918 trigger initialization of all cleared priorities. */
8919 static void
8920 calc_priorities (rtx_vec_t roots)
8921 {
8922 int i;
8923 rtx_insn *insn;
8924
8925 FOR_EACH_VEC_ELT (roots, i, insn)
8926 priority (insn);
8927 }
8928
8929
8930 /* Add dependences between JUMP and other instructions in the recovery
8931 block. INSN is the first insn in the recovery block. */
8932 static void
8933 add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
8934 {
8935 do
8936 {
8937 insn = NEXT_INSN (insn);
8938 if (insn == jump)
8939 break;
8940
8941 if (dep_list_size (insn, SD_LIST_FORW) == 0)
8942 {
8943 dep_def _new_dep, *new_dep = &_new_dep;
8944
8945 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
8946 sd_add_dep (new_dep, false);
8947 }
8948 }
8949 while (1);
8950
8951 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
8952 }
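/* In other words: every insn in the recovery block on which nothing
   else in the block depends (dep_list_size (insn, SD_LIST_FORW) == 0)
   receives an anti dependence to JUMP, which guarantees that the jump
   cannot be scheduled before any of them.  */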
8953
8954 /* Extend data structures for logical insn UID. */
8955 void
8956 sched_extend_luids (void)
8957 {
8958 int new_luids_max_uid = get_max_uid () + 1;
8959
8960 sched_luids.safe_grow_cleared (new_luids_max_uid);
8961 }
8962
8963 /* Initialize LUID for INSN. */
8964 void
8965 sched_init_insn_luid (rtx_insn *insn)
8966 {
8967 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
8968 int luid;
8969
8970 if (i >= 0)
8971 {
8972 luid = sched_max_luid;
8973 sched_max_luid += i;
8974 }
8975 else
8976 luid = -1;
8977
8978 SET_INSN_LUID (insn, luid);
8979 }
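/* Note on the arithmetic above: with haifa_luid_for_non_insn, notes and
   labels yield I == 0, so they are assigned the current sched_max_luid
   without advancing it -- they effectively share a luid with the next
   insn.  A negative I (possible with other luid_for_non_insn hooks)
   results in luid -1, i.e. no luid at all.  */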
8980
8981 /* Initialize luids for BBS.
8982 The hook common_sched_info->luid_for_non_insn () is used to determine
8983 if notes, labels, etc. need luids. */
8984 void
8985 sched_init_luids (bb_vec_t bbs)
8986 {
8987 int i;
8988 basic_block bb;
8989
8990 sched_extend_luids ();
8991 FOR_EACH_VEC_ELT (bbs, i, bb)
8992 {
8993 rtx_insn *insn;
8994
8995 FOR_BB_INSNS (bb, insn)
8996 sched_init_insn_luid (insn);
8997 }
8998 }
8999
9000 /* Free LUIDs. */
9001 void
9002 sched_finish_luids (void)
9003 {
9004 sched_luids.release ();
9005 sched_max_luid = 1;
9006 }
9007
9008 /* Return logical uid of INSN. Helpful while debugging. */
9009 int
9010 insn_luid (rtx_insn *insn)
9011 {
9012 return INSN_LUID (insn);
9013 }
9014
9015 /* Extend per insn data in the target. */
9016 void
9017 sched_extend_target (void)
9018 {
9019 if (targetm.sched.h_i_d_extended)
9020 targetm.sched.h_i_d_extended ();
9021 }
9022
9023 /* Extend global scheduler structures (those that live across calls to
9024 schedule_block) to include information about just emitted insns. */
9025 static void
9026 extend_h_i_d (void)
9027 {
9028 int reserve = (get_max_uid () + 1 - h_i_d.length ());
9029 if (reserve > 0
9030 && ! h_i_d.space (reserve))
9031 {
9032 h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
9033 sched_extend_target ();
9034 }
9035 }
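/* Growth example (illustrative numbers): if get_max_uid () returns 100
   while h_i_d holds 80 entries, RESERVE is 21; if the vector has no
   spare room it is grown (zero-cleared) to 3 * 100 / 2 == 150 entries,
   amortizing the cost of future extensions.  */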
9036
9037 /* Initialize the h_i_d entry of INSN with default values.
9038 Values that are not explicitly initialized here hold zero. */
9039 static void
9040 init_h_i_d (rtx_insn *insn)
9041 {
9042 if (INSN_LUID (insn) > 0)
9043 {
9044 INSN_COST (insn) = -1;
9045 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
9046 INSN_TICK (insn) = INVALID_TICK;
9047 INSN_EXACT_TICK (insn) = INVALID_TICK;
9048 INTER_TICK (insn) = INVALID_TICK;
9049 TODO_SPEC (insn) = HARD_DEP;
9050 INSN_AUTOPREF_MULTIPASS_DATA (insn)[0].status
9051 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
9052 INSN_AUTOPREF_MULTIPASS_DATA (insn)[1].status
9053 = AUTOPREF_MULTIPASS_DATA_UNINITIALIZED;
9054 }
9055 }
9056
9057 /* Initialize haifa_insn_data for BBS. */
9058 void
9059 haifa_init_h_i_d (bb_vec_t bbs)
9060 {
9061 int i;
9062 basic_block bb;
9063
9064 extend_h_i_d ();
9065 FOR_EACH_VEC_ELT (bbs, i, bb)
9066 {
9067 rtx_insn *insn;
9068
9069 FOR_BB_INSNS (bb, insn)
9070 init_h_i_d (insn);
9071 }
9072 }

/* Finalize haifa_insn_data.  */
void
haifa_finish_h_i_d (void)
{
  int i;
  haifa_insn_data_t data;
  reg_use_data *use, *next_use;
  reg_set_data *set, *next_set;

  FOR_EACH_VEC_ELT (h_i_d, i, data)
    {
      free (data->max_reg_pressure);
      free (data->reg_pressure);
      for (use = data->reg_use_list; use != NULL; use = next_use)
	{
	  next_use = use->next_insn_use;
	  free (use);
	}
      for (set = data->reg_set_list; set != NULL; set = next_set)
	{
	  next_set = set->next_insn_set;
	  free (set);
	}
    }
  h_i_d.release ();
}

/* Init data for the new insn INSN.  */
static void
haifa_init_insn (rtx_insn *insn)
{
  gcc_assert (insn != NULL);

  sched_extend_luids ();
  sched_init_insn_luid (insn);
  sched_extend_target ();
  sched_deps_init (false);
  extend_h_i_d ();
  init_h_i_d (insn);

  if (adding_bb_to_current_region_p)
    {
      sd_init_insn (insn);

      /* Extend dependency caches by one element.  */
      extend_dependency_caches (1, false);
    }
  if (sched_pressure != SCHED_PRESSURE_NONE)
    init_insn_reg_pressure_info (insn);
}
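
/* The call sequence above is the bulk-initialization path
   (sched_init_luids / haifa_init_h_i_d) collapsed to a single insn: the
   luid is assigned before anything else because init_h_i_d only fills
   entries for insns whose luid is positive.  */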

/* Init data for the new basic block BB which comes after AFTER.  */
static void
haifa_init_only_bb (basic_block bb, basic_block after)
{
  gcc_assert (bb != NULL);

  sched_init_bbs ();

  if (common_sched_info->add_block)
    /* This changes only data structures of the front-end.  */
    common_sched_info->add_block (bb, after);
}

/* A generic version of sched_split_block ().  */
basic_block
sched_split_block_1 (basic_block first_bb, rtx after)
{
  edge e;

  e = split_block (first_bb, after);
  gcc_assert (e->src == first_bb);

  /* sched_split_block emits a note if *check == BB_END.  Probably it
     is better to rip that note off.  */

  return e->dest;
}
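
/* For illustration: splitting FIRST_BB = { i1; i2; i3 } with AFTER == i2
   leaves { i1; i2 } in FIRST_BB and returns a new block holding { i3 },
   reached through the fallthrough edge E.  */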

/* A generic version of sched_create_empty_bb ().  */
basic_block
sched_create_empty_bb_1 (basic_block after)
{
  return create_empty_bb (after);
}

/* Insert PAT as an INSN into the schedule and update the necessary data
   structures to account for it.  */
rtx_insn *
sched_emit_insn (rtx pat)
{
  rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
  haifa_init_insn (insn);

  if (current_sched_info->add_remove_insn)
    current_sched_info->add_remove_insn (insn, 0);

  (*current_sched_info->begin_schedule_ready) (insn);
  scheduled_insns.safe_push (insn);

  last_scheduled_insn = insn;
  return insn;
}
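
/* A hypothetical use, assuming the target defines a "nop" pattern so
   that gen_nop () exists: a hook that must fill an issue slot at the
   current scheduling point could do

     rtx_insn *nop = sched_emit_insn (gen_nop ());

   The new insn is treated as already scheduled -- pushed onto
   scheduled_insns and recorded as last_scheduled_insn -- rather than
   passing through the ready list.  */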

/* This function returns a candidate satisfying dispatch constraints from
   the ready list.  */

static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
  int i;
  rtx_insn *insn = ready_element (ready, 0);

  if (ready->n_ready == 1
      || !INSN_P (insn)
      || INSN_CODE (insn) < 0
      || !active_insn_p (insn)
      || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
	{
	  /* Return the I-th element of ready.  */
	  insn = ready_remove (ready, i);
	  return insn;
	}
    }

  if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
    return ready_remove_first (ready);

  for (i = 1; i < ready->n_ready; i++)
    {
      insn = ready_element (ready, i);

      if (!INSN_P (insn)
	  || INSN_CODE (insn) < 0
	  || !active_insn_p (insn))
	continue;

      /* Return the I-th element of ready if it is a compare.  */
      if (targetm.sched.dispatch (insn, IS_CMP))
	return ready_remove (ready, i);
    }

  return ready_remove_first (ready);
}
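
/* To summarize the fallbacks above: take the head insn if it fits the
   current dispatch window; otherwise scan for any ready insn that does
   fit; if the target reports an outright dispatch violation, take the
   head insn; otherwise prefer a compare (IS_CMP); and as a last resort
   take the head insn anyway.  */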

/* Return the number of insns in the ready list.  */

int
number_in_ready (void)
{
  return ready.n_ready;
}

/* Return the I-th insn from the ready list.  */

rtx_insn *
get_ready_element (int i)
{
  return ready_element (&ready, i);
}
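
/* These two accessors are exported so that code outside this file --
   most plausibly a target's targetm.sched.dispatch implementation -- can
   walk the current ready list without knowing its representation.  */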

#endif /* INSN_SCHEDULING */