Account for prologue spills in reg_pressure scheduling
[gcc.git] / gcc / haifa-sched.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2014 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Instruction scheduling pass. This file, along with sched-deps.c,
23 contains the generic parts. The actual entry point for
24 the normal instruction scheduling pass is found in sched-rgn.c.
25
26 We compute insn priorities based on data dependencies. Flow
27 analysis only creates a fraction of the data-dependencies we must
28 observe: namely, only those dependencies which the combiner can be
29 expected to use. For this pass, we must therefore create the
30 remaining dependencies we need to observe; register dependencies,
31 memory dependencies, dependencies to keep function calls in order,
32 and the dependence between a conditional branch and the setting of
33 condition codes are all dealt with here.
34
35 The scheduler first traverses the data flow graph, starting with
36 the last instruction, and proceeding to the first, assigning values
37 to insn_priority as it goes. This sorts the instructions
38 topologically by data dependence.
39
40 Once priorities have been established, we order the insns using
41 list scheduling. This works as follows: starting with a list of
42 all the ready insns, and sorted according to priority number, we
43 schedule the insn from the end of the list by placing its
44 predecessors in the list according to their priority order. We
45 consider this insn scheduled by setting the pointer to the "end" of
46 the list to point to the previous insn. When an insn has no
47 predecessors, we either queue it until sufficient time has elapsed
48 or add it to the ready list. As the instructions are scheduled or
49 when stalls are introduced, the queue advances and dumps insns into
50 the ready list. When all insns down to the lowest priority have
51 been scheduled, the critical path of the basic block has been made
52 as short as possible. The remaining insns are then scheduled in
53 remaining slots.
54
55 The following list shows the order in which we want to break ties
56 among insns in the ready list:
57
58 1. choose insn with the longest path to end of bb, ties
59 broken by
60 2. choose insn with least contribution to register pressure,
61 ties broken by
62 3. prefer in-block over interblock motion, ties broken by
63 4. prefer useful over speculative motion, ties broken by
64 5. choose insn with largest control flow probability, ties
65 broken by
66 6. choose insn with the least dependences upon the previously
67 scheduled insn, or finally
68 7. choose the insn which has the most insns dependent on it.
69 8. choose insn with lowest UID.
70
71 Memory references complicate matters. Only if we can be certain
72 that memory references are not part of the data dependency graph
73 (via true, anti, or output dependence), can we move operations past
74 memory references. To first approximation, reads can be done
75 independently, while writes introduce dependencies. Better
76 approximations will yield fewer dependencies.
77
78 Before reload, an extended analysis of interblock data dependences
79 is required for interblock scheduling. This is performed in
80 compute_block_dependences ().
81
82 Dependencies set up by memory references are treated in exactly the
83 same way as other dependencies, by using insn backward dependences
84 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
85 INSN_FORW_DEPS for the purpose of forward list scheduling.
86
87 Having optimized the critical path, we may have also unduly
88 extended the lifetimes of some registers. If an operation requires
89 that constants be loaded into registers, it is certainly desirable
90 to load those constants as early as necessary, but no earlier.
91 I.e., it will not do to load up a bunch of registers at the
92 beginning of a basic block only to use them at the end, if they
93 could be loaded later, since this may result in excessive register
94 utilization.
95
96 Note that since branches are never in basic blocks, but only end
97 basic blocks, this pass will not move branches. But that is ok,
98 since we can use GNU's delayed branch scheduling pass to take care
99 of this case.
100
101 Also note that no further optimizations based on algebraic
102 identities are performed, so this pass would be a good one to
103 perform instruction splitting, such as breaking up a multiply
104 instruction into shifts and adds where that is profitable.
105
106 Given the memory aliasing analysis that this pass should perform,
107 it should be possible to remove redundant stores to memory, and to
108 load values from registers instead of hitting memory.
109
110 Before reload, speculative insns are moved only if a 'proof' exists
111 that no exception will be caused by this, and if no live registers
112 exist that inhibit the motion (live registers constraints are not
113 represented by data dependence edges).
114
115 This pass must update information that subsequent passes expect to
116 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
117 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
118
119 The information in the line number notes is carefully retained by
120 this pass. Notes that refer to the starting and ending of
121 exception regions are also carefully retained by this pass. All
122 other NOTE insns are grouped in their same relative order at the
123 beginning of basic blocks and regions that have been scheduled. */
124 \f
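
/* A minimal, self-contained sketch of the kind of list-scheduling loop
   described above.  Everything here (toy_insn, toy_list_schedule, the
   TOY_NO_SUCC sentinel) is hypothetical and only illustrates the shape of
   the algorithm: an insn becomes ready once all of its predecessors have
   been scheduled, and on each step the highest-priority ready insn is
   issued.  The cycle counter, the queue of stalled insns and the
   tie-breaking rules listed above are deliberately omitted.  */

#define TOY_NO_SUCC (-1)

struct toy_insn
{
  int priority;		/* E.g. the longest path to the end of the block.  */
  int n_preds;		/* Unscheduled predecessors; 0 means ready.  */
  const int *succs;	/* Indices of dependent insns, TOY_NO_SUCC terminated.  */
};

/* Fill ORDER[0..N-1] with the indices of INSN in the order a greedy
   list scheduler would issue them.  */
static void
toy_list_schedule (struct toy_insn *insn, int n, int *order)
{
  int scheduled, i;

  for (scheduled = 0; scheduled < n; scheduled++)
    {
      int best = -1;

      /* Pick the ready insn with the highest priority.  */
      for (i = 0; i < n; i++)
	if (insn[i].n_preds == 0
	    && (best < 0 || insn[i].priority > insn[best].priority))
	  best = i;
      if (best < 0)
	break;			/* Nothing ready: the toy DAG was malformed.  */

      order[scheduled] = best;

      /* Retire the chosen insn and make its dependents ready.  */
      insn[best].n_preds = -1;
      for (i = 0; insn[best].succs[i] != TOY_NO_SUCC; i++)
	insn[insn[best].succs[i]].n_preds--;
    }
}

/* For example, if toy insn 0 (priority 3) feeds toy insns 1 (priority 1)
   and 2 (priority 2), the resulting order is 0, 2, 1.  */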
125 #include "config.h"
126 #include "system.h"
127 #include "coretypes.h"
128 #include "tm.h"
129 #include "diagnostic-core.h"
130 #include "hard-reg-set.h"
131 #include "rtl.h"
132 #include "tm_p.h"
133 #include "regs.h"
134 #include "hashtab.h"
135 #include "hash-set.h"
136 #include "vec.h"
137 #include "machmode.h"
138 #include "input.h"
139 #include "function.h"
140 #include "flags.h"
141 #include "insn-config.h"
142 #include "insn-attr.h"
143 #include "except.h"
144 #include "recog.h"
145 #include "sched-int.h"
146 #include "target.h"
147 #include "common/common-target.h"
148 #include "params.h"
149 #include "dbgcnt.h"
150 #include "cfgloop.h"
151 #include "ira.h"
152 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
153 #include "hash-table.h"
154 #include "dumpfile.h"
155
156 #ifdef INSN_SCHEDULING
157
158 /* True if we do register pressure relief through live-range
159 shrinkage. */
160 static bool live_range_shrinkage_p;
161
162 /* Switch on live range shrinkage. */
163 void
164 initialize_live_range_shrinkage (void)
165 {
166 live_range_shrinkage_p = true;
167 }
168
169 /* Switch off live range shrinkage. */
170 void
171 finish_live_range_shrinkage (void)
172 {
173 live_range_shrinkage_p = false;
174 }
175
176 /* issue_rate is the number of insns that can be scheduled in the same
177 machine cycle. It can be defined in the config/mach/mach.h file,
178 otherwise we set it to 1. */
179
180 int issue_rate;
181
182 /* This can be set to true by a backend if the scheduler should not
183 enable a DCE pass. */
184 bool sched_no_dce;
185
186 /* The current initiation interval used when modulo scheduling. */
187 static int modulo_ii;
188
189 /* The maximum number of stages we are prepared to handle. */
190 static int modulo_max_stages;
191
192 /* The number of insns that exist in each iteration of the loop. We use this
193 to detect when we've scheduled all insns from the first iteration. */
194 static int modulo_n_insns;
195
196 /* The current count of insns in the first iteration of the loop that have
197 already been scheduled. */
198 static int modulo_insns_scheduled;
199
200 /* The maximum uid of insns from the first iteration of the loop. */
201 static int modulo_iter0_max_uid;
202
203 /* The number of times we should attempt to backtrack when modulo scheduling.
204 Decreased each time we have to backtrack. */
205 static int modulo_backtracks_left;
206
207 /* The stage in which the last insn from the original loop was
208 scheduled. */
209 static int modulo_last_stage;
210
211 /* sched-verbose controls the amount of debugging output the
212 scheduler prints. It is controlled by -fsched-verbose=N:
213 N>0 and no -DSR : the output is directed to stderr.
214 N>=10 will direct the printouts to stderr (regardless of -dSR).
215 N=1: same as -dSR.
216 N=2: bb's probabilities, detailed ready list info, unit/insn info.
217 N=3: rtl at abort point, control-flow, regions info.
218 N=5: dependences info. */
219
220 int sched_verbose = 0;
221
222 /* Debugging file. All printouts are sent to dump, which is always set,
223 either to stderr, or to the dump listing file (-dRS). */
224 FILE *sched_dump = 0;
225
226 /* This is a placeholder for the scheduler parameters common
227 to all schedulers. */
228 struct common_sched_info_def *common_sched_info;
229
230 #define INSN_TICK(INSN) (HID (INSN)->tick)
231 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
232 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
233 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
234 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
235 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
236 #define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
237 /* Cached cost of the instruction. Use insn_cost to get cost of the
238 insn. -1 here means that the field is not initialized. */
239 #define INSN_COST(INSN) (HID (INSN)->cost)
240
241 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
242 then it should be recalculated from scratch. */
243 #define INVALID_TICK (-(max_insn_queue_index + 1))
244 /* The minimal value of the INSN_TICK of an instruction. */
245 #define MIN_TICK (-max_insn_queue_index)
246
247 /* List of important notes we must keep around. This is a pointer to the
248 last element in the list. */
249 rtx_insn *note_list;
250
251 static struct spec_info_def spec_info_var;
252 /* Description of the speculative part of the scheduling.
253 If NULL - no speculation. */
254 spec_info_t spec_info = NULL;
255
256 /* True if a recovery block was added during scheduling of the current
257 block. Used to determine if we need to fix INSN_TICKs. */
258 static bool haifa_recovery_bb_recently_added_p;
259
260 /* True if a recovery block was added during this scheduling pass.
261 Used to determine if we should have empty memory pools of dependencies
262 after finishing the current region. */
263 bool haifa_recovery_bb_ever_added_p;
264
265 /* Counters of different types of speculative instructions. */
266 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
267
268 /* Array used in {unlink, restore}_bb_notes. */
269 static rtx_insn **bb_header = 0;
270
271 /* Basic block after which recovery blocks will be created. */
272 static basic_block before_recovery;
273
274 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
275 created it. */
276 basic_block after_recovery;
277
278 /* FALSE if we add bb to another region, so we don't need to initialize it. */
279 bool adding_bb_to_current_region_p = true;
280
281 /* Queues, etc. */
282
283 /* An instruction is ready to be scheduled when all insns preceding it
284 have already been scheduled. It is important to ensure that all
285 insns which use its result will not be executed until its result
286 has been computed. An insn is maintained in one of four structures:
287
288 (P) the "Pending" set of insns which cannot be scheduled until
289 their dependencies have been satisfied.
290 (Q) the "Queued" set of insns that can be scheduled when sufficient
291 time has passed.
292 (R) the "Ready" list of unscheduled, uncommitted insns.
293 (S) the "Scheduled" list of insns.
294
295 Initially, all insns are either "Pending" or "Ready" depending on
296 whether their dependencies are satisfied.
297
298 Insns move from the "Ready" list to the "Scheduled" list as they
299 are committed to the schedule. As this occurs, the insns in the
300 "Pending" list have their dependencies satisfied and move to either
301 the "Ready" list or the "Queued" set depending on whether
302 sufficient time has passed to make them ready. As time passes,
303 insns move from the "Queued" set to the "Ready" list.
304
305 The "Pending" list (P) are the insns in the INSN_FORW_DEPS of the
306 unscheduled insns, i.e., those that are ready, queued, and pending.
307 The "Queued" set (Q) is implemented by the variable `insn_queue'.
308 The "Ready" list (R) is implemented by the variables `ready' and
309 `n_ready'.
310 The "Scheduled" list (S) is the new insn chain built by this pass.
311
312 The transition (R->S) is implemented in the scheduling loop in
313 `schedule_block' when the best insn to schedule is chosen.
314 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
315 insns move from the ready list to the scheduled list.
316 The transition (Q->R) is implemented in 'queue_to_ready' as time
317 passes or stalls are introduced. */
318
319 /* Implement a circular buffer to delay instructions until sufficient
320 time has passed. For the new pipeline description interface,
321 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
322 than the maximal time of instruction execution computed by genattr.c,
323 based on the maximal time of functional unit reservations and of
324 getting a result. This is the longest time an insn may be queued. */
325
326 static rtx_insn_list **insn_queue;
327 static int q_ptr = 0;
328 static int q_size = 0;
329 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
330 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
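
/* The masking in NEXT_Q and NEXT_Q_AFTER works because
   max_insn_queue_index is one less than a power of two, so
   "& max_insn_queue_index" is the same as taking the index modulo the
   queue length.  A self-contained sketch, with a hypothetical mask
   parameter standing in for max_insn_queue_index:  */

static int
toy_next_q_after (int slot, int count, int mask)
{
  return (slot + count) & mask;
}

/* With a mask of 7 (an eight-slot queue), toy_next_q_after (6, 1, 7) == 7
   and toy_next_q_after (6, 3, 7) == 1: the index wraps around instead of
   running off the end of the buffer.  */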
331
332 #define QUEUE_SCHEDULED (-3)
333 #define QUEUE_NOWHERE (-2)
334 #define QUEUE_READY (-1)
335 /* QUEUE_SCHEDULED - INSN is scheduled.
336 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
337 queue or ready list.
338 QUEUE_READY - INSN is in ready list.
339 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
340
341 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
342
343 /* The following variable holds the state of all current and future
344 reservations of the processor units. */
345 state_t curr_state;
346
347 /* The following variable is the size of the memory representing all
348 current and future reservations of the processor units. */
349 size_t dfa_state_size;
350
351 /* The following array is used to find the best insn from ready when
352 the automaton pipeline interface is used. */
353 signed char *ready_try = NULL;
354
355 /* The ready list. */
356 struct ready_list ready = {NULL, 0, 0, 0, 0};
357
358 /* The pointer to the ready list (to be removed). */
359 static struct ready_list *readyp = &ready;
360
361 /* Scheduling clock. */
362 static int clock_var;
363
364 /* Clock at which the previous instruction was issued. */
365 static int last_clock_var;
366
367 /* Set to true if, when queuing a shadow insn, we discover that it would be
368 scheduled too late. */
369 static bool must_backtrack;
370
371 /* The following variable is the number of essential insns issued on
372 the current cycle. An insn is essential if it changes the
373 processor's state. */
374 int cycle_issued_insns;
375
376 /* This records the actual schedule. It is built up during the main phase
377 of schedule_block, and afterwards used to reorder the insns in the RTL. */
378 static vec<rtx_insn *> scheduled_insns;
379
380 static int may_trap_exp (const_rtx, int);
381
382 /* Nonzero iff the address is composed of at most 1 register. */
383 #define CONST_BASED_ADDRESS_P(x) \
384 (REG_P (x) \
385 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
386 || (GET_CODE (x) == LO_SUM)) \
387 && (CONSTANT_P (XEXP (x, 0)) \
388 || CONSTANT_P (XEXP (x, 1)))))
389
390 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
391 as found by analyzing insn's expression. */
392
393 \f
394 static int haifa_luid_for_non_insn (rtx x);
395
396 /* Haifa version of sched_info hooks common to all headers. */
397 const struct common_sched_info_def haifa_common_sched_info =
398 {
399 NULL, /* fix_recovery_cfg */
400 NULL, /* add_block */
401 NULL, /* estimate_number_of_insns */
402 haifa_luid_for_non_insn, /* luid_for_non_insn */
403 SCHED_PASS_UNKNOWN /* sched_pass_id */
404 };
405
406 /* Mapping from instruction UID to its Logical UID. */
407 vec<int> sched_luids = vNULL;
408
409 /* Next LUID to assign to an instruction. */
410 int sched_max_luid = 1;
411
412 /* Haifa Instruction Data. */
413 vec<haifa_insn_data_def> h_i_d = vNULL;
414
415 void (* sched_init_only_bb) (basic_block, basic_block);
416
417 /* Split block function. Different schedulers might use different functions
418 to keep their internal data consistent. */
419 basic_block (* sched_split_block) (basic_block, rtx);
420
421 /* Create empty basic block after the specified block. */
422 basic_block (* sched_create_empty_bb) (basic_block);
423
424 /* Return the number of cycles until INSN is expected to be ready.
425 Return zero if it already is. */
426 static int
427 insn_delay (rtx_insn *insn)
428 {
429 return MAX (INSN_TICK (insn) - clock_var, 0);
430 }
431
432 static int
433 may_trap_exp (const_rtx x, int is_store)
434 {
435 enum rtx_code code;
436
437 if (x == 0)
438 return TRAP_FREE;
439 code = GET_CODE (x);
440 if (is_store)
441 {
442 if (code == MEM && may_trap_p (x))
443 return TRAP_RISKY;
444 else
445 return TRAP_FREE;
446 }
447 if (code == MEM)
448 {
449 /* The insn uses memory: a volatile load. */
450 if (MEM_VOLATILE_P (x))
451 return IRISKY;
452 /* An exception-free load. */
453 if (!may_trap_p (x))
454 return IFREE;
455 /* A load with 1 base register, to be further checked. */
456 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
457 return PFREE_CANDIDATE;
458 /* No info on the load, to be further checked. */
459 return PRISKY_CANDIDATE;
460 }
461 else
462 {
463 const char *fmt;
464 int i, insn_class = TRAP_FREE;
465
466 /* Neither store nor load, check if it may cause a trap. */
467 if (may_trap_p (x))
468 return TRAP_RISKY;
469 /* Recursive step: walk the insn... */
470 fmt = GET_RTX_FORMAT (code);
471 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
472 {
473 if (fmt[i] == 'e')
474 {
475 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
476 insn_class = WORST_CLASS (insn_class, tmp_class);
477 }
478 else if (fmt[i] == 'E')
479 {
480 int j;
481 for (j = 0; j < XVECLEN (x, i); j++)
482 {
483 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
484 insn_class = WORST_CLASS (insn_class, tmp_class);
485 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
486 break;
487 }
488 }
489 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
490 break;
491 }
492 return insn_class;
493 }
494 }
495
496 /* Classifies rtx X of an insn for the purpose of verifying that X can be
497 executed speculatively (and consequently the insn can be moved
498 speculatively), by examining X, returning:
499 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
500 TRAP_FREE: non-load insn.
501 IFREE: load from a globally safe location.
502 IRISKY: volatile load.
503 PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
504 being either PFREE or PRISKY. */
505
506 static int
507 haifa_classify_rtx (const_rtx x)
508 {
509 int tmp_class = TRAP_FREE;
510 int insn_class = TRAP_FREE;
511 enum rtx_code code;
512
513 if (GET_CODE (x) == PARALLEL)
514 {
515 int i, len = XVECLEN (x, 0);
516
517 for (i = len - 1; i >= 0; i--)
518 {
519 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
520 insn_class = WORST_CLASS (insn_class, tmp_class);
521 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
522 break;
523 }
524 }
525 else
526 {
527 code = GET_CODE (x);
528 switch (code)
529 {
530 case CLOBBER:
531 /* Test if it is a 'store'. */
532 tmp_class = may_trap_exp (XEXP (x, 0), 1);
533 break;
534 case SET:
535 /* Test if it is a store. */
536 tmp_class = may_trap_exp (SET_DEST (x), 1);
537 if (tmp_class == TRAP_RISKY)
538 break;
539 /* Test if it is a load. */
540 tmp_class =
541 WORST_CLASS (tmp_class,
542 may_trap_exp (SET_SRC (x), 0));
543 break;
544 case COND_EXEC:
545 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
546 if (tmp_class == TRAP_RISKY)
547 break;
548 tmp_class = WORST_CLASS (tmp_class,
549 may_trap_exp (COND_EXEC_TEST (x), 0));
550 break;
551 case TRAP_IF:
552 tmp_class = TRAP_RISKY;
553 break;
554 default:;
555 }
556 insn_class = tmp_class;
557 }
558
559 return insn_class;
560 }
561
562 int
563 haifa_classify_insn (const_rtx insn)
564 {
565 return haifa_classify_rtx (PATTERN (insn));
566 }
567 \f
568 /* After the scheduler initialization function has been called, this function
569 can be called to enable modulo scheduling. II is the initiation interval
570 we should use; it affects the delays for delay_pairs that were recorded as
571 separated by a given number of stages.
572
573 MAX_STAGES provides us with a limit
574 after which we give up scheduling; the caller must have unrolled at least
575 as many copies of the loop body and recorded delay_pairs for them.
576
577 INSNS is the number of real (non-debug) insns in one iteration of
578 the loop. MAX_UID can be used to test whether an insn belongs to
579 the first iteration of the loop; all of them have a uid lower than
580 MAX_UID. */
581 void
582 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
583 {
584 modulo_ii = ii;
585 modulo_max_stages = max_stages;
586 modulo_n_insns = insns;
587 modulo_iter0_max_uid = max_uid;
588 modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
589 }
590
591 /* A structure to record a pair of insns where the first one is a real
592 insn that has delay slots, and the second is its delayed shadow.
593 I1 is scheduled normally and will emit an assembly instruction,
594 while I2 describes the side effect that takes place at the
595 transition between cycles CYCLES and (CYCLES + 1) after I1. */
596 struct delay_pair
597 {
598 struct delay_pair *next_same_i1;
599 rtx_insn *i1, *i2;
600 int cycles;
601 /* When doing modulo scheduling, a delay_pair can also be used to
602 show that I1 and I2 are the same insn in a different stage. If that
603 is the case, STAGES will be nonzero. */
604 int stages;
605 };
606
607 /* Helpers for delay hashing. */
608
609 struct delay_i1_hasher : typed_noop_remove <delay_pair>
610 {
611 typedef delay_pair value_type;
612 typedef void compare_type;
613 static inline hashval_t hash (const value_type *);
614 static inline bool equal (const value_type *, const compare_type *);
615 };
616
617 /* Returns a hash value for X, based on hashing just I1. */
618
619 inline hashval_t
620 delay_i1_hasher::hash (const value_type *x)
621 {
622 return htab_hash_pointer (x->i1);
623 }
624
625 /* Return true if I1 of pair X is the same as that of pair Y. */
626
627 inline bool
628 delay_i1_hasher::equal (const value_type *x, const compare_type *y)
629 {
630 return x->i1 == y;
631 }
632
633 struct delay_i2_hasher : typed_free_remove <delay_pair>
634 {
635 typedef delay_pair value_type;
636 typedef void compare_type;
637 static inline hashval_t hash (const value_type *);
638 static inline bool equal (const value_type *, const compare_type *);
639 };
640
641 /* Returns a hash value for X, based on hashing just I2. */
642
643 inline hashval_t
644 delay_i2_hasher::hash (const value_type *x)
645 {
646 return htab_hash_pointer (x->i2);
647 }
648
649 /* Return true if I2 of pair X is the same as that of pair Y. */
650
651 inline bool
652 delay_i2_hasher::equal (const value_type *x, const compare_type *y)
653 {
654 return x->i2 == y;
655 }
656
657 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
658 indexed by I2. */
659 static hash_table<delay_i1_hasher> *delay_htab;
660 static hash_table<delay_i2_hasher> *delay_htab_i2;
661
662 /* Called through htab_traverse. Walk the hashtable using I2 as
663 index, and delete all elements involving a UID higher than
664 that pointed to by *DATA. */
665 int
666 haifa_htab_i2_traverse (delay_pair **slot, int *data)
667 {
668 int maxuid = *data;
669 struct delay_pair *p = *slot;
670 if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
671 {
672 delay_htab_i2->clear_slot (slot);
673 }
674 return 1;
675 }
676
677 /* Called through htab_traverse. Walk the hashtable using I1 as
678 index, and delete all elements involving a UID higher than
679 that pointed to by *DATA. */
680 int
681 haifa_htab_i1_traverse (delay_pair **pslot, int *data)
682 {
683 int maxuid = *data;
684 struct delay_pair *p, *first, **pprev;
685
686 if (INSN_UID ((*pslot)->i1) >= maxuid)
687 {
688 delay_htab->clear_slot (pslot);
689 return 1;
690 }
691 pprev = &first;
692 for (p = *pslot; p; p = p->next_same_i1)
693 {
694 if (INSN_UID (p->i2) < maxuid)
695 {
696 *pprev = p;
697 pprev = &p->next_same_i1;
698 }
699 }
700 *pprev = NULL;
701 if (first == NULL)
702 delay_htab->clear_slot (pslot);
703 else
704 *pslot = first;
705 return 1;
706 }
707
708 /* Discard all delay pairs which involve an insn with a UID higher
709 than MAX_UID. */
710 void
711 discard_delay_pairs_above (int max_uid)
712 {
713 delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
714 delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
715 }
716
717 /* This function can be called by a port just before it starts the final
718 scheduling pass. It records the fact that an instruction with delay
719 slots has been split into two insns, I1 and I2. The first one will be
720 scheduled normally and initiates the operation. The second one is a
721 shadow which must follow a specific number of cycles after I1; its only
722 purpose is to show the side effect that occurs at that cycle in the RTL.
723 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
724 while I2 retains the original insn type.
725
726 There are two ways in which the number of cycles can be specified,
727 involving the CYCLES and STAGES arguments to this function. If STAGES
728 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
729 which is multiplied by MODULO_II to give the number of cycles. This is
730 only useful if the caller also calls set_modulo_params to enable modulo
731 scheduling. */
732
733 void
734 record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
735 {
736 struct delay_pair *p = XNEW (struct delay_pair);
737 struct delay_pair **slot;
738
739 p->i1 = i1;
740 p->i2 = i2;
741 p->cycles = cycles;
742 p->stages = stages;
743
744 if (!delay_htab)
745 {
746 delay_htab = new hash_table<delay_i1_hasher> (10);
747 delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
748 }
749 slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
750 p->next_same_i1 = *slot;
751 *slot = p;
752 slot = delay_htab_i2->find_slot (p, INSERT);
753 *slot = p;
754 }
755
756 /* Examine the delay pair hashtable to see if INSN is a shadow for another,
757 and return the other insn if so. Return NULL otherwise. */
758 rtx_insn *
759 real_insn_for_shadow (rtx_insn *insn)
760 {
761 struct delay_pair *pair;
762
763 if (!delay_htab)
764 return NULL;
765
766 pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
767 if (!pair || pair->stages > 0)
768 return NULL;
769 return pair->i1;
770 }
771
772 /* For a pair P of insns, return the fixed distance in cycles from the first
773 insn after which the second must be scheduled. */
774 static int
775 pair_delay (struct delay_pair *p)
776 {
777 if (p->stages == 0)
778 return p->cycles;
779 else
780 return p->stages * modulo_ii;
781 }
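
/* A self-contained illustration of the rule above, with hypothetical
   numbers: an ordinary delay pair with CYCLES == 3 keeps that distance,
   while a modulo-scheduling pair with STAGES == 2 and an initiation
   interval of 5 must follow its first insn by 10 cycles.  */

static int
toy_pair_delay (int cycles, int stages, int ii)
{
  return stages == 0 ? cycles : stages * ii;
}

/* toy_pair_delay (3, 0, 5) == 3, toy_pair_delay (3, 2, 5) == 10.  */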
782
783 /* Given an insn INSN, add a dependence on its delayed shadow if it
784 has one. Also try to find situations where shadows depend on each other
785 and add dependencies to the real insns to limit the amount of backtracking
786 needed. */
787 void
788 add_delay_dependencies (rtx_insn *insn)
789 {
790 struct delay_pair *pair;
791 sd_iterator_def sd_it;
792 dep_t dep;
793
794 if (!delay_htab)
795 return;
796
797 pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
798 if (!pair)
799 return;
800 add_dependence (insn, pair->i1, REG_DEP_ANTI);
801 if (pair->stages)
802 return;
803
804 FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
805 {
806 rtx_insn *pro = DEP_PRO (dep);
807 struct delay_pair *other_pair
808 = delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
809 if (!other_pair || other_pair->stages)
810 continue;
811 if (pair_delay (other_pair) >= pair_delay (pair))
812 {
813 if (sched_verbose >= 4)
814 {
815 fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
816 INSN_UID (other_pair->i1),
817 INSN_UID (pair->i1));
818 fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
819 INSN_UID (pair->i1),
820 INSN_UID (pair->i2),
821 pair_delay (pair));
822 fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
823 INSN_UID (other_pair->i1),
824 INSN_UID (other_pair->i2),
825 pair_delay (other_pair));
826 }
827 add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
828 }
829 }
830 }
831 \f
832 /* Forward declarations. */
833
834 static int priority (rtx_insn *);
835 static int rank_for_schedule (const void *, const void *);
836 static void swap_sort (rtx_insn **, int);
837 static void queue_insn (rtx_insn *, int, const char *);
838 static int schedule_insn (rtx_insn *);
839 static void adjust_priority (rtx_insn *);
840 static void advance_one_cycle (void);
841 static void extend_h_i_d (void);
842
843
844 /* Notes handling mechanism:
845 =========================
846 Generally, NOTES are saved before scheduling and restored after scheduling.
847 The scheduler distinguishes between two types of notes:
848
849 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
850 Before scheduling a region, a pointer to the note is added to the insn
851 that follows or precedes it. (This happens as part of the data dependence
852 computation). After scheduling an insn, the pointer contained in it is
853 used for regenerating the corresponding note (in reemit_notes).
854
855 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
856 these notes are put in a list (in rm_other_notes() and
857 unlink_other_notes ()). After scheduling the block, these notes are
858 inserted at the beginning of the block (in schedule_block()). */
859
860 static void ready_add (struct ready_list *, rtx_insn *, bool);
861 static rtx_insn *ready_remove_first (struct ready_list *);
862 static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);
863
864 static void queue_to_ready (struct ready_list *);
865 static int early_queue_to_ready (state_t, struct ready_list *);
866
867 /* The following functions are used to implement multi-pass scheduling
868 on the first cycle. */
869 static rtx_insn *ready_remove (struct ready_list *, int);
870 static void ready_remove_insn (rtx);
871
872 static void fix_inter_tick (rtx_insn *, rtx_insn *);
873 static int fix_tick_ready (rtx_insn *);
874 static void change_queue_index (rtx_insn *, int);
875
876 /* The following functions are used to implement scheduling of data/control
877 speculative instructions. */
878
879 static void extend_h_i_d (void);
880 static void init_h_i_d (rtx_insn *);
881 static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
882 static void generate_recovery_code (rtx_insn *);
883 static void process_insn_forw_deps_be_in_spec (rtx, rtx_insn *, ds_t);
884 static void begin_speculative_block (rtx_insn *);
885 static void add_to_speculative_block (rtx_insn *);
886 static void init_before_recovery (basic_block *);
887 static void create_check_block_twin (rtx_insn *, bool);
888 static void fix_recovery_deps (basic_block);
889 static bool haifa_change_pattern (rtx_insn *, rtx);
890 static void dump_new_block_header (int, basic_block, rtx_insn *, rtx_insn *);
891 static void restore_bb_notes (basic_block);
892 static void fix_jump_move (rtx_insn *);
893 static void move_block_after_check (rtx_insn *);
894 static void move_succs (vec<edge, va_gc> **, basic_block);
895 static void sched_remove_insn (rtx_insn *);
896 static void clear_priorities (rtx_insn *, rtx_vec_t *);
897 static void calc_priorities (rtx_vec_t);
898 static void add_jump_dependencies (rtx_insn *, rtx_insn *);
899
900 #endif /* INSN_SCHEDULING */
901 \f
902 /* Point to state used for the current scheduling pass. */
903 struct haifa_sched_info *current_sched_info;
904 \f
905 #ifndef INSN_SCHEDULING
906 void
907 schedule_insns (void)
908 {
909 }
910 #else
911
912 /* Do register-pressure-sensitive insn scheduling if the flag is
913 set. */
914 enum sched_pressure_algorithm sched_pressure;
915
916 /* Map regno -> its pressure class. The map is defined only when
917 SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
918 enum reg_class *sched_regno_pressure_class;
919
920 /* The current register pressure. Only elements corresponding to
921 pressure classes are defined. */
922 static int curr_reg_pressure[N_REG_CLASSES];
923
924 /* Saved value of the previous array. */
925 static int saved_reg_pressure[N_REG_CLASSES];
926
927 /* Registers live at the given scheduling point. */
928 static bitmap curr_reg_live;
929
930 /* Saved value of the previous array. */
931 static bitmap saved_reg_live;
932
933 /* Registers mentioned in the current region. */
934 static bitmap region_ref_regs;
935
936 /* Effective number of available registers of a given class (see comment
937 in sched_pressure_start_bb). */
938 static int sched_class_regs_num[N_REG_CLASSES];
939 /* Number of call_used_regs. This is a helper for calculating
940 sched_class_regs_num. */
941 static int call_used_regs_num[N_REG_CLASSES];
942
943 /* Initiate register-pressure-related info for scheduling the current
944 region. Currently it only clears the registers mentioned in the
945 current region. */
946 void
947 sched_init_region_reg_pressure_info (void)
948 {
949 bitmap_clear (region_ref_regs);
950 }
951
952 /* PRESSURE[CL] describes the pressure on register class CL. Update it
953 for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
954 LIVE tracks the set of live registers; if it is null, assume that
955 every birth or death is genuine. */
956 static inline void
957 mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
958 {
959 enum reg_class pressure_class;
960
961 pressure_class = sched_regno_pressure_class[regno];
962 if (regno >= FIRST_PSEUDO_REGISTER)
963 {
964 if (pressure_class != NO_REGS)
965 {
966 if (birth_p)
967 {
968 if (!live || bitmap_set_bit (live, regno))
969 pressure[pressure_class]
970 += (ira_reg_class_max_nregs
971 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
972 }
973 else
974 {
975 if (!live || bitmap_clear_bit (live, regno))
976 pressure[pressure_class]
977 -= (ira_reg_class_max_nregs
978 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
979 }
980 }
981 }
982 else if (pressure_class != NO_REGS
983 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
984 {
985 if (birth_p)
986 {
987 if (!live || bitmap_set_bit (live, regno))
988 pressure[pressure_class]++;
989 }
990 else
991 {
992 if (!live || bitmap_clear_bit (live, regno))
993 pressure[pressure_class]--;
994 }
995 }
996 }
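
/* A self-contained sketch of the bookkeeping above, with the bitmap
   replaced by a plain 0/1 array and a single per-class pressure counter.
   The names are hypothetical; NREGS stands in for
   ira_reg_class_max_nregs (1 for a single hard register).  The point
   illustrated is that a birth only raises the pressure when the register
   was not live already, and a death only lowers it when the register
   really was live.  */

static void
toy_mark_birth_or_death (int *live, int *pressure, int regno, int nregs,
			 int birth_p)
{
  if (birth_p)
    {
      if (!live[regno])
	{
	  live[regno] = 1;
	  *pressure += nregs;
	}
    }
  else if (live[regno])
    {
      live[regno] = 0;
      *pressure -= nregs;
    }
}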
997
998 /* Initiate current register pressure related info from the live
999 registers given by LIVE. */
1000 static void
1001 initiate_reg_pressure_info (bitmap live)
1002 {
1003 int i;
1004 unsigned int j;
1005 bitmap_iterator bi;
1006
1007 for (i = 0; i < ira_pressure_classes_num; i++)
1008 curr_reg_pressure[ira_pressure_classes[i]] = 0;
1009 bitmap_clear (curr_reg_live);
1010 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
1011 if (sched_pressure == SCHED_PRESSURE_MODEL
1012 || current_nr_blocks == 1
1013 || bitmap_bit_p (region_ref_regs, j))
1014 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
1015 }
1016
1017 /* Mark registers in X as mentioned in the current region. */
1018 static void
1019 setup_ref_regs (rtx x)
1020 {
1021 int i, j, regno;
1022 const RTX_CODE code = GET_CODE (x);
1023 const char *fmt;
1024
1025 if (REG_P (x))
1026 {
1027 regno = REGNO (x);
1028 if (HARD_REGISTER_NUM_P (regno))
1029 bitmap_set_range (region_ref_regs, regno,
1030 hard_regno_nregs[regno][GET_MODE (x)]);
1031 else
1032 bitmap_set_bit (region_ref_regs, REGNO (x));
1033 return;
1034 }
1035 fmt = GET_RTX_FORMAT (code);
1036 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1037 if (fmt[i] == 'e')
1038 setup_ref_regs (XEXP (x, i));
1039 else if (fmt[i] == 'E')
1040 {
1041 for (j = 0; j < XVECLEN (x, i); j++)
1042 setup_ref_regs (XVECEXP (x, i, j));
1043 }
1044 }
1045
1046 /* Initiate current register pressure related info at the start of
1047 basic block BB. */
1048 static void
1049 initiate_bb_reg_pressure_info (basic_block bb)
1050 {
1051 unsigned int i ATTRIBUTE_UNUSED;
1052 rtx_insn *insn;
1053
1054 if (current_nr_blocks > 1)
1055 FOR_BB_INSNS (bb, insn)
1056 if (NONDEBUG_INSN_P (insn))
1057 setup_ref_regs (PATTERN (insn));
1058 initiate_reg_pressure_info (df_get_live_in (bb));
1059 #ifdef EH_RETURN_DATA_REGNO
1060 if (bb_has_eh_pred (bb))
1061 for (i = 0; ; ++i)
1062 {
1063 unsigned int regno = EH_RETURN_DATA_REGNO (i);
1064
1065 if (regno == INVALID_REGNUM)
1066 break;
1067 if (! bitmap_bit_p (df_get_live_in (bb), regno))
1068 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
1069 regno, true);
1070 }
1071 #endif
1072 }
1073
1074 /* Save current register pressure related info. */
1075 static void
1076 save_reg_pressure (void)
1077 {
1078 int i;
1079
1080 for (i = 0; i < ira_pressure_classes_num; i++)
1081 saved_reg_pressure[ira_pressure_classes[i]]
1082 = curr_reg_pressure[ira_pressure_classes[i]];
1083 bitmap_copy (saved_reg_live, curr_reg_live);
1084 }
1085
1086 /* Restore saved register pressure related info. */
1087 static void
1088 restore_reg_pressure (void)
1089 {
1090 int i;
1091
1092 for (i = 0; i < ira_pressure_classes_num; i++)
1093 curr_reg_pressure[ira_pressure_classes[i]]
1094 = saved_reg_pressure[ira_pressure_classes[i]];
1095 bitmap_copy (curr_reg_live, saved_reg_live);
1096 }
1097
1098 /* Return TRUE if the register is dying after its USE. */
1099 static bool
1100 dying_use_p (struct reg_use_data *use)
1101 {
1102 struct reg_use_data *next;
1103
1104 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1105 if (NONDEBUG_INSN_P (next->insn)
1106 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1107 return false;
1108 return true;
1109 }
1110
1111 /* Print info about the current register pressure and its excess for
1112 each pressure class. */
1113 static void
1114 print_curr_reg_pressure (void)
1115 {
1116 int i;
1117 enum reg_class cl;
1118
1119 fprintf (sched_dump, ";;\t");
1120 for (i = 0; i < ira_pressure_classes_num; i++)
1121 {
1122 cl = ira_pressure_classes[i];
1123 gcc_assert (curr_reg_pressure[cl] >= 0);
1124 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
1125 curr_reg_pressure[cl],
1126 curr_reg_pressure[cl] - sched_class_regs_num[cl]);
1127 }
1128 fprintf (sched_dump, "\n");
1129 }
1130 \f
1131 /* Determine if INSN has a condition that is clobbered if a register
1132 in SET_REGS is modified. */
1133 static bool
1134 cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
1135 {
1136 rtx pat = PATTERN (insn);
1137 gcc_assert (GET_CODE (pat) == COND_EXEC);
1138 if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
1139 {
1140 sd_iterator_def sd_it;
1141 dep_t dep;
1142 haifa_change_pattern (insn, ORIG_PAT (insn));
1143 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1144 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1145 TODO_SPEC (insn) = HARD_DEP;
1146 if (sched_verbose >= 2)
1147 fprintf (sched_dump,
1148 ";;\t\tdequeue insn %s because of clobbered condition\n",
1149 (*current_sched_info->print_insn) (insn, 0));
1150 return true;
1151 }
1152
1153 return false;
1154 }
1155
1156 /* This function should be called after modifying the pattern of INSN,
1157 to update scheduler data structures as needed. */
1158 static void
1159 update_insn_after_change (rtx_insn *insn)
1160 {
1161 sd_iterator_def sd_it;
1162 dep_t dep;
1163
1164 dfa_clear_single_insn_cache (insn);
1165
1166 sd_it = sd_iterator_start (insn,
1167 SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
1168 while (sd_iterator_cond (&sd_it, &dep))
1169 {
1170 DEP_COST (dep) = UNKNOWN_DEP_COST;
1171 sd_iterator_next (&sd_it);
1172 }
1173
1174 /* Invalidate INSN_COST, so it'll be recalculated. */
1175 INSN_COST (insn) = -1;
1176 /* Invalidate INSN_TICK, so it'll be recalculated. */
1177 INSN_TICK (insn) = INVALID_TICK;
1178 }
1179
1180
1181 /* Two VECs, one to hold dependencies for which pattern replacements
1182 need to be applied or restored at the start of the next cycle, and
1183 another to hold an integer that is either one, to apply the
1184 corresponding replacement, or zero to restore it. */
1185 static vec<dep_t> next_cycle_replace_deps;
1186 static vec<int> next_cycle_apply;
1187
1188 static void apply_replacement (dep_t, bool);
1189 static void restore_pattern (dep_t, bool);
1190
1191 /* Look at the remaining dependencies for insn NEXT, and compute and return
1192 the TODO_SPEC value we should use for it. This is called after one of
1193 NEXT's dependencies has been resolved.
1194 We also perform pattern replacements for predication, and for broken
1195 replacement dependencies. The latter is only done if FOR_BACKTRACK is
1196 false. */
1197
1198 static ds_t
1199 recompute_todo_spec (rtx_insn *next, bool for_backtrack)
1200 {
1201 ds_t new_ds;
1202 sd_iterator_def sd_it;
1203 dep_t dep, modify_dep = NULL;
1204 int n_spec = 0;
1205 int n_control = 0;
1206 int n_replace = 0;
1207 bool first_p = true;
1208
1209 if (sd_lists_empty_p (next, SD_LIST_BACK))
1210 /* NEXT has all its dependencies resolved. */
1211 return 0;
1212
1213 if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
1214 return HARD_DEP;
1215
1216 /* Now we've got NEXT with speculative deps only.
1217 1. Look at the deps to see what we have to do.
1218 2. Check if we can do 'todo'. */
1219 new_ds = 0;
1220
1221 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1222 {
1223 rtx_insn *pro = DEP_PRO (dep);
1224 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
1225
1226 if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
1227 continue;
1228
1229 if (ds)
1230 {
1231 n_spec++;
1232 if (first_p)
1233 {
1234 first_p = false;
1235
1236 new_ds = ds;
1237 }
1238 else
1239 new_ds = ds_merge (new_ds, ds);
1240 }
1241 else if (DEP_TYPE (dep) == REG_DEP_CONTROL)
1242 {
1243 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1244 {
1245 n_control++;
1246 modify_dep = dep;
1247 }
1248 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1249 }
1250 else if (DEP_REPLACE (dep) != NULL)
1251 {
1252 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED)
1253 {
1254 n_replace++;
1255 modify_dep = dep;
1256 }
1257 DEP_STATUS (dep) &= ~DEP_CANCELLED;
1258 }
1259 }
1260
1261 if (n_replace > 0 && n_control == 0 && n_spec == 0)
1262 {
1263 if (!dbg_cnt (sched_breakdep))
1264 return HARD_DEP;
1265 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
1266 {
1267 struct dep_replacement *desc = DEP_REPLACE (dep);
1268 if (desc != NULL)
1269 {
1270 if (desc->insn == next && !for_backtrack)
1271 {
1272 gcc_assert (n_replace == 1);
1273 apply_replacement (dep, true);
1274 }
1275 DEP_STATUS (dep) |= DEP_CANCELLED;
1276 }
1277 }
1278 return 0;
1279 }
1280
1281 else if (n_control == 1 && n_replace == 0 && n_spec == 0)
1282 {
1283 rtx_insn *pro, *other;
1284 rtx new_pat;
1285 rtx cond = NULL_RTX;
1286 bool success;
1287 rtx_insn *prev = NULL;
1288 int i;
1289 unsigned regno;
1290
1291 if ((current_sched_info->flags & DO_PREDICATION) == 0
1292 || (ORIG_PAT (next) != NULL_RTX
1293 && PREDICATED_PAT (next) == NULL_RTX))
1294 return HARD_DEP;
1295
1296 pro = DEP_PRO (modify_dep);
1297 other = real_insn_for_shadow (pro);
1298 if (other != NULL_RTX)
1299 pro = other;
1300
1301 cond = sched_get_reverse_condition_uncached (pro);
1302 regno = REGNO (XEXP (cond, 0));
1303
1304 /* Find the last scheduled insn that modifies the condition register.
1305 We can stop looking once we find the insn we depend on through the
1306 REG_DEP_CONTROL; if the condition register isn't modified after it,
1307 we know that it still has the right value. */
1308 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
1309 FOR_EACH_VEC_ELT_REVERSE (scheduled_insns, i, prev)
1310 {
1311 HARD_REG_SET t;
1312
1313 find_all_hard_reg_sets (prev, &t, true);
1314 if (TEST_HARD_REG_BIT (t, regno))
1315 return HARD_DEP;
1316 if (prev == pro)
1317 break;
1318 }
1319 if (ORIG_PAT (next) == NULL_RTX)
1320 {
1321 ORIG_PAT (next) = PATTERN (next);
1322
1323 new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
1324 success = haifa_change_pattern (next, new_pat);
1325 if (!success)
1326 return HARD_DEP;
1327 PREDICATED_PAT (next) = new_pat;
1328 }
1329 else if (PATTERN (next) != PREDICATED_PAT (next))
1330 {
1331 bool success = haifa_change_pattern (next,
1332 PREDICATED_PAT (next));
1333 gcc_assert (success);
1334 }
1335 DEP_STATUS (modify_dep) |= DEP_CANCELLED;
1336 return DEP_CONTROL;
1337 }
1338
1339 if (PREDICATED_PAT (next) != NULL_RTX)
1340 {
1341 int tick = INSN_TICK (next);
1342 bool success = haifa_change_pattern (next,
1343 ORIG_PAT (next));
1344 INSN_TICK (next) = tick;
1345 gcc_assert (success);
1346 }
1347
1348 /* We can't handle the case where there are both speculative and control
1349 dependencies, so we return HARD_DEP in such a case. Also fail if
1350 we have speculative dependencies with not enough points, or more than
1351 one control dependency. */
1352 if ((n_spec > 0 && (n_control > 0 || n_replace > 0))
1353 || (n_spec > 0
1354 /* Too few points? */
1355 && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
1356 || n_control > 0
1357 || n_replace > 0)
1358 return HARD_DEP;
1359
1360 return new_ds;
1361 }
1362 \f
1363 /* Pointer to the last instruction scheduled. */
1364 static rtx_insn *last_scheduled_insn;
1365
1366 /* Pointer to the last nondebug instruction scheduled within the
1367 block, or the prev_head of the scheduling block. Used by
1368 rank_for_schedule, so that insns independent of the last scheduled
1369 insn will be preferred over dependent instructions. */
1370 static rtx last_nondebug_scheduled_insn;
1371
1372 /* Pointer that iterates through the list of unscheduled insns if we
1373 have a dbg_cnt enabled. It always points at an insn prior to the
1374 first unscheduled one. */
1375 static rtx_insn *nonscheduled_insns_begin;
1376
1377 /* Compute cost of executing INSN.
1378 This is the number of cycles between instruction issue and
1379 instruction results. */
1380 int
1381 insn_cost (rtx_insn *insn)
1382 {
1383 int cost;
1384
1385 if (sel_sched_p ())
1386 {
1387 if (recog_memoized (insn) < 0)
1388 return 0;
1389
1390 cost = insn_default_latency (insn);
1391 if (cost < 0)
1392 cost = 0;
1393
1394 return cost;
1395 }
1396
1397 cost = INSN_COST (insn);
1398
1399 if (cost < 0)
1400 {
1401 /* A USE insn, or something else we don't need to
1402 understand. We can't pass these directly to
1403 result_ready_cost or insn_default_latency because it will
1404 trigger a fatal error for unrecognizable insns. */
1405 if (recog_memoized (insn) < 0)
1406 {
1407 INSN_COST (insn) = 0;
1408 return 0;
1409 }
1410 else
1411 {
1412 cost = insn_default_latency (insn);
1413 if (cost < 0)
1414 cost = 0;
1415
1416 INSN_COST (insn) = cost;
1417 }
1418 }
1419
1420 return cost;
1421 }
1422
1423 /* Compute cost of dependence LINK.
1424 This is the number of cycles between instruction issue and
1425 instruction results.
1426 ??? We also use this function to call recog_memoized on all insns. */
1427 int
1428 dep_cost_1 (dep_t link, dw_t dw)
1429 {
1430 rtx_insn *insn = DEP_PRO (link);
1431 rtx_insn *used = DEP_CON (link);
1432 int cost;
1433
1434 if (DEP_COST (link) != UNKNOWN_DEP_COST)
1435 return DEP_COST (link);
1436
1437 if (delay_htab)
1438 {
1439 struct delay_pair *delay_entry;
1440 delay_entry
1441 = delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
1442 if (delay_entry)
1443 {
1444 if (delay_entry->i1 == insn)
1445 {
1446 DEP_COST (link) = pair_delay (delay_entry);
1447 return DEP_COST (link);
1448 }
1449 }
1450 }
1451
1452 /* A USE insn should never require the value used to be computed.
1453 This allows the computation of a function's result and parameter
1454 values to overlap the return and call. We don't care about the
1455 dependence cost when only decreasing register pressure. */
1456 if (recog_memoized (used) < 0)
1457 {
1458 cost = 0;
1459 recog_memoized (insn);
1460 }
1461 else
1462 {
1463 enum reg_note dep_type = DEP_TYPE (link);
1464
1465 cost = insn_cost (insn);
1466
1467 if (INSN_CODE (insn) >= 0)
1468 {
1469 if (dep_type == REG_DEP_ANTI)
1470 cost = 0;
1471 else if (dep_type == REG_DEP_OUTPUT)
1472 {
1473 cost = (insn_default_latency (insn)
1474 - insn_default_latency (used));
1475 if (cost <= 0)
1476 cost = 1;
1477 }
1478 else if (bypass_p (insn))
1479 cost = insn_latency (insn, used);
1480 }
1481
1482
1483 if (targetm.sched.adjust_cost_2)
1484 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
1485 dw);
1486 else if (targetm.sched.adjust_cost != NULL)
1487 {
1488 /* This variable is used for backward compatibility with the
1489 targets. */
1490 rtx_insn_list *dep_cost_rtx_link =
1491 alloc_INSN_LIST (NULL_RTX, NULL);
1492
1493 /* Make it self-cycled, so that if someone tries to walk over this
1494 incomplete list he/she will be caught in an endless loop. */
1495 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
1496
1497 /* Targets use only REG_NOTE_KIND of the link. */
1498 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
1499
1500 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
1501 insn, cost);
1502
1503 free_INSN_LIST_node (dep_cost_rtx_link);
1504 }
1505
1506 if (cost < 0)
1507 cost = 0;
1508 }
1509
1510 DEP_COST (link) = cost;
1511 return cost;
1512 }
1513
1514 /* Compute cost of dependence LINK.
1515 This is the number of cycles between instruction issue and
1516 instruction results. */
1517 int
1518 dep_cost (dep_t link)
1519 {
1520 return dep_cost_1 (link, 0);
1521 }
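
/* A self-contained sketch of the default cost rules in dep_cost_1 above,
   ignoring the target hooks, delay pairs and bypasses.  The enum and the
   latency parameters are hypothetical placeholders.  */

enum toy_dep_kind { TOY_DEP_TRUE, TOY_DEP_OUTPUT, TOY_DEP_ANTI };

static int
toy_dep_cost (enum toy_dep_kind kind, int pro_latency, int con_latency)
{
  switch (kind)
    {
    case TOY_DEP_ANTI:
      /* Anti dependences never delay the consumer.  */
      return 0;
    case TOY_DEP_OUTPUT:
      /* Output dependences only need the producer to finish first.  */
      return pro_latency - con_latency > 0 ? pro_latency - con_latency : 1;
    default:
      /* True dependences wait for the producer's full latency.  */
      return pro_latency;
    }
}

/* E.g. toy_dep_cost (TOY_DEP_OUTPUT, 4, 2) == 2, while an anti dependence
   between the same two insns costs 0.  */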
1522
1523 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1524 INSN_PRIORITY explicitly. */
1525 void
1526 increase_insn_priority (rtx_insn *insn, int amount)
1527 {
1528 if (!sel_sched_p ())
1529 {
1530 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1531 if (INSN_PRIORITY_KNOWN (insn))
1532 INSN_PRIORITY (insn) += amount;
1533 }
1534 else
1535 {
1536 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1537 Use EXPR_PRIORITY instead. */
1538 sel_add_to_insn_priority (insn, amount);
1539 }
1540 }
1541
1542 /* Return 'true' if DEP should be included in priority calculations. */
1543 static bool
1544 contributes_to_priority_p (dep_t dep)
1545 {
1546 if (DEBUG_INSN_P (DEP_CON (dep))
1547 || DEBUG_INSN_P (DEP_PRO (dep)))
1548 return false;
1549
1550 /* Critical path is meaningful in block boundaries only. */
1551 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1552 DEP_PRO (dep)))
1553 return false;
1554
1555 if (DEP_REPLACE (dep) != NULL)
1556 return false;
1557
1558 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
1559 then speculative instructions will be less likely to be
1560 scheduled. That is because the priority of
1561 their producers will increase, and, thus, the
1562 producers will be more likely to be scheduled, thus
1563 resolving the dependence. */
1564 if (sched_deps_info->generate_spec_deps
1565 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1566 && (DEP_STATUS (dep) & SPECULATIVE))
1567 return false;
1568
1569 return true;
1570 }
1571
1572 /* Compute the number of nondebug deps in list LIST for INSN. */
1573
1574 static int
1575 dep_list_size (rtx insn, sd_list_types_def list)
1576 {
1577 sd_iterator_def sd_it;
1578 dep_t dep;
1579 int dbgcount = 0, nodbgcount = 0;
1580
1581 if (!MAY_HAVE_DEBUG_INSNS)
1582 return sd_lists_size (insn, list);
1583
1584 FOR_EACH_DEP (insn, list, sd_it, dep)
1585 {
1586 if (DEBUG_INSN_P (DEP_CON (dep)))
1587 dbgcount++;
1588 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1589 nodbgcount++;
1590 }
1591
1592 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
1593
1594 return nodbgcount;
1595 }
1596
1597 /* Compute the priority number for INSN. */
1598 static int
1599 priority (rtx_insn *insn)
1600 {
1601 if (! INSN_P (insn))
1602 return 0;
1603
1604 /* We should not be interested in priority of an already scheduled insn. */
1605 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1606
1607 if (!INSN_PRIORITY_KNOWN (insn))
1608 {
1609 int this_priority = -1;
1610
1611 if (dep_list_size (insn, SD_LIST_FORW) == 0)
1612 /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1613 some forward deps but all of them are ignored by the
1614 contributes_to_priority hook. At the moment we set the priority of
1615 such an insn to 0. */
1616 this_priority = insn_cost (insn);
1617 else
1618 {
1619 rtx_insn *prev_first, *twin;
1620 basic_block rec;
1621
1622 /* For recovery check instructions we calculate priority slightly
1623 differently than for normal instructions. Instead of walking
1624 through INSN_FORW_DEPS (check) list, we walk through
1625 INSN_FORW_DEPS list of each instruction in the corresponding
1626 recovery block. */
1627
1628 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1629 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1630 if (!rec || rec == EXIT_BLOCK_PTR_FOR_FN (cfun))
1631 {
1632 prev_first = PREV_INSN (insn);
1633 twin = insn;
1634 }
1635 else
1636 {
1637 prev_first = NEXT_INSN (BB_HEAD (rec));
1638 twin = PREV_INSN (BB_END (rec));
1639 }
1640
1641 do
1642 {
1643 sd_iterator_def sd_it;
1644 dep_t dep;
1645
1646 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1647 {
1648 rtx_insn *next;
1649 int next_priority;
1650
1651 next = DEP_CON (dep);
1652
1653 if (BLOCK_FOR_INSN (next) != rec)
1654 {
1655 int cost;
1656
1657 if (!contributes_to_priority_p (dep))
1658 continue;
1659
1660 if (twin == insn)
1661 cost = dep_cost (dep);
1662 else
1663 {
1664 struct _dep _dep1, *dep1 = &_dep1;
1665
1666 init_dep (dep1, insn, next, REG_DEP_ANTI);
1667
1668 cost = dep_cost (dep1);
1669 }
1670
1671 next_priority = cost + priority (next);
1672
1673 if (next_priority > this_priority)
1674 this_priority = next_priority;
1675 }
1676 }
1677
1678 twin = PREV_INSN (twin);
1679 }
1680 while (twin != prev_first);
1681 }
1682
1683 if (this_priority < 0)
1684 {
1685 gcc_assert (this_priority == -1);
1686
1687 this_priority = insn_cost (insn);
1688 }
1689
1690 INSN_PRIORITY (insn) = this_priority;
1691 INSN_PRIORITY_STATUS (insn) = 1;
1692 }
1693
1694 return INSN_PRIORITY (insn);
1695 }
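
/* A self-contained sketch of the critical-path recursion above: an insn
   with no forward dependences gets its own cost as priority, and any
   other insn gets the largest "dependence cost + consumer priority" over
   its forward dependences.  The arrays and names are hypothetical, and
   both the memoization done through INSN_PRIORITY_KNOWN and the
   recovery-block special case are omitted.  */

#define TOY_MAX_SUCCS 4

struct toy_node
{
  int cost;				/* Latency of the insn itself.  */
  int n_succs;				/* Number of forward dependences.  */
  int succ[TOY_MAX_SUCCS];		/* Indices of the consumers.  */
  int edge_cost[TOY_MAX_SUCCS];		/* Cost of each dependence.  */
};

static int
toy_priority (const struct toy_node *node, int i)
{
  int best = -1, k;

  for (k = 0; k < node[i].n_succs; k++)
    {
      int p = node[i].edge_cost[k] + toy_priority (node, node[i].succ[k]);
      if (p > best)
	best = p;
    }
  /* Insns nothing depends on fall back to their own cost.  */
  return best < 0 ? node[i].cost : best;
}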
1696 \f
1697 /* Macros and functions for keeping the priority queue sorted, and
1698 dealing with queuing and dequeuing of instructions. */
1699
1700 /* For each pressure class CL, set DEATH[CL] to the number of registers
1701 in that class that die in INSN. */
1702
1703 static void
1704 calculate_reg_deaths (rtx_insn *insn, int *death)
1705 {
1706 int i;
1707 struct reg_use_data *use;
1708
1709 for (i = 0; i < ira_pressure_classes_num; i++)
1710 death[ira_pressure_classes[i]] = 0;
1711 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1712 if (dying_use_p (use))
1713 mark_regno_birth_or_death (0, death, use->regno, true);
1714 }
1715
1716 /* Setup info about the current register pressure impact of scheduling
1717 INSN at the current scheduling point. */
1718 static void
1719 setup_insn_reg_pressure_info (rtx_insn *insn)
1720 {
1721 int i, change, before, after, hard_regno;
1722 int excess_cost_change;
1723 enum machine_mode mode;
1724 enum reg_class cl;
1725 struct reg_pressure_data *pressure_info;
1726 int *max_reg_pressure;
1727 static int death[N_REG_CLASSES];
1728
1729 gcc_checking_assert (!DEBUG_INSN_P (insn));
1730
1731 excess_cost_change = 0;
1732 calculate_reg_deaths (insn, death);
1733 pressure_info = INSN_REG_PRESSURE (insn);
1734 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1735 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1736 for (i = 0; i < ira_pressure_classes_num; i++)
1737 {
1738 cl = ira_pressure_classes[i];
1739 gcc_assert (curr_reg_pressure[cl] >= 0);
1740 change = (int) pressure_info[i].set_increase - death[cl];
1741 before = MAX (0, max_reg_pressure[i] - sched_class_regs_num[cl]);
1742 after = MAX (0, max_reg_pressure[i] + change
1743 - sched_class_regs_num[cl]);
1744 hard_regno = ira_class_hard_regs[cl][0];
1745 gcc_assert (hard_regno >= 0);
1746 mode = reg_raw_mode[hard_regno];
1747 excess_cost_change += ((after - before)
1748 * (ira_memory_move_cost[mode][cl][0]
1749 + ira_memory_move_cost[mode][cl][1]));
1750 }
1751 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1752 }
1753 \f
1754 /* This is the first page of code related to SCHED_PRESSURE_MODEL.
1755 It tries to make the scheduler take register pressure into account
1756 without introducing too many unnecessary stalls. It hooks into the
1757 main scheduling algorithm at several points:
1758
1759 - Before scheduling starts, model_start_schedule constructs a
1760 "model schedule" for the current block. This model schedule is
1761 chosen solely to keep register pressure down. It does not take the
1762 target's pipeline or the original instruction order into account,
1763 except as a tie-breaker. It also doesn't work to a particular
1764 pressure limit.
1765
1766 This model schedule gives us an idea of what pressure can be
1767 achieved for the block and gives us an example of a schedule that
1768 keeps to that pressure. It also makes the final schedule less
1769 dependent on the original instruction order. This is important
1770 because the original order can either be "wide" (many values live
1771 at once, such as in user-scheduled code) or "narrow" (few values
1772 live at once, such as after loop unrolling, where several
1773 iterations are executed sequentially).
1774
1775 We do not apply this model schedule to the rtx stream. We simply
1776 record it in model_schedule. We also compute the maximum pressure,
1777 MP, that was seen during this schedule.
1778
1779 - Instructions are added to the ready queue even if they require
1780 a stall. The length of the stall is instead computed as:
1781
1782 MAX (INSN_TICK (INSN) - clock_var, 0)
1783
1784 (= insn_delay). This allows rank_for_schedule to choose between
1785 introducing a deliberate stall or increasing pressure.
1786
1787 - Before sorting the ready queue, model_set_excess_costs assigns
1788 a pressure-based cost to each ready instruction in the queue.
1789 This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
1790 (ECC for short) and is effectively measured in cycles.
1791
1792 - rank_for_schedule ranks instructions based on:
1793
1794 ECC (insn) + insn_delay (insn)
1795
1796 then as:
1797
1798 insn_delay (insn)
1799
1800 So, for example, an instruction X1 with an ECC of 1 that can issue
1801 now will win over an instruction X0 with an ECC of zero that would
1802 introduce a stall of one cycle. However, an instruction X2 with an
1803 ECC of 2 that can issue now will lose to both X0 and X1.
1804
1805 - When an instruction is scheduled, model_recompute updates the model
1806 schedule with the new pressures (some of which might now exceed the
1807 original maximum pressure MP). model_update_limit_points then searches
1808 for the new point of maximum pressure, if not already known. */
1809
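/* The function below is an illustrative, self-contained sketch (with
   hypothetical names, not used by the pass) of the two-stage comparison
   described above: instructions are ranked first by ECC + delay and then
   by delay alone, using the X0/X1/X2 numbers from the comment.  */

struct model_rank_example_insn
{
  int ecc;		/* INSN_REG_PRESSURE_EXCESS_COST_CHANGE, in cycles.  */
  int delay;		/* MAX (INSN_TICK - clock_var, 0), in cycles.  */
};

/* Return a negative value if A should issue before B, a positive value
   if B should issue before A, and zero if the model has no preference.  */

static ATTRIBUTE_UNUSED int
model_rank_example (const struct model_rank_example_insn *a,
		    const struct model_rank_example_insn *b)
{
  int diff = (a->ecc + a->delay) - (b->ecc + b->delay);
  if (diff != 0)
    return diff;
  return a->delay - b->delay;
}

/* With X0 = {0, 1}, X1 = {1, 0} and X2 = {2, 0}:
   model_rank_example (&x1, &x0) < 0, so X1 wins over X0, while
   model_rank_example (&x2, &x0) > 0 and model_rank_example (&x2, &x1) > 0,
   so X2 loses to both.  */
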
1810 /* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
1811 from surrounding debug information. */
1812 #define MODEL_BAR \
1813 ";;\t\t+------------------------------------------------------\n"
1814
1815 /* Information about the pressure on a particular register class at a
1816 particular point of the model schedule. */
1817 struct model_pressure_data {
1818 /* The pressure at this point of the model schedule, or -1 if the
1819 point is associated with an instruction that has already been
1820 scheduled. */
1821 int ref_pressure;
1822
1823 /* The maximum pressure during or after this point of the model schedule. */
1824 int max_pressure;
1825 };
1826
1827 /* Per-instruction information that is used while building the model
1828 schedule. Here, "schedule" refers to the model schedule rather
1829 than the main schedule. */
1830 struct model_insn_info {
1831 /* The instruction itself. */
1832 rtx_insn *insn;
1833
1834 /* If this instruction is in model_worklist, these fields link to the
1835 previous (higher-priority) and next (lower-priority) instructions
1836 in the list. */
1837 struct model_insn_info *prev;
1838 struct model_insn_info *next;
1839
1840 /* While constructing the schedule, QUEUE_INDEX describes whether an
1841 instruction has already been added to the schedule (QUEUE_SCHEDULED),
1842 is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
1843 old_queue records the value that QUEUE_INDEX had before scheduling
1844 started, so that we can restore it once the schedule is complete. */
1845 int old_queue;
1846
1847 /* The relative importance of an unscheduled instruction. Higher
1848 values indicate greater importance. */
1849 unsigned int model_priority;
1850
1851 /* The length of the longest path of satisfied true dependencies
1852 that leads to this instruction. */
1853 unsigned int depth;
1854
1855 /* The length of the longest path of dependencies of any kind
1856 that leads from this instruction. */
1857 unsigned int alap;
1858
1859 /* The number of predecessor nodes that must still be scheduled. */
1860 int unscheduled_preds;
1861 };
1862
1863 /* Information about the pressure limit for a particular register class.
1864 This structure is used when applying a model schedule to the main
1865 schedule. */
1866 struct model_pressure_limit {
1867 /* The maximum register pressure seen in the original model schedule. */
1868 int orig_pressure;
1869
1870 /* The maximum register pressure seen in the current model schedule
1871 (which excludes instructions that have already been scheduled). */
1872 int pressure;
1873
1874 /* The point of the current model schedule at which PRESSURE is first
1875 reached. It is set to -1 if the value needs to be recomputed. */
1876 int point;
1877 };
1878
1879 /* Describes a particular way of measuring register pressure. */
1880 struct model_pressure_group {
1881 /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
1882 struct model_pressure_limit limits[N_REG_CLASSES];
1883
1884 /* Index (POINT * ira_pressure_classes_num + PCI) describes the pressure
1885 on register class ira_pressure_classes[PCI] at point POINT of the
1886 current model schedule. A POINT of model_num_insns describes the
1887 pressure at the end of the schedule. */
1888 struct model_pressure_data *model;
1889 };
1890
1891 /* Index POINT gives the instruction at point POINT of the model schedule.
1892 This array doesn't change during main scheduling. */
1893 static vec<rtx_insn *> model_schedule;
1894
1895 /* The list of instructions in the model worklist, sorted in order of
1896 decreasing priority. */
1897 static struct model_insn_info *model_worklist;
1898
1899 /* Index I describes the instruction with INSN_LUID I. */
1900 static struct model_insn_info *model_insns;
1901
1902 /* The number of instructions in the model schedule. */
1903 static int model_num_insns;
1904
1905 /* The index of the first instruction in model_schedule that hasn't yet been
1906 added to the main schedule, or model_num_insns if all of them have. */
1907 static int model_curr_point;
1908
1909 /* Describes the pressure before each instruction in the model schedule. */
1910 static struct model_pressure_group model_before_pressure;
1911
1912 /* The first unused model_priority value (as used in model_insn_info). */
1913 static unsigned int model_next_priority;
1914
1915
1916 /* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
1917 at point POINT of the model schedule. */
1918 #define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
1919 (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
1920
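/* Illustrative sketch only (hypothetical helper, not used by the pass):
   the arithmetic behind MODEL_PRESSURE_DATA.  GROUP->model is stored
   row-major, with one row of ira_pressure_classes_num entries per
   schedule point and one extra row for the end of the schedule.  */

static ATTRIBUTE_UNUSED struct model_pressure_data *
model_pressure_data_example (struct model_pressure_group *group,
			     int point, int pci)
{
  /* Row POINT starts at POINT * ira_pressure_classes_num; column PCI
     corresponds to ira_pressure_classes[PCI].  */
  return &group->model[point * ira_pressure_classes_num + pci];
}
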
1921 /* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
1922 after point POINT of the model schedule. */
1923 #define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
1924 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
1925
1926 /* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
1927 of the model schedule. */
1928 #define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
1929 (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
1930
1931 /* Information about INSN that is used when creating the model schedule. */
1932 #define MODEL_INSN_INFO(INSN) \
1933 (&model_insns[INSN_LUID (INSN)])
1934
1935 /* The instruction at point POINT of the model schedule. */
1936 #define MODEL_INSN(POINT) \
1937 (model_schedule[POINT])
1938
1939
1940 /* Return INSN's index in the model schedule, or model_num_insns if it
1941 doesn't belong to that schedule. */
1942
1943 static int
1944 model_index (rtx_insn *insn)
1945 {
1946 if (INSN_MODEL_INDEX (insn) == 0)
1947 return model_num_insns;
1948 return INSN_MODEL_INDEX (insn) - 1;
1949 }
1950
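/* Illustrative sketch only (hypothetical helpers, not used by the pass):
   INSN_MODEL_INDEX stores the model-schedule position biased by one so
   that the default value of 0 can mean "not in the model schedule".
   model_add_to_schedule performs the encoding; model_index above is the
   matching decode.  */

static ATTRIBUTE_UNUSED int
model_index_encode_example (int point)
{
  return point + 1;
}

static ATTRIBUTE_UNUSED int
model_index_decode_example (int stored)
{
  return stored == 0 ? model_num_insns : stored - 1;
}
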
1951 /* Make sure that GROUP->limits is up-to-date for the current point
1952 of the model schedule. */
1953
1954 static void
1955 model_update_limit_points_in_group (struct model_pressure_group *group)
1956 {
1957 int pci, max_pressure, point;
1958
1959 for (pci = 0; pci < ira_pressure_classes_num; pci++)
1960 {
1961 /* We may have passed the final point at which the pressure in
1962 group->limits[pci].pressure was reached. Update the limit if so. */
1963 max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
1964 group->limits[pci].pressure = max_pressure;
1965
1966 /* Find the point at which MAX_PRESSURE is first reached. We need
1967 to search in three cases:
1968
1969 - We've already moved past the previous pressure point.
1970 In this case we search forward from model_curr_point.
1971
1972 - We scheduled the previous point of maximum pressure ahead of
1973 its position in the model schedule, but doing so didn't bring
1974 the pressure point earlier. In this case we search forward
1975 from that previous pressure point.
1976
1977 - Scheduling an instruction early caused the maximum pressure
1978 to decrease. In this case we will have set the pressure
1979 point to -1, and we search forward from model_curr_point. */
1980 point = MAX (group->limits[pci].point, model_curr_point);
1981 while (point < model_num_insns
1982 && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
1983 point++;
1984 group->limits[pci].point = point;
1985
1986 gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
1987 gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
1988 }
1989 }
1990
1991 /* Make sure that all register-pressure limits are up-to-date for the
1992 current position in the model schedule. */
1993
1994 static void
1995 model_update_limit_points (void)
1996 {
1997 model_update_limit_points_in_group (&model_before_pressure);
1998 }
1999
2000 /* Return the model_index of the last unscheduled use in chain USE
2001 outside of USE's instruction. Return -1 if there are no other uses,
2002 or model_num_insns if the register is live at the end of the block. */
2003
2004 static int
2005 model_last_use_except (struct reg_use_data *use)
2006 {
2007 struct reg_use_data *next;
2008 int last, index;
2009
2010 last = -1;
2011 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
2012 if (NONDEBUG_INSN_P (next->insn)
2013 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
2014 {
2015 index = model_index (next->insn);
2016 if (index == model_num_insns)
2017 return model_num_insns;
2018 if (last < index)
2019 last = index;
2020 }
2021 return last;
2022 }
2023
2024 /* An instruction with model_index POINT has just been scheduled, and it
2025 adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
2026 Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
2027 MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
2028
2029 static void
2030 model_start_update_pressure (struct model_pressure_group *group,
2031 int point, int pci, int delta)
2032 {
2033 int next_max_pressure;
2034
2035 if (point == model_num_insns)
2036 {
2037 /* The instruction wasn't part of the model schedule; it was moved
2038 from a different block. Update the pressure for the end of
2039 the model schedule. */
2040 MODEL_REF_PRESSURE (group, point, pci) += delta;
2041 MODEL_MAX_PRESSURE (group, point, pci) += delta;
2042 }
2043 else
2044 {
2045 /* Record that this instruction has been scheduled. Nothing now
2046 changes between POINT and POINT + 1, so get the maximum pressure
2047 from the latter. If the maximum pressure decreases, the new
2048 pressure point may be before POINT. */
2049 MODEL_REF_PRESSURE (group, point, pci) = -1;
2050 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2051 if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
2052 {
2053 MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
2054 if (group->limits[pci].point == point)
2055 group->limits[pci].point = -1;
2056 }
2057 }
2058 }
2059
2060 /* Record that scheduling a later instruction has changed the pressure
2061 at point POINT of the model schedule by DELTA (which might be 0).
2062 Update GROUP accordingly. Return nonzero if these changes might
2063 trigger changes to previous points as well. */
2064
2065 static int
2066 model_update_pressure (struct model_pressure_group *group,
2067 int point, int pci, int delta)
2068 {
2069 int ref_pressure, max_pressure, next_max_pressure;
2070
2071 /* If POINT hasn't yet been scheduled, update its pressure. */
2072 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
2073 if (ref_pressure >= 0 && delta != 0)
2074 {
2075 ref_pressure += delta;
2076 MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
2077
2078 /* Check whether the maximum pressure in the overall schedule
2079 has increased. (This means that the MODEL_MAX_PRESSURE of
2080 every point <= POINT will need to increase too; see below.) */
2081 if (group->limits[pci].pressure < ref_pressure)
2082 group->limits[pci].pressure = ref_pressure;
2083
2084 /* If we are at maximum pressure, and the maximum pressure
2085 point was previously unknown or later than POINT,
2086 bring it forward. */
2087 if (group->limits[pci].pressure == ref_pressure
2088 && !IN_RANGE (group->limits[pci].point, 0, point))
2089 group->limits[pci].point = point;
2090
2091 /* If POINT used to be the point of maximum pressure, but isn't
2092 any longer, we need to recalculate it using a forward walk. */
2093 if (group->limits[pci].pressure > ref_pressure
2094 && group->limits[pci].point == point)
2095 group->limits[pci].point = -1;
2096 }
2097
2098 /* Update the maximum pressure at POINT. Changes here might also
2099 affect the maximum pressure at POINT - 1. */
2100 next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
2101 max_pressure = MAX (ref_pressure, next_max_pressure);
2102 if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
2103 {
2104 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
2105 return 1;
2106 }
2107 return 0;
2108 }
2109
2110 /* INSN has just been scheduled. Update the model schedule accordingly. */
2111
2112 static void
2113 model_recompute (rtx_insn *insn)
2114 {
2115 struct {
2116 int last_use;
2117 int regno;
2118 } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
2119 struct reg_use_data *use;
2120 struct reg_pressure_data *reg_pressure;
2121 int delta[N_REG_CLASSES];
2122 int pci, point, mix, new_last, cl, ref_pressure, queue;
2123 unsigned int i, num_uses, num_pending_births;
2124 bool print_p;
2125
2126 /* The destinations of INSN were previously live from POINT onwards, but are
2127 now live from model_curr_point onwards. Set up DELTA accordingly. */
2128 point = model_index (insn);
2129 reg_pressure = INSN_REG_PRESSURE (insn);
2130 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2131 {
2132 cl = ira_pressure_classes[pci];
2133 delta[cl] = reg_pressure[pci].set_increase;
2134 }
2135
2136 /* Record which registers previously died at POINT, but which now die
2137 before POINT. Adjust DELTA so that it represents the effect of
2138 this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
2139 registers that will be born in the range [model_curr_point, POINT). */
2140 num_uses = 0;
2141 num_pending_births = 0;
2142 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2143 {
2144 new_last = model_last_use_except (use);
2145 if (new_last < point)
2146 {
2147 gcc_assert (num_uses < ARRAY_SIZE (uses));
2148 uses[num_uses].last_use = new_last;
2149 uses[num_uses].regno = use->regno;
2150 /* This register is no longer live after POINT - 1. */
2151 mark_regno_birth_or_death (NULL, delta, use->regno, false);
2152 num_uses++;
2153 if (new_last >= 0)
2154 num_pending_births++;
2155 }
2156 }
2157
2158 /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
2159 Also set each group pressure limit for POINT. */
2160 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2161 {
2162 cl = ira_pressure_classes[pci];
2163 model_start_update_pressure (&model_before_pressure,
2164 point, pci, delta[cl]);
2165 }
2166
2167 /* Walk the model schedule backwards, starting immediately before POINT. */
2168 print_p = false;
2169 if (point != model_curr_point)
2170 do
2171 {
2172 point--;
2173 insn = MODEL_INSN (point);
2174 queue = QUEUE_INDEX (insn);
2175
2176 if (queue != QUEUE_SCHEDULED)
2177 {
2178 /* DELTA describes the effect of the move on the register pressure
2179 after POINT. Make it describe the effect on the pressure
2180 before POINT. */
2181 i = 0;
2182 while (i < num_uses)
2183 {
2184 if (uses[i].last_use == point)
2185 {
2186 /* This register is now live again. */
2187 mark_regno_birth_or_death (NULL, delta,
2188 uses[i].regno, true);
2189
2190 /* Remove this use from the array. */
2191 uses[i] = uses[num_uses - 1];
2192 num_uses--;
2193 num_pending_births--;
2194 }
2195 else
2196 i++;
2197 }
2198
2199 if (sched_verbose >= 5)
2200 {
2201 if (!print_p)
2202 {
2203 fprintf (sched_dump, MODEL_BAR);
2204 fprintf (sched_dump, ";;\t\t| New pressure for model"
2205 " schedule\n");
2206 fprintf (sched_dump, MODEL_BAR);
2207 print_p = true;
2208 }
2209
2210 fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
2211 point, INSN_UID (insn),
2212 str_pattern_slim (PATTERN (insn)));
2213 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2214 {
2215 cl = ira_pressure_classes[pci];
2216 ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
2217 point, pci);
2218 fprintf (sched_dump, " %s:[%d->%d]",
2219 reg_class_names[ira_pressure_classes[pci]],
2220 ref_pressure, ref_pressure + delta[cl]);
2221 }
2222 fprintf (sched_dump, "\n");
2223 }
2224 }
2225
2226 /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
2227 might have changed as well. */
2228 mix = num_pending_births;
2229 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2230 {
2231 cl = ira_pressure_classes[pci];
2232 mix |= delta[cl];
2233 mix |= model_update_pressure (&model_before_pressure,
2234 point, pci, delta[cl]);
2235 }
2236 }
2237 while (mix && point > model_curr_point);
2238
2239 if (print_p)
2240 fprintf (sched_dump, MODEL_BAR);
2241 }
2242
2243 /* After DEP, which was cancelled, has been resolved for insn NEXT,
2244 check whether the insn's pattern needs restoring. */
2245 static bool
2246 must_restore_pattern_p (rtx_insn *next, dep_t dep)
2247 {
2248 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
2249 return false;
2250
2251 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
2252 {
2253 gcc_assert (ORIG_PAT (next) != NULL_RTX);
2254 gcc_assert (next == DEP_CON (dep));
2255 }
2256 else
2257 {
2258 struct dep_replacement *desc = DEP_REPLACE (dep);
2259 if (desc->insn != next)
2260 {
2261 gcc_assert (*desc->loc == desc->orig);
2262 return false;
2263 }
2264 }
2265 return true;
2266 }
2267 \f
2268 /* model_spill_cost (CL, P, P') returns the cost of increasing the
2269 pressure on CL from P to P'. We use this to calculate a "base ECC",
2270 baseECC (CL, X), for each pressure class CL and each instruction X.
2271 Supposing X changes the pressure on CL from P to P', and that the
2272 maximum pressure on CL in the current model schedule is MP', then:
2273
2274 * if X occurs before or at the next point of maximum pressure in
2275 the model schedule and P' > MP', then:
2276
2277 baseECC (CL, X) = model_spill_cost (CL, MP, P')
2278
2279 The idea is that the pressure after scheduling a fixed set of
2280 instructions -- in this case, the set up to and including the
2281 next maximum pressure point -- is going to be the same regardless
2282 of the order; we simply want to keep the intermediate pressure
2283 under control. Thus X has a cost of zero unless scheduling it
2284 now would exceed MP'.
2285
2286 If all increases in the set are by the same amount, no zero-cost
2287 instruction will ever cause the pressure to exceed MP'. However,
2288 if X is instead moved past an instruction X' with pressure in the
2289 range (MP' - (P' - P), MP'), the pressure at X' will increase
2290 beyond MP'. Since baseECC is very much a heuristic anyway,
2291 it doesn't seem worth the overhead of tracking cases like these.
2292
2293 The cost of exceeding MP' is always based on the original maximum
2294 pressure MP. This is so that going 2 registers over the original
2295 limit has the same cost regardless of whether it comes from two
2296 separate +1 deltas or from a single +2 delta.
2297
2298 * if X occurs after the next point of maximum pressure in the model
2299 schedule and P' > P, then:
2300
2301 baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
2302
2303 That is, if we move X forward across a point of maximum pressure,
2304 and if X increases the pressure by P' - P, then we conservatively
2305 assume that scheduling X next would increase the maximum pressure
2306 by P' - P. Again, the cost of doing this is based on the original
2307 maximum pressure MP, for the same reason as above.
2308
2309 * if P' < P, P > MP, and X occurs at or after the next point of
2310 maximum pressure, then:
2311
2312 baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
2313
2314 That is, if we have already exceeded the original maximum pressure MP,
2315 and if X might reduce the maximum pressure again -- or at least push
2316 it further back, and thus allow more scheduling freedom -- it is given
2317 a negative cost to reflect the improvement.
2318
2319 * otherwise,
2320
2321 baseECC (CL, X) = 0
2322
2323 In this case, X is not expected to affect the maximum pressure MP',
2324 so it has zero cost.
2325
2326 We then create a combined value baseECC (X) that is the sum of
2327 baseECC (CL, X) for each pressure class CL.
2328
2329 baseECC (X) could itself be used as the ECC value described above.
2330 However, this is often too conservative, in the sense that it
2331 tends to make high-priority instructions that increase pressure
2332 wait too long in cases where introducing a spill would be better.
2333 For this reason the final ECC is a priority-adjusted form of
2334 baseECC (X). Specifically, we calculate:
2335
2336 P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
2337 baseP = MAX { P (X) | baseECC (X) <= 0 }
2338
2339 Then:
2340
2341 ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
2342
2343 Thus an instruction's effect on pressure is ignored if it has a high
2344 enough priority relative to the ones that don't increase pressure.
2345 Negative values of baseECC (X) do not increase the priority of X
2346 itself, but they do make it harder for other instructions to
2347 increase the pressure further.
2348
2349 This pressure cost is deliberately timid. The intention has been
2350 to choose a heuristic that rarely interferes with the normal list
2351 scheduler in cases where that scheduler would produce good code.
2352 We simply want to curb some of its worst excesses. */
2353
2354 /* Return the cost of increasing the pressure in class CL from FROM to TO.
2355
2356 Here we use the very simplistic cost model that every register above
2357 sched_class_regs_num[CL] has a spill cost of 1. We could use other
2358 measures instead, such as one based on MEMORY_MOVE_COST. However:
2359
2360 (1) In order for an instruction to be scheduled, the higher cost
2361 would need to be justified in a single saving of that many stalls.
2362 This is overly pessimistic, because the benefit of spilling is
2363 often to avoid a sequence of several short stalls rather than
2364 a single long one.
2365
2366 (2) The cost is still arbitrary. Because we are not allocating
2367 registers during scheduling, we have no way of knowing for
2368 sure how many memory accesses will be required by each spill,
2369 where the spills will be placed within the block, or even
2370 which block(s) will contain the spills.
2371
2372 So a higher cost than 1 is often too conservative in practice,
2373 forcing blocks to contain unnecessary stalls instead of spill code.
2374 The simple cost below seems to be the best compromise. It reduces
2375 the interference with the normal list scheduler, which helps make
2376 it more suitable for a default-on option. */
2377
2378 static int
2379 model_spill_cost (int cl, int from, int to)
2380 {
2381 from = MAX (from, sched_class_regs_num[cl]);
2382 return MAX (to, from) - from;
2383 }
2384
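/* Worked example (illustrative only, not used by the pass): a few values
   of the cost formula above, with a hypothetical class limit of 4
   standing in for sched_class_regs_num[CL].  */

static ATTRIBUTE_UNUSED void
model_spill_cost_example (void)
{
  const int limit = 4;
  int from, to;

  /* Raising the pressure from 3 to 6: only registers 5 and 6 exceed the
     limit, so the cost is 2 (FROM is clamped up to the limit).  */
  from = MAX (3, limit), to = 6;
  gcc_assert (MAX (to, from) - from == 2);

  /* Raising it from 5 to 6 costs 1, so two +1 steps from 4 to 6 cost the
     same as a single +2 step, as required by the baseECC comment above.  */
  from = MAX (5, limit), to = 6;
  gcc_assert (MAX (to, from) - from == 1);

  /* A decrease never has a positive cost.  */
  from = MAX (6, limit), to = 5;
  gcc_assert (MAX (to, from) - from == 0);
}
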
2385 /* Return baseECC (ira_pressure_classes[PCI], POINT), given that
2386 P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
2387 P' = P + DELTA. */
2388
2389 static int
2390 model_excess_group_cost (struct model_pressure_group *group,
2391 int point, int pci, int delta)
2392 {
2393 int pressure, cl;
2394
2395 cl = ira_pressure_classes[pci];
2396 if (delta < 0 && point >= group->limits[pci].point)
2397 {
2398 pressure = MAX (group->limits[pci].orig_pressure,
2399 curr_reg_pressure[cl] + delta);
2400 return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
2401 }
2402
2403 if (delta > 0)
2404 {
2405 if (point > group->limits[pci].point)
2406 pressure = group->limits[pci].pressure + delta;
2407 else
2408 pressure = curr_reg_pressure[cl] + delta;
2409
2410 if (pressure > group->limits[pci].pressure)
2411 return model_spill_cost (cl, group->limits[pci].orig_pressure,
2412 pressure);
2413 }
2414
2415 return 0;
2416 }
2417
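/* Worked example (made-up numbers, for illustration only): suppose that
   for class CL we have curr_reg_pressure[CL] == 6, an original maximum
   pressure (orig_pressure) of 8 and a current maximum (pressure) of 9.
   An insn with DELTA == +2 at or before the maximum-pressure point gives
   P' == 8, which does not exceed the current maximum of 9, so its
   baseECC contribution is 0.  The same insn after that point gives
   9 + 2 == 11 and contributes model_spill_cost (CL, 8, 11) == 3,
   assuming sched_class_regs_num[CL] <= 8.  */
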
2418 /* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
2419 if PRINT_P. */
2420
2421 static int
2422 model_excess_cost (rtx_insn *insn, bool print_p)
2423 {
2424 int point, pci, cl, cost, this_cost, delta;
2425 struct reg_pressure_data *insn_reg_pressure;
2426 int insn_death[N_REG_CLASSES];
2427
2428 calculate_reg_deaths (insn, insn_death);
2429 point = model_index (insn);
2430 insn_reg_pressure = INSN_REG_PRESSURE (insn);
2431 cost = 0;
2432
2433 if (print_p)
2434 fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
2435 INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
2436
2437 /* Sum up the individual costs for each register class. */
2438 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2439 {
2440 cl = ira_pressure_classes[pci];
2441 delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
2442 this_cost = model_excess_group_cost (&model_before_pressure,
2443 point, pci, delta);
2444 cost += this_cost;
2445 if (print_p)
2446 fprintf (sched_dump, " %s:[%d base cost %d]",
2447 reg_class_names[cl], delta, this_cost);
2448 }
2449
2450 if (print_p)
2451 fprintf (sched_dump, "\n");
2452
2453 return cost;
2454 }
2455
2456 /* Dump the next points of maximum pressure for GROUP. */
2457
2458 static void
2459 model_dump_pressure_points (struct model_pressure_group *group)
2460 {
2461 int pci, cl;
2462
2463 fprintf (sched_dump, ";;\t\t| pressure points");
2464 for (pci = 0; pci < ira_pressure_classes_num; pci++)
2465 {
2466 cl = ira_pressure_classes[pci];
2467 fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
2468 curr_reg_pressure[cl], group->limits[pci].pressure);
2469 if (group->limits[pci].point < model_num_insns)
2470 fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
2471 INSN_UID (MODEL_INSN (group->limits[pci].point)));
2472 else
2473 fprintf (sched_dump, "end]");
2474 }
2475 fprintf (sched_dump, "\n");
2476 }
2477
2478 /* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
2479
2480 static void
2481 model_set_excess_costs (rtx_insn **insns, int count)
2482 {
2483 int i, cost, priority_base, priority;
2484 bool print_p;
2485
2486 /* Record the baseECC value for each instruction in the model schedule,
2487 except that negative costs are converted to zero ones now rather than
2488 later. Do not assign a cost to debug instructions, since they must
2489 not change code-generation decisions. Experiments suggest we also
2490 get better results by not assigning a cost to instructions from
2491 a different block.
2492
2493 Set PRIORITY_BASE to baseP in the block comment above. This is the
2494 maximum priority of the "cheap" instructions, which should always
2495 include the next model instruction. */
2496 priority_base = 0;
2497 print_p = false;
2498 for (i = 0; i < count; i++)
2499 if (INSN_MODEL_INDEX (insns[i]))
2500 {
2501 if (sched_verbose >= 6 && !print_p)
2502 {
2503 fprintf (sched_dump, MODEL_BAR);
2504 fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
2505 model_dump_pressure_points (&model_before_pressure);
2506 fprintf (sched_dump, MODEL_BAR);
2507 print_p = true;
2508 }
2509 cost = model_excess_cost (insns[i], print_p);
2510 if (cost <= 0)
2511 {
2512 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
2513 priority_base = MAX (priority_base, priority);
2514 cost = 0;
2515 }
2516 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
2517 }
2518 if (print_p)
2519 fprintf (sched_dump, MODEL_BAR);
2520
2521 /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
2522 instruction. */
2523 for (i = 0; i < count; i++)
2524 {
2525 cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
2526 priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
2527 if (cost > 0 && priority > priority_base)
2528 {
2529 cost += priority_base - priority;
2530 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
2531 }
2532 }
2533 }
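
/* Illustrative sketch only (hypothetical helper, not used by the pass):
   the ECC adjustment above in functional form.  PRIORITY, DELAY and
   BASE_ECC are an instruction's INSN_PRIORITY, insn_delay and baseECC;
   BASE_P is the baseP value from the comment before model_spill_cost.  */

static ATTRIBUTE_UNUSED int
model_ecc_example (int priority, int delay, int base_ecc, int base_p)
{
  /* P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X).  */
  int p = priority - delay - base_ecc;

  /* ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0).  */
  return MAX (MIN (base_p - p, base_ecc), 0);
}

/* For example, with baseP == 10, an instruction with priority 14, no
   delay and a baseECC of 3 has P (X) == 11 and therefore an ECC of
   MAX (MIN (-1, 3), 0) == 0: its priority is high enough for its
   pressure cost to be ignored, exactly as the second loop above does.  */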
2534 \f
2535
2536 /* Enum of rank_for_schedule heuristic decisions. */
2537 enum rfs_decision {
2538 RFS_DEBUG, RFS_LIVE_RANGE_SHRINK1, RFS_LIVE_RANGE_SHRINK2,
2539 RFS_SCHED_GROUP, RFS_PRESSURE_DELAY, RFS_PRESSURE_TICK,
2540 RFS_FEEDS_BACKTRACK_INSN, RFS_PRIORITY, RFS_SPECULATION,
2541 RFS_SCHED_RANK, RFS_LAST_INSN, RFS_PRESSURE_INDEX,
2542 RFS_DEP_COUNT, RFS_TIE, RFS_N };
2543
2544 /* Corresponding strings for print outs. */
2545 static const char *rfs_str[RFS_N] = {
2546 "RFS_DEBUG", "RFS_LIVE_RANGE_SHRINK1", "RFS_LIVE_RANGE_SHRINK2",
2547 "RFS_SCHED_GROUP", "RFS_PRESSURE_DELAY", "RFS_PRESSURE_TICK",
2548 "RFS_FEEDS_BACKTRACK_INSN", "RFS_PRIORITY", "RFS_SPECULATION",
2549 "RFS_SCHED_RANK", "RFS_LAST_INSN", "RFS_PRESSURE_INDEX",
2550 "RFS_DEP_COUNT", "RFS_TIE" };
2551
2552 /* Statistical breakdown of rank_for_schedule decisions. */
2553 typedef struct { unsigned stats[RFS_N]; } rank_for_schedule_stats_t;
2554 static rank_for_schedule_stats_t rank_for_schedule_stats;
2555
2556 static int
2557 rfs_result (enum rfs_decision decision, int result)
2558 {
2559 ++rank_for_schedule_stats.stats[decision];
2560 return result;
2561 }
2562
2563 /* Returns a positive value if x is preferred; returns a negative value if
2564 y is preferred. Should never return 0, since that would leave the
2565 relative order of tied insns to qsort, which is unspecified. */
2566
2567 static int
2568 rank_for_schedule (const void *x, const void *y)
2569 {
2570 rtx_insn *tmp = *(rtx_insn * const *) y;
2571 rtx_insn *tmp2 = *(rtx_insn * const *) x;
2572 int tmp_class, tmp2_class;
2573 int val, priority_val, info_val, diff;
2574
2575 if (MAY_HAVE_DEBUG_INSNS)
2576 {
2577 /* Schedule debug insns as early as possible. */
2578 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
2579 return rfs_result (RFS_DEBUG, -1);
2580 else if (!DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2581 return rfs_result (RFS_DEBUG, 1);
2582 else if (DEBUG_INSN_P (tmp) && DEBUG_INSN_P (tmp2))
2583 return rfs_result (RFS_DEBUG, INSN_LUID (tmp) - INSN_LUID (tmp2));
2584 }
2585
2586 if (live_range_shrinkage_p)
2587 {
2588 /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
2589 code. */
2590 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
2591 if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
2592 || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
2593 && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2594 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
2595 return rfs_result (RFS_LIVE_RANGE_SHRINK1, diff);
2596 /* Sort by INSN_LUID (original insn order), so that we make the
2597 sort stable. This minimizes instruction movement, thus
2598 minimizing sched's effect on debugging and cross-jumping. */
2599 return rfs_result (RFS_LIVE_RANGE_SHRINK2,
2600 INSN_LUID (tmp) - INSN_LUID (tmp2));
2601 }
2602
2603 /* The insn in a schedule group should be issued first. */
2604 if (flag_sched_group_heuristic &&
2605 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
2606 return rfs_result (RFS_SCHED_GROUP, SCHED_GROUP_P (tmp2) ? 1 : -1);
2607
2608 /* Make sure that priority of TMP and TMP2 are initialized. */
2609 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
2610
2611 if (sched_pressure != SCHED_PRESSURE_NONE)
2612 {
2613 /* Prefer insn whose scheduling results in the smallest register
2614 pressure excess. */
2615 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
2616 + insn_delay (tmp)
2617 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
2618 - insn_delay (tmp2))))
2619 return rfs_result (RFS_PRESSURE_DELAY, diff);
2620 }
2621
2622 if (sched_pressure != SCHED_PRESSURE_NONE
2623 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var)
2624 && INSN_TICK (tmp2) != INSN_TICK (tmp))
2625 {
2626 diff = INSN_TICK (tmp) - INSN_TICK (tmp2);
2627 return rfs_result (RFS_PRESSURE_TICK, diff);
2628 }
2629
2630 /* If we are doing backtracking in this schedule, prefer insns that
2631 have forward dependencies with negative cost against an insn that
2632 was already scheduled. */
2633 if (current_sched_info->flags & DO_BACKTRACKING)
2634 {
2635 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
2636 if (priority_val)
2637 return rfs_result (RFS_FEEDS_BACKTRACK_INSN, priority_val);
2638 }
2639
2640 /* Prefer insn with higher priority. */
2641 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
2642
2643 if (flag_sched_critical_path_heuristic && priority_val)
2644 return rfs_result (RFS_PRIORITY, priority_val);
2645
2646 /* Prefer speculative insn with greater dependencies weakness. */
2647 if (flag_sched_spec_insn_heuristic && spec_info)
2648 {
2649 ds_t ds1, ds2;
2650 dw_t dw1, dw2;
2651 int dw;
2652
2653 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
2654 if (ds1)
2655 dw1 = ds_weak (ds1);
2656 else
2657 dw1 = NO_DEP_WEAK;
2658
2659 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
2660 if (ds2)
2661 dw2 = ds_weak (ds2);
2662 else
2663 dw2 = NO_DEP_WEAK;
2664
2665 dw = dw2 - dw1;
2666 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
2667 return rfs_result (RFS_SPECULATION, dw);
2668 }
2669
2670 info_val = (*current_sched_info->rank) (tmp, tmp2);
2671 if (flag_sched_rank_heuristic && info_val)
2672 return rfs_result (RFS_SCHED_RANK, info_val);
2673
2674 /* Compare insns based on their relation to the last scheduled
2675 non-debug insn. */
2676 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
2677 {
2678 dep_t dep1;
2679 dep_t dep2;
2680 rtx last = last_nondebug_scheduled_insn;
2681
2682 /* Classify the instructions into three classes:
2683 1) Data dependent on last scheduled insn.
2684 2) Anti/Output dependent on last scheduled insn.
2685 3) Independent of last scheduled insn, or has latency of one.
2686 Choose the insn from the highest numbered class if different. */
2687 dep1 = sd_find_dep_between (last, tmp, true);
2688
2689 if (dep1 == NULL || dep_cost (dep1) == 1)
2690 tmp_class = 3;
2691 else if (/* Data dependence. */
2692 DEP_TYPE (dep1) == REG_DEP_TRUE)
2693 tmp_class = 1;
2694 else
2695 tmp_class = 2;
2696
2697 dep2 = sd_find_dep_between (last, tmp2, true);
2698
2699 if (dep2 == NULL || dep_cost (dep2) == 1)
2700 tmp2_class = 3;
2701 else if (/* Data dependence. */
2702 DEP_TYPE (dep2) == REG_DEP_TRUE)
2703 tmp2_class = 1;
2704 else
2705 tmp2_class = 2;
2706
2707 if ((val = tmp2_class - tmp_class))
2708 return rfs_result (RFS_LAST_INSN, val);
2709 }
2710
2711 /* Prefer instructions that occur earlier in the model schedule. */
2712 if (sched_pressure == SCHED_PRESSURE_MODEL
2713 && INSN_BB (tmp) == target_bb && INSN_BB (tmp2) == target_bb)
2714 {
2715 diff = model_index (tmp) - model_index (tmp2);
2716 gcc_assert (diff != 0);
2717 return rfs_result (RFS_PRESSURE_INDEX, diff);
2718 }
2719
2720 /* Prefer the insn which has more later insns that depend on it.
2721 This gives the scheduler more freedom when scheduling later
2722 instructions at the expense of added register pressure. */
2723
2724 val = (dep_list_size (tmp2, SD_LIST_FORW)
2725 - dep_list_size (tmp, SD_LIST_FORW));
2726
2727 if (flag_sched_dep_count_heuristic && val != 0)
2728 return rfs_result (RFS_DEP_COUNT, val);
2729
2730 /* If insns are equally good, sort by INSN_LUID (original insn order),
2731 so that we make the sort stable. This minimizes instruction movement,
2732 thus minimizing sched's effect on debugging and cross-jumping. */
2733 return rfs_result (RFS_TIE, INSN_LUID (tmp) - INSN_LUID (tmp2));
2734 }
2735
2736 /* Resort the array A of N elements, in which only the last element may be out of order. */
2737
2738 HAIFA_INLINE static void
2739 swap_sort (rtx_insn **a, int n)
2740 {
2741 rtx_insn *insn = a[n - 1];
2742 int i = n - 2;
2743
2744 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
2745 {
2746 a[i + 1] = a[i];
2747 i -= 1;
2748 }
2749 a[i + 1] = insn;
2750 }
2751
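/* Illustrative sketch only (hypothetical helper, not used by the pass):
   swap_sort is a single insertion-sort step.  The version below does the
   same thing for a plain int array sorted in ascending order, where only
   A[N - 1] may be out of place.  */

static ATTRIBUTE_UNUSED void
swap_sort_int_example (int *a, int n)
{
  int v = a[n - 1];
  int i = n - 2;

  /* Shift larger elements up until the right slot for V is found.  */
  while (i >= 0 && a[i] > v)
    {
      a[i + 1] = a[i];
      i--;
    }
  a[i + 1] = v;
}
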
2752 /* Add INSN to the insn queue so that it can be executed at least
2753 N_CYCLES after the currently executing insn. Preserve insns
2754 chain for debugging purposes. REASON will be printed in debugging
2755 output. */
2756
2757 HAIFA_INLINE static void
2758 queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
2759 {
2760 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2761 rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
2762 int new_tick;
2763
2764 gcc_assert (n_cycles <= max_insn_queue_index);
2765 gcc_assert (!DEBUG_INSN_P (insn));
2766
2767 insn_queue[next_q] = link;
2768 q_size += 1;
2769
2770 if (sched_verbose >= 2)
2771 {
2772 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
2773 (*current_sched_info->print_insn) (insn, 0));
2774
2775 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
2776 }
2777
2778 QUEUE_INDEX (insn) = next_q;
2779
2780 if (current_sched_info->flags & DO_BACKTRACKING)
2781 {
2782 new_tick = clock_var + n_cycles;
2783 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
2784 INSN_TICK (insn) = new_tick;
2785
2786 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2787 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
2788 {
2789 must_backtrack = true;
2790 if (sched_verbose >= 2)
2791 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
2792 }
2793 }
2794 }
2795
2796 /* Remove INSN from queue. */
2797 static void
2798 queue_remove (rtx_insn *insn)
2799 {
2800 gcc_assert (QUEUE_INDEX (insn) >= 0);
2801 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
2802 q_size--;
2803 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
2804 }
2805
2806 /* Return a pointer to the bottom of the ready list, i.e. the insn
2807 with the lowest priority. */
2808
2809 rtx_insn **
2810 ready_lastpos (struct ready_list *ready)
2811 {
2812 gcc_assert (ready->n_ready >= 1);
2813 return ready->vec + ready->first - ready->n_ready + 1;
2814 }
2815
2816 /* Add an element INSN to the ready list so that it ends up with the
2817 lowest/highest priority depending on FIRST_P. */
2818
2819 HAIFA_INLINE static void
2820 ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
2821 {
2822 if (!first_p)
2823 {
2824 if (ready->first == ready->n_ready)
2825 {
2826 memmove (ready->vec + ready->veclen - ready->n_ready,
2827 ready_lastpos (ready),
2828 ready->n_ready * sizeof (rtx));
2829 ready->first = ready->veclen - 1;
2830 }
2831 ready->vec[ready->first - ready->n_ready] = insn;
2832 }
2833 else
2834 {
2835 if (ready->first == ready->veclen - 1)
2836 {
2837 if (ready->n_ready)
2838 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
2839 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
2840 ready_lastpos (ready),
2841 ready->n_ready * sizeof (rtx));
2842 ready->first = ready->veclen - 2;
2843 }
2844 ready->vec[++(ready->first)] = insn;
2845 }
2846
2847 ready->n_ready++;
2848 if (DEBUG_INSN_P (insn))
2849 ready->n_debug++;
2850
2851 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
2852 QUEUE_INDEX (insn) = QUEUE_READY;
2853
2854 if (INSN_EXACT_TICK (insn) != INVALID_TICK
2855 && INSN_EXACT_TICK (insn) < clock_var)
2856 {
2857 must_backtrack = true;
2858 }
2859 }
2860
2861 /* Remove the element with the highest priority from the ready list and
2862 return it. */
2863
2864 HAIFA_INLINE static rtx_insn *
2865 ready_remove_first (struct ready_list *ready)
2866 {
2867 rtx_insn *t;
2868
2869 gcc_assert (ready->n_ready);
2870 t = ready->vec[ready->first--];
2871 ready->n_ready--;
2872 if (DEBUG_INSN_P (t))
2873 ready->n_debug--;
2874 /* If the ready list becomes empty, reset it. */
2875 if (ready->n_ready == 0)
2876 ready->first = ready->veclen - 1;
2877
2878 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
2879 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2880
2881 return t;
2882 }
2883
2884 /* The following code implements multi-pass scheduling for the first
2885 cycle. In other words, we try to choose the ready insn that allows
2886 the maximum number of insns to start on the same cycle. */
2887
2888 /* Return element INDEX of the ready list. The insn with the highest
2889 priority has INDEX 0 and the insn with the lowest priority has
2890 INDEX N_READY - 1. */
2891
2892 rtx_insn *
2893 ready_element (struct ready_list *ready, int index)
2894 {
2895 gcc_assert (ready->n_ready && index < ready->n_ready);
2896
2897 return ready->vec[ready->first - index];
2898 }
2899
2900 /* Remove element INDEX from the ready list and return it. The insn
2901 with the highest priority has INDEX 0 and the insn with the lowest
2902 priority has INDEX N_READY - 1. */
2903
2904 HAIFA_INLINE static rtx_insn *
2905 ready_remove (struct ready_list *ready, int index)
2906 {
2907 rtx_insn *t;
2908 int i;
2909
2910 if (index == 0)
2911 return ready_remove_first (ready);
2912 gcc_assert (ready->n_ready && index < ready->n_ready);
2913 t = ready->vec[ready->first - index];
2914 ready->n_ready--;
2915 if (DEBUG_INSN_P (t))
2916 ready->n_debug--;
2917 for (i = index; i < ready->n_ready; i++)
2918 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
2919 QUEUE_INDEX (t) = QUEUE_NOWHERE;
2920 return t;
2921 }
2922
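/* Illustrative sketch only (hypothetical helper, not used by the pass):
   the layout that ready_lastpos, ready_element and ready_remove_first
   assume.  The N_READY entries occupy VEC[FIRST - N_READY + 1 .. FIRST],
   with the highest-priority insn at VEC[FIRST] and the lowest-priority
   insn at the position returned by ready_lastpos.  */

static ATTRIBUTE_UNUSED void
ready_layout_example (struct ready_list *ready)
{
  if (ready->n_ready == 0)
    return;

  /* The "bottom" of the list is the lowest-priority element...  */
  gcc_assert (ready_lastpos (ready)
	      == ready->vec + ready->first - ready->n_ready + 1);

  /* ...and element 0 is the highest-priority one, i.e. the insn that
     ready_remove_first would return.  */
  gcc_assert (ready_element (ready, 0) == ready->vec[ready->first]);
}
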
2923 /* Remove INSN from the ready list. */
2924 static void
2925 ready_remove_insn (rtx insn)
2926 {
2927 int i;
2928
2929 for (i = 0; i < readyp->n_ready; i++)
2930 if (ready_element (readyp, i) == insn)
2931 {
2932 ready_remove (readyp, i);
2933 return;
2934 }
2935 gcc_unreachable ();
2936 }
2937
2938 /* Calculate the difference between two sets of statistics, WAS and NOW.
2939 The result is returned in WAS. */
2940 static void
2941 rank_for_schedule_stats_diff (rank_for_schedule_stats_t *was,
2942 const rank_for_schedule_stats_t *now)
2943 {
2944 for (int i = 0; i < RFS_N; ++i)
2945 was->stats[i] = now->stats[i] - was->stats[i];
2946 }
2947
2948 /* Print rank_for_schedule statistics. */
2949 static void
2950 print_rank_for_schedule_stats (const char *prefix,
2951 const rank_for_schedule_stats_t *stats)
2952 {
2953 for (int i = 0; i < RFS_N; ++i)
2954 if (stats->stats[i])
2955 fprintf (sched_dump, "%s%20s: %u\n", prefix, rfs_str[i], stats->stats[i]);
2956 }
2957
2958 /* Sort the ready list READY by ascending priority, so that the insn
2959 with the highest priority ends up at ready_element (READY, 0). */
2960
2961 void
2962 ready_sort (struct ready_list *ready)
2963 {
2964 int i;
2965 rtx_insn **first = ready_lastpos (ready);
2966
2967 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2968 {
2969 for (i = 0; i < ready->n_ready; i++)
2970 if (!DEBUG_INSN_P (first[i]))
2971 setup_insn_reg_pressure_info (first[i]);
2972 }
2973 if (sched_pressure == SCHED_PRESSURE_MODEL
2974 && model_curr_point < model_num_insns)
2975 model_set_excess_costs (first, ready->n_ready);
2976
2977 rank_for_schedule_stats_t stats1;
2978 if (sched_verbose >= 4)
2979 stats1 = rank_for_schedule_stats;
2980
2981 if (ready->n_ready == 2)
2982 swap_sort (first, ready->n_ready);
2983 else if (ready->n_ready > 2)
2984 qsort (first, ready->n_ready, sizeof (rtx), rank_for_schedule);
2985
2986 if (sched_verbose >= 4)
2987 {
2988 rank_for_schedule_stats_diff (&stats1, &rank_for_schedule_stats);
2989 print_rank_for_schedule_stats (";;\t\t", &stats1);
2990 }
2991 }
2992
2993 /* PREV is an insn that is ready to execute. Adjust its priority if that
2994 will help shorten or lengthen register lifetimes as appropriate. Also
2995 provide a hook for the target to tweak itself. */
2996
2997 HAIFA_INLINE static void
2998 adjust_priority (rtx_insn *prev)
2999 {
3000 /* ??? There used to be code here to try and estimate how an insn
3001 affected register lifetimes, but it did it by looking at REG_DEAD
3002 notes, which we removed in schedule_region. Nor did it try to
3003 take into account register pressure or anything useful like that.
3004
3005 Revisit when we have a machine model to work with and not before. */
3006
3007 if (targetm.sched.adjust_priority)
3008 INSN_PRIORITY (prev) =
3009 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
3010 }
3011
3012 /* Advance DFA state STATE on one cycle. */
3013 void
3014 advance_state (state_t state)
3015 {
3016 if (targetm.sched.dfa_pre_advance_cycle)
3017 targetm.sched.dfa_pre_advance_cycle ();
3018
3019 if (targetm.sched.dfa_pre_cycle_insn)
3020 state_transition (state,
3021 targetm.sched.dfa_pre_cycle_insn ());
3022
3023 state_transition (state, NULL);
3024
3025 if (targetm.sched.dfa_post_cycle_insn)
3026 state_transition (state,
3027 targetm.sched.dfa_post_cycle_insn ());
3028
3029 if (targetm.sched.dfa_post_advance_cycle)
3030 targetm.sched.dfa_post_advance_cycle ();
3031 }
3032
3033 /* Advance time on one cycle. */
3034 HAIFA_INLINE static void
3035 advance_one_cycle (void)
3036 {
3037 advance_state (curr_state);
3038 if (sched_verbose >= 4)
3039 fprintf (sched_dump, ";;\tAdvance the current state.\n");
3040 }
3041
3042 /* Update register pressure after scheduling INSN. */
3043 static void
3044 update_register_pressure (rtx_insn *insn)
3045 {
3046 struct reg_use_data *use;
3047 struct reg_set_data *set;
3048
3049 gcc_checking_assert (!DEBUG_INSN_P (insn));
3050
3051 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
3052 if (dying_use_p (use))
3053 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3054 use->regno, false);
3055 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
3056 mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
3057 set->regno, true);
3058 }
3059
3060 /* Set up or update (if UPDATE_P) max register pressure (see its
3061 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
3062 after insn AFTER. */
3063 static void
3064 setup_insn_max_reg_pressure (rtx_insn *after, bool update_p)
3065 {
3066 int i, p;
3067 bool eq_p;
3068 rtx_insn *insn;
3069 static int max_reg_pressure[N_REG_CLASSES];
3070
3071 save_reg_pressure ();
3072 for (i = 0; i < ira_pressure_classes_num; i++)
3073 max_reg_pressure[ira_pressure_classes[i]]
3074 = curr_reg_pressure[ira_pressure_classes[i]];
3075 for (insn = NEXT_INSN (after);
3076 insn != NULL_RTX && ! BARRIER_P (insn)
3077 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
3078 insn = NEXT_INSN (insn))
3079 if (NONDEBUG_INSN_P (insn))
3080 {
3081 eq_p = true;
3082 for (i = 0; i < ira_pressure_classes_num; i++)
3083 {
3084 p = max_reg_pressure[ira_pressure_classes[i]];
3085 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
3086 {
3087 eq_p = false;
3088 INSN_MAX_REG_PRESSURE (insn)[i]
3089 = max_reg_pressure[ira_pressure_classes[i]];
3090 }
3091 }
3092 if (update_p && eq_p)
3093 break;
3094 update_register_pressure (insn);
3095 for (i = 0; i < ira_pressure_classes_num; i++)
3096 if (max_reg_pressure[ira_pressure_classes[i]]
3097 < curr_reg_pressure[ira_pressure_classes[i]])
3098 max_reg_pressure[ira_pressure_classes[i]]
3099 = curr_reg_pressure[ira_pressure_classes[i]];
3100 }
3101 restore_reg_pressure ();
3102 }
3103
3104 /* Update the current register pressure after scheduling INSN. Also
3105 update the max register pressure for the unscheduled insns of the
3106 current BB. */
3107 static void
3108 update_reg_and_insn_max_reg_pressure (rtx_insn *insn)
3109 {
3110 int i;
3111 int before[N_REG_CLASSES];
3112
3113 for (i = 0; i < ira_pressure_classes_num; i++)
3114 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
3115 update_register_pressure (insn);
3116 for (i = 0; i < ira_pressure_classes_num; i++)
3117 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
3118 break;
3119 if (i < ira_pressure_classes_num)
3120 setup_insn_max_reg_pressure (insn, true);
3121 }
3122
3123 /* Set up register pressure at the beginning of basic block BB, whose
3124 insns start after insn AFTER. Also set up the max register pressure
3125 for all insns of the basic block. */
3126 void
3127 sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
3128 {
3129 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
3130 initiate_bb_reg_pressure_info (bb);
3131 setup_insn_max_reg_pressure (after, false);
3132 }
3133 \f
3134 /* If doing predication while scheduling, verify whether INSN, which
3135 has just been scheduled, clobbers the conditions of any
3136 instructions that must be predicated in order to break their
3137 dependencies. If so, remove them from the queues so that they will
3138 only be scheduled once their control dependency is resolved. */
3139
3140 static void
3141 check_clobbered_conditions (rtx insn)
3142 {
3143 HARD_REG_SET t;
3144 int i;
3145
3146 if ((current_sched_info->flags & DO_PREDICATION) == 0)
3147 return;
3148
3149 find_all_hard_reg_sets (insn, &t, true);
3150
3151 restart:
3152 for (i = 0; i < ready.n_ready; i++)
3153 {
3154 rtx_insn *x = ready_element (&ready, i);
3155 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3156 {
3157 ready_remove_insn (x);
3158 goto restart;
3159 }
3160 }
3161 for (i = 0; i <= max_insn_queue_index; i++)
3162 {
3163 rtx_insn_list *link;
3164 int q = NEXT_Q_AFTER (q_ptr, i);
3165
3166 restart_queue:
3167 for (link = insn_queue[q]; link; link = link->next ())
3168 {
3169 rtx_insn *x = link->insn ();
3170 if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
3171 {
3172 queue_remove (x);
3173 goto restart_queue;
3174 }
3175 }
3176 }
3177 }
3178 \f
3179 /* Return (in order):
3180
3181 - positive if INSN adversely affects the pressure on one
3182 register class
3183
3184 - negative if INSN reduces the pressure on one register class
3185
3186 - 0 if INSN doesn't affect the pressure on any register class. */
3187
3188 static int
3189 model_classify_pressure (struct model_insn_info *insn)
3190 {
3191 struct reg_pressure_data *reg_pressure;
3192 int death[N_REG_CLASSES];
3193 int pci, cl, sum;
3194
3195 calculate_reg_deaths (insn->insn, death);
3196 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3197 sum = 0;
3198 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3199 {
3200 cl = ira_pressure_classes[pci];
3201 if (death[cl] < reg_pressure[pci].set_increase)
3202 return 1;
3203 sum += reg_pressure[pci].set_increase - death[cl];
3204 }
3205 return sum;
3206 }
3207
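/* Illustrative sketch only (hypothetical helper, not used by the pass):
   the classification above, taking the per-class deltas directly.
   DELTA[PCI] stands for set_increase - death for pressure class PCI.  */

static ATTRIBUTE_UNUSED int
model_classify_pressure_example (const int *delta, int nclasses)
{
  int pci, sum = 0;

  for (pci = 0; pci < nclasses; pci++)
    {
      if (delta[pci] > 0)
	return 1;		/* Some class gets worse: adverse.  */
      sum += delta[pci];
    }
  return sum;			/* Zero or negative: neutral or helpful.  */
}
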
3208 /* Return true if INSN1 should come before INSN2 in the model schedule. */
3209
3210 static int
3211 model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
3212 {
3213 unsigned int height1, height2;
3214 unsigned int priority1, priority2;
3215
3216 /* Prefer instructions with a higher model priority. */
3217 if (insn1->model_priority != insn2->model_priority)
3218 return insn1->model_priority > insn2->model_priority;
3219
3220 /* Combine the length of the longest path of satisfied true dependencies
3221 that leads to each instruction (depth) with the length of the longest
3222 path of any dependencies that leads from the instruction (alap).
3223 Prefer instructions with the greatest combined length. If the combined
3224 lengths are equal, prefer instructions with the greatest depth.
3225
3226 The idea is that, if we have a set S of "equal" instructions that each
3227 have ALAP value X, and we pick one such instruction I, any true-dependent
3228 successors of I that have ALAP value X - 1 should be preferred over S.
3229 This encourages the schedule to be "narrow" rather than "wide".
3230 However, if I is a low-priority instruction that we decided to
3231 schedule because of its model_classify_pressure, and if there
3232 is a set of higher-priority instructions T, the aforementioned
3233 successors of I should not have the edge over T. */
3234 height1 = insn1->depth + insn1->alap;
3235 height2 = insn2->depth + insn2->alap;
3236 if (height1 != height2)
3237 return height1 > height2;
3238 if (insn1->depth != insn2->depth)
3239 return insn1->depth > insn2->depth;
3240
3241 /* We have no real preference between INSN1 and INSN2 as far as attempts
3242 to reduce pressure go. Prefer instructions with higher priorities. */
3243 priority1 = INSN_PRIORITY (insn1->insn);
3244 priority2 = INSN_PRIORITY (insn2->insn);
3245 if (priority1 != priority2)
3246 return priority1 > priority2;
3247
3248 /* Use the original rtl sequence as a tie-breaker. */
3249 return insn1 < insn2;
3250 }
3251
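/* Illustrative sketch only (hypothetical helper, not used by the pass):
   the depth/alap/priority part of the ordering above on plain numbers,
   with the model_priority and address tie-break steps omitted.  An insn
   with depth 3 and alap 4 (height 7) beats one with depth 5 and alap 1
   (height 6); if the heights were equal, the greater depth would win.  */

static ATTRIBUTE_UNUSED bool
model_order_example (unsigned int depth1, unsigned int alap1,
		     unsigned int priority1,
		     unsigned int depth2, unsigned int alap2,
		     unsigned int priority2)
{
  if (depth1 + alap1 != depth2 + alap2)
    return depth1 + alap1 > depth2 + alap2;
  if (depth1 != depth2)
    return depth1 > depth2;
  return priority1 > priority2;
}
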
3252 /* Add INSN to the model worklist immediately after PREV. Add it to the
3253 beginning of the list if PREV is null. */
3254
3255 static void
3256 model_add_to_worklist_at (struct model_insn_info *insn,
3257 struct model_insn_info *prev)
3258 {
3259 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
3260 QUEUE_INDEX (insn->insn) = QUEUE_READY;
3261
3262 insn->prev = prev;
3263 if (prev)
3264 {
3265 insn->next = prev->next;
3266 prev->next = insn;
3267 }
3268 else
3269 {
3270 insn->next = model_worklist;
3271 model_worklist = insn;
3272 }
3273 if (insn->next)
3274 insn->next->prev = insn;
3275 }
3276
3277 /* Remove INSN from the model worklist. */
3278
3279 static void
3280 model_remove_from_worklist (struct model_insn_info *insn)
3281 {
3282 gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
3283 QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
3284
3285 if (insn->prev)
3286 insn->prev->next = insn->next;
3287 else
3288 model_worklist = insn->next;
3289 if (insn->next)
3290 insn->next->prev = insn->prev;
3291 }
3292
3293 /* Add INSN to the model worklist. Start looking for a suitable position
3294 between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
3295 insns on either side. A null PREV indicates the beginning of the list and
3296 a null NEXT indicates the end. */
3297
3298 static void
3299 model_add_to_worklist (struct model_insn_info *insn,
3300 struct model_insn_info *prev,
3301 struct model_insn_info *next)
3302 {
3303 int count;
3304
3305 count = MAX_SCHED_READY_INSNS;
3306 if (count > 0 && prev && model_order_p (insn, prev))
3307 do
3308 {
3309 count--;
3310 prev = prev->prev;
3311 }
3312 while (count > 0 && prev && model_order_p (insn, prev));
3313 else
3314 while (count > 0 && next && model_order_p (next, insn))
3315 {
3316 count--;
3317 prev = next;
3318 next = next->next;
3319 }
3320 model_add_to_worklist_at (insn, prev);
3321 }
3322
3323 /* INSN may now have a higher priority (in the model_order_p sense)
3324 than before. Move it up the worklist if necessary. */
3325
3326 static void
3327 model_promote_insn (struct model_insn_info *insn)
3328 {
3329 struct model_insn_info *prev;
3330 int count;
3331
3332 prev = insn->prev;
3333 count = MAX_SCHED_READY_INSNS;
3334 while (count > 0 && prev && model_order_p (insn, prev))
3335 {
3336 count--;
3337 prev = prev->prev;
3338 }
3339 if (prev != insn->prev)
3340 {
3341 model_remove_from_worklist (insn);
3342 model_add_to_worklist_at (insn, prev);
3343 }
3344 }
3345
3346 /* Add INSN to the end of the model schedule. */
3347
3348 static void
3349 model_add_to_schedule (rtx_insn *insn)
3350 {
3351 unsigned int point;
3352
3353 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3354 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3355
3356 point = model_schedule.length ();
3357 model_schedule.quick_push (insn);
3358 INSN_MODEL_INDEX (insn) = point + 1;
3359 }
3360
3361 /* Analyze the instructions that are to be scheduled, setting up
3362 MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
3363 instructions to model_worklist. */
3364
3365 static void
3366 model_analyze_insns (void)
3367 {
3368 rtx_insn *start, *end, *iter;
3369 sd_iterator_def sd_it;
3370 dep_t dep;
3371 struct model_insn_info *insn, *con;
3372
3373 model_num_insns = 0;
3374 start = PREV_INSN (current_sched_info->next_tail);
3375 end = current_sched_info->prev_head;
3376 for (iter = start; iter != end; iter = PREV_INSN (iter))
3377 if (NONDEBUG_INSN_P (iter))
3378 {
3379 insn = MODEL_INSN_INFO (iter);
3380 insn->insn = iter;
3381 FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
3382 {
3383 con = MODEL_INSN_INFO (DEP_CON (dep));
3384 if (con->insn && insn->alap < con->alap + 1)
3385 insn->alap = con->alap + 1;
3386 }
3387
3388 insn->old_queue = QUEUE_INDEX (iter);
3389 QUEUE_INDEX (iter) = QUEUE_NOWHERE;
3390
3391 insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
3392 if (insn->unscheduled_preds == 0)
3393 model_add_to_worklist (insn, NULL, model_worklist);
3394
3395 model_num_insns++;
3396 }
3397 }
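/* As an illustration (with a hypothetical dependence chain): because the
   walk above goes from the last instruction to the first, every forward
   dependence's consumer has already been visited when its producer is
   reached.  For a chain A -> B -> C, C ends up with alap 0, B with alap 1
   and A with alap 2; A, having no hard backward dependences, is the one
   placed on the initial worklist.  */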
3398
3399 /* The global state describes the register pressure at the start of the
3400 model schedule. Initialize GROUP accordingly. */
3401
3402 static void
3403 model_init_pressure_group (struct model_pressure_group *group)
3404 {
3405 int pci, cl;
3406
3407 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3408 {
3409 cl = ira_pressure_classes[pci];
3410 group->limits[pci].pressure = curr_reg_pressure[cl];
3411 group->limits[pci].point = 0;
3412 }
3413 /* Use index model_num_insns to record the state after the last
3414 instruction in the model schedule. */
3415 group->model = XNEWVEC (struct model_pressure_data,
3416 (model_num_insns + 1) * ira_pressure_classes_num);
3417 }
3418
3419 /* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
3420 Update the maximum pressure for the whole schedule. */
3421
3422 static void
3423 model_record_pressure (struct model_pressure_group *group,
3424 int point, int pci, int pressure)
3425 {
3426 MODEL_REF_PRESSURE (group, point, pci) = pressure;
3427 if (group->limits[pci].pressure < pressure)
3428 {
3429 group->limits[pci].pressure = pressure;
3430 group->limits[pci].point = point;
3431 }
3432 }
3433
3434 /* INSN has just been added to the end of the model schedule. Record its
3435 register-pressure information. */
3436
3437 static void
3438 model_record_pressures (struct model_insn_info *insn)
3439 {
3440 struct reg_pressure_data *reg_pressure;
3441 int point, pci, cl, delta;
3442 int death[N_REG_CLASSES];
3443
3444 point = model_index (insn->insn);
3445 if (sched_verbose >= 2)
3446 {
3447 if (point == 0)
3448 {
3449 fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
3450 fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
3451 }
3452 fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
3453 point, INSN_UID (insn->insn), insn->model_priority,
3454 insn->depth + insn->alap, insn->depth,
3455 INSN_PRIORITY (insn->insn),
3456 str_pattern_slim (PATTERN (insn->insn)));
3457 }
3458 calculate_reg_deaths (insn->insn, death);
3459 reg_pressure = INSN_REG_PRESSURE (insn->insn);
3460 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3461 {
3462 cl = ira_pressure_classes[pci];
3463 delta = reg_pressure[pci].set_increase - death[cl];
3464 if (sched_verbose >= 2)
3465 fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
3466 curr_reg_pressure[cl], delta);
3467 model_record_pressure (&model_before_pressure, point, pci,
3468 curr_reg_pressure[cl]);
3469 }
3470 if (sched_verbose >= 2)
3471 fprintf (sched_dump, "\n");
3472 }
3473
3474 /* All instructions have been added to the model schedule. Record the
3475 final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
3476
3477 static void
3478 model_record_final_pressures (struct model_pressure_group *group)
3479 {
3480 int point, pci, max_pressure, ref_pressure, cl;
3481
3482 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3483 {
3484 /* Record the final pressure for this class. */
3485 cl = ira_pressure_classes[pci];
3486 point = model_num_insns;
3487 ref_pressure = curr_reg_pressure[cl];
3488 model_record_pressure (group, point, pci, ref_pressure);
3489
3490 /* Record the original maximum pressure. */
3491 group->limits[pci].orig_pressure = group->limits[pci].pressure;
3492
3493 /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
3494 max_pressure = ref_pressure;
3495 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3496 while (point > 0)
3497 {
3498 point--;
3499 ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
3500 max_pressure = MAX (max_pressure, ref_pressure);
3501 MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
3502 }
3503 }
3504 }
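/* A minimal sketch (illustrative only, not used by the scheduler) of the
   backward scan above: for each point it records the maximum reference
   pressure at that point or later in the model schedule.  The function
   name and the sample numbers below are made up.  */
#if 0
static void
example_suffix_max (const int *ref_pressure, int *max_pressure, int npoints)
{
  int running_max = ref_pressure[npoints - 1];
  for (int point = npoints - 1; point >= 0; point--)
    {
      running_max = MAX (running_max, ref_pressure[point]);
      max_pressure[point] = running_max;
    }
  /* E.g. reference pressures 2, 5, 3, 4 yield maxima 5, 5, 4, 4.  */
}
#endif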
3505
3506 /* Update all successors of INSN, given that INSN has just been scheduled. */
3507
3508 static void
3509 model_add_successors_to_worklist (struct model_insn_info *insn)
3510 {
3511 sd_iterator_def sd_it;
3512 struct model_insn_info *con;
3513 dep_t dep;
3514
3515 FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
3516 {
3517 con = MODEL_INSN_INFO (DEP_CON (dep));
3518 /* Ignore debug instructions, and instructions from other blocks. */
3519 if (con->insn)
3520 {
3521 con->unscheduled_preds--;
3522
3523 /* Update the depth field of each true-dependent successor.
3524 Increasing the depth gives them a higher priority than
3525 before. */
3526 if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
3527 {
3528 con->depth = insn->depth + 1;
3529 if (QUEUE_INDEX (con->insn) == QUEUE_READY)
3530 model_promote_insn (con);
3531 }
3532
3533 /* If this is a true dependency, or if there are no remaining
3534 dependencies for CON (meaning that CON only had non-true
3535 dependencies), make sure that CON is on the worklist.
3536 We don't bother otherwise because it would tend to fill the
3537 worklist with a lot of low-priority instructions that are not
3538 yet ready to issue. */
3539 if ((con->depth > 0 || con->unscheduled_preds == 0)
3540 && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
3541 model_add_to_worklist (con, insn, insn->next);
3542 }
3543 }
3544 }
3545
3546 /* Give INSN a higher priority than any current instruction, then give
3547 unscheduled predecessors of INSN a higher priority still. If any of
3548 those predecessors are not on the model worklist, do the same for their
3549 predecessors, and so on. */
3550
3551 static void
3552 model_promote_predecessors (struct model_insn_info *insn)
3553 {
3554 struct model_insn_info *pro, *first;
3555 sd_iterator_def sd_it;
3556 dep_t dep;
3557
3558 if (sched_verbose >= 7)
3559 fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
3560 INSN_UID (insn->insn), model_next_priority);
3561 insn->model_priority = model_next_priority++;
3562 model_remove_from_worklist (insn);
3563 model_add_to_worklist_at (insn, NULL);
3564
3565 first = NULL;
3566 for (;;)
3567 {
3568 FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
3569 {
3570 pro = MODEL_INSN_INFO (DEP_PRO (dep));
3571 /* The first test is to ignore debug instructions, and instructions
3572 from other blocks. */
3573 if (pro->insn
3574 && pro->model_priority != model_next_priority
3575 && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
3576 {
3577 pro->model_priority = model_next_priority;
3578 if (sched_verbose >= 7)
3579 fprintf (sched_dump, " %d", INSN_UID (pro->insn));
3580 if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
3581 {
3582 /* PRO is already in the worklist, but it now has
3583 a higher priority than before. Move it to the
3584 appropriate place. */
3585 model_remove_from_worklist (pro);
3586 model_add_to_worklist (pro, NULL, model_worklist);
3587 }
3588 else
3589 {
3590 /* PRO isn't in the worklist. Recursively process
3591 its predecessors until we find one that is. */
3592 pro->next = first;
3593 first = pro;
3594 }
3595 }
3596 }
3597 if (!first)
3598 break;
3599 insn = first;
3600 first = insn->next;
3601 }
3602 if (sched_verbose >= 7)
3603 fprintf (sched_dump, " = %d\n", model_next_priority);
3604 model_next_priority++;
3605 }
3606
3607 /* Pick one instruction from model_worklist and process it. */
3608
3609 static void
3610 model_choose_insn (void)
3611 {
3612 struct model_insn_info *insn, *fallback;
3613 int count;
3614
3615 if (sched_verbose >= 7)
3616 {
3617 fprintf (sched_dump, ";;\t+--- worklist:\n");
3618 insn = model_worklist;
3619 count = MAX_SCHED_READY_INSNS;
3620 while (count > 0 && insn)
3621 {
3622 fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
3623 INSN_UID (insn->insn), insn->model_priority,
3624 insn->depth + insn->alap, insn->depth,
3625 INSN_PRIORITY (insn->insn));
3626 count--;
3627 insn = insn->next;
3628 }
3629 }
3630
3631 /* Look for a ready instruction whose model_classify_priority is zero
3632 or negative, picking the highest-priority one. Adding such an
3633 instruction to the schedule now should do no harm, and may actually
3634 do some good.
3635
3636 Failing that, see whether there is an instruction with the highest
3637 extant model_priority that is not yet ready, but which would reduce
3638 pressure if it became ready. This is designed to catch cases like:
3639
3640 (set (mem (reg R1)) (reg R2))
3641
3642 where the instruction is the last remaining use of R1 and where the
3643 value of R2 is not yet available (or vice versa). The death of R1
3644 means that this instruction already reduces pressure. It is of
3645 course possible that the computation of R2 involves other registers
3646 that are hard to kill, but such cases are rare enough for this
3647 heuristic to be a win in general.
3648
3649 Failing that, just pick the highest-priority instruction in the
3650 worklist. */
3651 count = MAX_SCHED_READY_INSNS;
3652 insn = model_worklist;
3653 fallback = 0;
3654 for (;;)
3655 {
3656 if (count == 0 || !insn)
3657 {
3658 insn = fallback ? fallback : model_worklist;
3659 break;
3660 }
3661 if (insn->unscheduled_preds)
3662 {
3663 if (model_worklist->model_priority == insn->model_priority
3664 && !fallback
3665 && model_classify_pressure (insn) < 0)
3666 fallback = insn;
3667 }
3668 else
3669 {
3670 if (model_classify_pressure (insn) <= 0)
3671 break;
3672 }
3673 count--;
3674 insn = insn->next;
3675 }
3676
3677 if (sched_verbose >= 7 && insn != model_worklist)
3678 {
3679 if (insn->unscheduled_preds)
3680 fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
3681 INSN_UID (insn->insn));
3682 else
3683 fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
3684 INSN_UID (insn->insn));
3685 }
3686 if (insn->unscheduled_preds)
3687 /* INSN isn't yet ready to issue. Give all its predecessors the
3688 highest priority. */
3689 model_promote_predecessors (insn);
3690 else
3691 {
3692 /* INSN is ready. Add it to the end of model_schedule and
3693 process its successors. */
3694 model_add_successors_to_worklist (insn);
3695 model_remove_from_worklist (insn);
3696 model_add_to_schedule (insn->insn);
3697 model_record_pressures (insn);
3698 update_register_pressure (insn->insn);
3699 }
3700 }
3701
3702 /* Restore all QUEUE_INDEXs to the values that they had before
3703 model_start_schedule was called. */
3704
3705 static void
3706 model_reset_queue_indices (void)
3707 {
3708 unsigned int i;
3709 rtx_insn *insn;
3710
3711 FOR_EACH_VEC_ELT (model_schedule, i, insn)
3712 QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
3713 }
3714
3715 /* We have calculated the model schedule and spill costs. Print a summary
3716 to sched_dump. */
3717
3718 static void
3719 model_dump_pressure_summary (void)
3720 {
3721 int pci, cl;
3722
3723 fprintf (sched_dump, ";; Pressure summary:");
3724 for (pci = 0; pci < ira_pressure_classes_num; pci++)
3725 {
3726 cl = ira_pressure_classes[pci];
3727 fprintf (sched_dump, " %s:%d", reg_class_names[cl],
3728 model_before_pressure.limits[pci].pressure);
3729 }
3730 fprintf (sched_dump, "\n\n");
3731 }
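/* With the format strings above, a verbose dump line might look like
       ;; Pressure summary: GENERAL_REGS:14 FP_REGS:6
   where the class names and figures are hypothetical and depend on the
   target's pressure classes.  */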
3732
3733 /* Initialize the SCHED_PRESSURE_MODEL information for the current
3734 scheduling region. */
3735
3736 static void
3737 model_start_schedule (basic_block bb)
3738 {
3739 model_next_priority = 1;
3740 model_schedule.create (sched_max_luid);
3741 model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
3742
3743 gcc_assert (bb == BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head)));
3744 initiate_reg_pressure_info (df_get_live_in (bb));
3745
3746 model_analyze_insns ();
3747 model_init_pressure_group (&model_before_pressure);
3748 while (model_worklist)
3749 model_choose_insn ();
3750 gcc_assert (model_num_insns == (int) model_schedule.length ());
3751 if (sched_verbose >= 2)
3752 fprintf (sched_dump, "\n");
3753
3754 model_record_final_pressures (&model_before_pressure);
3755 model_reset_queue_indices ();
3756
3757 XDELETEVEC (model_insns);
3758
3759 model_curr_point = 0;
3760 initiate_reg_pressure_info (df_get_live_in (bb));
3761 if (sched_verbose >= 1)
3762 model_dump_pressure_summary ();
3763 }
3764
3765 /* Free the information associated with GROUP. */
3766
3767 static void
3768 model_finalize_pressure_group (struct model_pressure_group *group)
3769 {
3770 XDELETEVEC (group->model);
3771 }
3772
3773 /* Free the information created by model_start_schedule. */
3774
3775 static void
3776 model_end_schedule (void)
3777 {
3778 model_finalize_pressure_group (&model_before_pressure);
3779 model_schedule.release ();
3780 }
3781
3782 /* Prepare reg pressure scheduling for basic block BB. */
3783 static void
3784 sched_pressure_start_bb (basic_block bb)
3785 {
3786 /* Set the number of available registers for each class, taking into account
3787 the relative probability of the current basic block versus the function
3788 prologue and epilogue.
3789 * If the basic block executes much more often than the prologue/epilogue
3790 (e.g., inside a hot loop), then the cost of a spill in the prologue is
3791 close to nil, so the effective number of available registers is
3792 (ira_class_hard_regs_num[cl] - 0).
3793 * If the basic block executes as often as the prologue/epilogue,
3794 then a spill in the block is as costly as in the prologue, so the
3795 effective number of available registers is
3796 (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]).
3797 Note that, all else being equal, we prefer to spill in the prologue, since
3798 that leaves "extra" registers for other basic blocks of the function.
3799 * If the basic block is on the cold path of the function and executes
3800 rarely, then we should always prefer to spill in the block rather than
3801 in the prologue/epilogue. The effective number of available registers is
3802 (ira_class_hard_regs_num[cl] - call_used_regs_num[cl]). */
3803 {
3804 int i;
3805 int entry_freq = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency;
3806 int bb_freq = bb->frequency;
3807
3808 if (bb_freq == 0)
3809 {
3810 if (entry_freq == 0)
3811 entry_freq = bb_freq = 1;
3812 }
3813 if (bb_freq < entry_freq)
3814 bb_freq = entry_freq;
3815
3816 for (i = 0; i < ira_pressure_classes_num; ++i)
3817 {
3818 enum reg_class cl = ira_pressure_classes[i];
3819 sched_class_regs_num[cl] = ira_class_hard_regs_num[cl];
3820 sched_class_regs_num[cl]
3821 -= (call_used_regs_num[cl] * entry_freq) / bb_freq;
3822 }
3823 }
3824
3825 if (sched_pressure == SCHED_PRESSURE_MODEL)
3826 model_start_schedule (bb);
3827 }
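/* A worked example of the computation above, using hypothetical numbers:
   assume ira_class_hard_regs_num[cl] == 16, call_used_regs_num[cl] == 8
   and entry_freq == 100.
   * A hot block with bb_freq == 1600 gets 16 - (8 * 100) / 1600
     == 16 - 0 == 16 effective registers.
   * A block with bb_freq == 400 gets 16 - 800 / 400 == 14.
   * A block no hotter than the entry block (bb_freq is clamped up to
     entry_freq == 100) gets 16 - 800 / 100 == 8.
   The divisions are integer divisions, as in the code above.  */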
3828 \f
3829 /* A structure that holds local state for the loop in schedule_block. */
3830 struct sched_block_state
3831 {
3832 /* True if no real insns have been scheduled in the current cycle. */
3833 bool first_cycle_insn_p;
3834 /* True if a shadow insn has been scheduled in the current cycle, which
3835 means that no more normal insns can be issued. */
3836 bool shadows_only_p;
3837 /* True if we're winding down a modulo schedule, which means that we only
3838 issue insns with INSN_EXACT_TICK set. */
3839 bool modulo_epilogue;
3840 /* Initialized with the machine's issue rate every cycle, and updated
3841 by calls to the variable_issue hook. */
3842 int can_issue_more;
3843 };
3844
3845 /* INSN is the "currently executing insn". Launch each insn which was
3846 waiting on INSN. The global ready list contains the insns that are
3847 ready to fire and clock_var holds the current cycle. The function
3848 returns the necessary cycle advance after issuing the insn (it is
3849 not zero for insns in a schedule group). */
3850
3851 static int
3852 schedule_insn (rtx_insn *insn)
3853 {
3854 sd_iterator_def sd_it;
3855 dep_t dep;
3856 int i;
3857 int advance = 0;
3858
3859 if (sched_verbose >= 1)
3860 {
3861 struct reg_pressure_data *pressure_info;
3862 fprintf (sched_dump, ";;\t%3i--> %s %-40s:",
3863 clock_var, (*current_sched_info->print_insn) (insn, 1),
3864 str_pattern_slim (PATTERN (insn)));
3865
3866 if (recog_memoized (insn) < 0)
3867 fprintf (sched_dump, "nothing");
3868 else
3869 print_reservation (sched_dump, insn);
3870 pressure_info = INSN_REG_PRESSURE (insn);
3871 if (pressure_info != NULL)
3872 {
3873 fputc (':', sched_dump);
3874 for (i = 0; i < ira_pressure_classes_num; i++)
3875 fprintf (sched_dump, "%s%s%+d(%d)",
3876 scheduled_insns.length () > 1
3877 && INSN_LUID (insn)
3878 < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
3879 reg_class_names[ira_pressure_classes[i]],
3880 pressure_info[i].set_increase, pressure_info[i].change);
3881 }
3882 if (sched_pressure == SCHED_PRESSURE_MODEL
3883 && model_curr_point < model_num_insns
3884 && model_index (insn) == model_curr_point)
3885 fprintf (sched_dump, ":model %d", model_curr_point);
3886 fputc ('\n', sched_dump);
3887 }
3888
3889 if (sched_pressure == SCHED_PRESSURE_WEIGHTED && !DEBUG_INSN_P (insn))
3890 update_reg_and_insn_max_reg_pressure (insn);
3891
3892 /* The instruction being scheduled should have all its dependencies resolved
3893 and should have been removed from the ready list. */
3894 gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
3895
3896 /* Reset debug insns invalidated by moving this insn. */
3897 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
3898 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
3899 sd_iterator_cond (&sd_it, &dep);)
3900 {
3901 rtx_insn *dbg = DEP_PRO (dep);
3902 struct reg_use_data *use, *next;
3903
3904 if (DEP_STATUS (dep) & DEP_CANCELLED)
3905 {
3906 sd_iterator_next (&sd_it);
3907 continue;
3908 }
3909
3910 gcc_assert (DEBUG_INSN_P (dbg));
3911
3912 if (sched_verbose >= 6)
3913 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
3914 INSN_UID (dbg));
3915
3916 /* ??? Rather than resetting the debug insn, we might be able
3917 to emit a debug temp before the just-scheduled insn, but
3918 this would involve checking that the expression at the
3919 point of the debug insn is equivalent to the expression
3920 before the just-scheduled insn. They might not be: the
3921 expression in the debug insn may depend on other insns not
3922 yet scheduled that set MEMs, REGs or even other debug
3923 insns. It's not clear that attempting to preserve debug
3924 information in these cases is worth the effort, given how
3925 uncommon these resets are and the likelihood that the debug
3926 temps introduced won't survive the schedule change. */
3927 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
3928 df_insn_rescan (dbg);
3929
3930 /* Unknown location doesn't use any registers. */
3931 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
3932 {
3933 struct reg_use_data *prev = use;
3934
3935 /* Remove use from the cyclic next_regno_use chain first. */
3936 while (prev->next_regno_use != use)
3937 prev = prev->next_regno_use;
3938 prev->next_regno_use = use->next_regno_use;
3939 next = use->next_insn_use;
3940 free (use);
3941 }
3942 INSN_REG_USE_LIST (dbg) = NULL;
3943
3944 /* We delete rather than resolve these deps, otherwise we
3945 crash in sched_free_deps(), because forward deps are
3946 expected to be released before backward deps. */
3947 sd_delete_dep (sd_it);
3948 }
3949
3950 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
3951 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
3952
3953 if (sched_pressure == SCHED_PRESSURE_MODEL
3954 && model_curr_point < model_num_insns
3955 && NONDEBUG_INSN_P (insn))
3956 {
3957 if (model_index (insn) == model_curr_point)
3958 do
3959 model_curr_point++;
3960 while (model_curr_point < model_num_insns
3961 && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
3962 == QUEUE_SCHEDULED));
3963 else
3964 model_recompute (insn);
3965 model_update_limit_points ();
3966 update_register_pressure (insn);
3967 if (sched_verbose >= 2)
3968 print_curr_reg_pressure ();
3969 }
3970
3971 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
3972 if (INSN_TICK (insn) > clock_var)
3973 /* INSN has been prematurely moved from the queue to the ready list.
3974 This is possible only if the following flag is set. */
3975 gcc_assert (flag_sched_stalled_insns);
3976
3977 /* ??? Probably, if INSN is scheduled prematurely, we should leave
3978 INSN_TICK untouched. This is a machine-dependent issue, actually. */
3979 INSN_TICK (insn) = clock_var;
3980
3981 check_clobbered_conditions (insn);
3982
3983 /* Update dependent instructions. First, see if by scheduling this insn
3984 now we broke a dependence in a way that requires us to change another
3985 insn. */
3986 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3987 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
3988 {
3989 struct dep_replacement *desc = DEP_REPLACE (dep);
3990 rtx_insn *pro = DEP_PRO (dep);
3991 if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
3992 && desc != NULL && desc->insn == pro)
3993 apply_replacement (dep, false);
3994 }
3995
3996 /* Go through and resolve forward dependencies. */
3997 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
3998 sd_iterator_cond (&sd_it, &dep);)
3999 {
4000 rtx_insn *next = DEP_CON (dep);
4001 bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
4002
4003 /* Resolve the dependence between INSN and NEXT.
4004 sd_resolve_dep () moves current dep to another list thus
4005 advancing the iterator. */
4006 sd_resolve_dep (sd_it);
4007
4008 if (cancelled)
4009 {
4010 if (must_restore_pattern_p (next, dep))
4011 restore_pattern (dep, false);
4012 continue;
4013 }
4014
4015 /* Don't bother trying to mark next as ready if insn is a debug
4016 insn. If insn is the last hard dependency, it will have
4017 already been discounted. */
4018 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
4019 continue;
4020
4021 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4022 {
4023 int effective_cost;
4024
4025 effective_cost = try_ready (next);
4026
4027 if (effective_cost >= 0
4028 && SCHED_GROUP_P (next)
4029 && advance < effective_cost)
4030 advance = effective_cost;
4031 }
4032 else
4033 /* A check always has only one forward dependence (to the first insn in
4034 the recovery block), so this will be executed only once. */
4035 {
4036 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4037 fix_recovery_deps (RECOVERY_BLOCK (insn));
4038 }
4039 }
4040
4041 /* Annotate the instruction with issue information -- TImode
4042 indicates that the instruction is expected not to be able
4043 to issue on the same cycle as the previous insn. A machine
4044 may use this information to decide how the instruction should
4045 be aligned. */
4046 if (issue_rate > 1
4047 && GET_CODE (PATTERN (insn)) != USE
4048 && GET_CODE (PATTERN (insn)) != CLOBBER
4049 && !DEBUG_INSN_P (insn))
4050 {
4051 if (reload_completed)
4052 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
4053 last_clock_var = clock_var;
4054 }
4055
4056 if (nonscheduled_insns_begin != NULL_RTX)
4057 /* Indicate to debug counters that INSN is scheduled. */
4058 nonscheduled_insns_begin = insn;
4059
4060 return advance;
4061 }
4062
4063 /* Functions for handling of notes. */
4064
4065 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
4066 void
4067 concat_note_lists (rtx_insn *from_end, rtx_insn **to_endp)
4068 {
4069 rtx_insn *from_start;
4070
4071 /* It's easy when we have nothing to concat. */
4072 if (from_end == NULL)
4073 return;
4074
4075 /* It's also easy when the destination is empty. */
4076 if (*to_endp == NULL)
4077 {
4078 *to_endp = from_end;
4079 return;
4080 }
4081
4082 from_start = from_end;
4083 while (PREV_INSN (from_start) != NULL)
4084 from_start = PREV_INSN (from_start);
4085
4086 SET_PREV_INSN (from_start) = *to_endp;
4087 SET_NEXT_INSN (*to_endp) = from_start;
4088 *to_endp = from_end;
4089 }
4090
4091 /* Delete notes between HEAD and TAIL and put them in the chain
4092 of notes ended by NOTE_LIST. */
4093 void
4094 remove_notes (rtx_insn *head, rtx_insn *tail)
4095 {
4096 rtx_insn *next_tail, *insn, *next;
4097
4098 note_list = 0;
4099 if (head == tail && !INSN_P (head))
4100 return;
4101
4102 next_tail = NEXT_INSN (tail);
4103 for (insn = head; insn != next_tail; insn = next)
4104 {
4105 next = NEXT_INSN (insn);
4106 if (!NOTE_P (insn))
4107 continue;
4108
4109 switch (NOTE_KIND (insn))
4110 {
4111 case NOTE_INSN_BASIC_BLOCK:
4112 continue;
4113
4114 case NOTE_INSN_EPILOGUE_BEG:
4115 if (insn != tail)
4116 {
4117 remove_insn (insn);
4118 add_reg_note (next, REG_SAVE_NOTE,
4119 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
4120 break;
4121 }
4122 /* FALLTHRU */
4123
4124 default:
4125 remove_insn (insn);
4126
4127 /* Add the note to the list that ends at NOTE_LIST. */
4128 SET_PREV_INSN (insn) = note_list;
4129 SET_NEXT_INSN (insn) = NULL_RTX;
4130 if (note_list)
4131 SET_NEXT_INSN (note_list) = insn;
4132 note_list = insn;
4133 break;
4134 }
4135
4136 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
4137 }
4138 }
4139
4140 /* A structure to record enough data to allow us to backtrack the scheduler to
4141 a previous state. */
4142 struct haifa_saved_data
4143 {
4144 /* Next entry on the list. */
4145 struct haifa_saved_data *next;
4146
4147 /* Backtracking is associated with scheduling insns that have delay slots.
4148 DELAY_PAIR points to the structure that contains the insns involved, and
4149 the number of cycles between them. */
4150 struct delay_pair *delay_pair;
4151
4152 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
4153 void *fe_saved_data;
4154 /* Data used by the backend. */
4155 void *be_saved_data;
4156
4157 /* Copies of global state. */
4158 int clock_var, last_clock_var;
4159 struct ready_list ready;
4160 state_t curr_state;
4161
4162 rtx_insn *last_scheduled_insn;
4163 rtx last_nondebug_scheduled_insn;
4164 rtx_insn *nonscheduled_insns_begin;
4165 int cycle_issued_insns;
4166
4167 /* Copies of state used in the inner loop of schedule_block. */
4168 struct sched_block_state sched_block;
4169
4170 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
4171 to 0 when restoring. */
4172 int q_size;
4173 rtx_insn_list **insn_queue;
4174
4175 /* Describe pattern replacements that occurred since this backtrack point
4176 was queued. */
4177 vec<dep_t> replacement_deps;
4178 vec<int> replace_apply;
4179
4180 /* A copy of the next-cycle replacement vectors at the time of the backtrack
4181 point. */
4182 vec<dep_t> next_cycle_deps;
4183 vec<int> next_cycle_apply;
4184 };
4185
4186 /* A record, in reverse order, of all scheduled insns which have delay slots
4187 and may require backtracking. */
4188 static struct haifa_saved_data *backtrack_queue;
4189
4190 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
4191 to SET_P. */
4192 static void
4193 mark_backtrack_feeds (rtx insn, int set_p)
4194 {
4195 sd_iterator_def sd_it;
4196 dep_t dep;
4197 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
4198 {
4199 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
4200 }
4201 }
4202
4203 /* Save the current scheduler state so that we can backtrack to it
4204 later if necessary. PAIR gives the insns that make it necessary to
4205 save this point. SCHED_BLOCK is the local state of schedule_block
4206 that needs to be saved. */
4207 static void
4208 save_backtrack_point (struct delay_pair *pair,
4209 struct sched_block_state sched_block)
4210 {
4211 int i;
4212 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
4213
4214 save->curr_state = xmalloc (dfa_state_size);
4215 memcpy (save->curr_state, curr_state, dfa_state_size);
4216
4217 save->ready.first = ready.first;
4218 save->ready.n_ready = ready.n_ready;
4219 save->ready.n_debug = ready.n_debug;
4220 save->ready.veclen = ready.veclen;
4221 save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
4222 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
4223
4224 save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
4225 save->q_size = q_size;
4226 for (i = 0; i <= max_insn_queue_index; i++)
4227 {
4228 int q = NEXT_Q_AFTER (q_ptr, i);
4229 save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
4230 }
4231
4232 save->clock_var = clock_var;
4233 save->last_clock_var = last_clock_var;
4234 save->cycle_issued_insns = cycle_issued_insns;
4235 save->last_scheduled_insn = last_scheduled_insn;
4236 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
4237 save->nonscheduled_insns_begin = nonscheduled_insns_begin;
4238
4239 save->sched_block = sched_block;
4240
4241 save->replacement_deps.create (0);
4242 save->replace_apply.create (0);
4243 save->next_cycle_deps = next_cycle_replace_deps.copy ();
4244 save->next_cycle_apply = next_cycle_apply.copy ();
4245
4246 if (current_sched_info->save_state)
4247 save->fe_saved_data = (*current_sched_info->save_state) ();
4248
4249 if (targetm.sched.alloc_sched_context)
4250 {
4251 save->be_saved_data = targetm.sched.alloc_sched_context ();
4252 targetm.sched.init_sched_context (save->be_saved_data, false);
4253 }
4254 else
4255 save->be_saved_data = NULL;
4256
4257 save->delay_pair = pair;
4258
4259 save->next = backtrack_queue;
4260 backtrack_queue = save;
4261
4262 while (pair)
4263 {
4264 mark_backtrack_feeds (pair->i2, 1);
4265 INSN_TICK (pair->i2) = INVALID_TICK;
4266 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
4267 SHADOW_P (pair->i2) = pair->stages == 0;
4268 pair = pair->next_same_i1;
4269 }
4270 }
4271
4272 /* Walk the ready list and all queues. If any insns have unresolved backwards
4273 dependencies, these must be cancelled deps, broken by predication. Set or
4274 clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
4275
4276 static void
4277 toggle_cancelled_flags (bool set)
4278 {
4279 int i;
4280 sd_iterator_def sd_it;
4281 dep_t dep;
4282
4283 if (ready.n_ready > 0)
4284 {
4285 rtx_insn **first = ready_lastpos (&ready);
4286 for (i = 0; i < ready.n_ready; i++)
4287 FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
4288 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4289 {
4290 if (set)
4291 DEP_STATUS (dep) |= DEP_CANCELLED;
4292 else
4293 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4294 }
4295 }
4296 for (i = 0; i <= max_insn_queue_index; i++)
4297 {
4298 int q = NEXT_Q_AFTER (q_ptr, i);
4299 rtx_insn_list *link;
4300 for (link = insn_queue[q]; link; link = link->next ())
4301 {
4302 rtx_insn *insn = link->insn ();
4303 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4304 if (!DEBUG_INSN_P (DEP_PRO (dep)))
4305 {
4306 if (set)
4307 DEP_STATUS (dep) |= DEP_CANCELLED;
4308 else
4309 DEP_STATUS (dep) &= ~DEP_CANCELLED;
4310 }
4311 }
4312 }
4313 }
4314
4315 /* Undo the replacements that have occurred after backtrack point SAVE
4316 was placed. */
4317 static void
4318 undo_replacements_for_backtrack (struct haifa_saved_data *save)
4319 {
4320 while (!save->replacement_deps.is_empty ())
4321 {
4322 dep_t dep = save->replacement_deps.pop ();
4323 int apply_p = save->replace_apply.pop ();
4324
4325 if (apply_p)
4326 restore_pattern (dep, true);
4327 else
4328 apply_replacement (dep, true);
4329 }
4330 save->replacement_deps.release ();
4331 save->replace_apply.release ();
4332 }
4333
4334 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
4335 Restore their dependencies to an unresolved state, and mark them as
4336 queued nowhere. */
4337
4338 static void
4339 unschedule_insns_until (rtx insn)
4340 {
4341 auto_vec<rtx_insn *> recompute_vec;
4342
4343 /* Make two passes over the insns to be unscheduled. First, we clear out
4344 dependencies and other trivial bookkeeping. */
4345 for (;;)
4346 {
4347 rtx_insn *last;
4348 sd_iterator_def sd_it;
4349 dep_t dep;
4350
4351 last = scheduled_insns.pop ();
4352
4353 /* This will be changed by restore_backtrack_point if the insn is in
4354 any queue. */
4355 QUEUE_INDEX (last) = QUEUE_NOWHERE;
4356 if (last != insn)
4357 INSN_TICK (last) = INVALID_TICK;
4358
4359 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
4360 modulo_insns_scheduled--;
4361
4362 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
4363 sd_iterator_cond (&sd_it, &dep);)
4364 {
4365 rtx_insn *con = DEP_CON (dep);
4366 sd_unresolve_dep (sd_it);
4367 if (!MUST_RECOMPUTE_SPEC_P (con))
4368 {
4369 MUST_RECOMPUTE_SPEC_P (con) = 1;
4370 recompute_vec.safe_push (con);
4371 }
4372 }
4373
4374 if (last == insn)
4375 break;
4376 }
4377
4378 /* A second pass, to update ready and speculation status for insns
4379 depending on the unscheduled ones. The first pass must have
4380 popped the scheduled_insns vector up to the point where we
4381 restart scheduling, as recompute_todo_spec requires it to be
4382 up-to-date. */
4383 while (!recompute_vec.is_empty ())
4384 {
4385 rtx_insn *con;
4386
4387 con = recompute_vec.pop ();
4388 MUST_RECOMPUTE_SPEC_P (con) = 0;
4389 if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
4390 {
4391 TODO_SPEC (con) = HARD_DEP;
4392 INSN_TICK (con) = INVALID_TICK;
4393 if (PREDICATED_PAT (con) != NULL_RTX)
4394 haifa_change_pattern (con, ORIG_PAT (con));
4395 }
4396 else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
4397 TODO_SPEC (con) = recompute_todo_spec (con, true);
4398 }
4399 }
4400
4401 /* Restore scheduler state from the topmost entry on the backtracking queue.
4402 PSCHED_BLOCK_P points to the local data of schedule_block that we must
4403 overwrite with the saved data.
4404 The caller must already have called unschedule_insns_until. */
4405
4406 static void
4407 restore_last_backtrack_point (struct sched_block_state *psched_block)
4408 {
4409 int i;
4410 struct haifa_saved_data *save = backtrack_queue;
4411
4412 backtrack_queue = save->next;
4413
4414 if (current_sched_info->restore_state)
4415 (*current_sched_info->restore_state) (save->fe_saved_data);
4416
4417 if (targetm.sched.alloc_sched_context)
4418 {
4419 targetm.sched.set_sched_context (save->be_saved_data);
4420 targetm.sched.free_sched_context (save->be_saved_data);
4421 }
4422
4423 /* Do this first since it clobbers INSN_TICK of the involved
4424 instructions. */
4425 undo_replacements_for_backtrack (save);
4426
4427 /* Clear the QUEUE_INDEX of everything in the ready list or one
4428 of the queues. */
4429 if (ready.n_ready > 0)
4430 {
4431 rtx_insn **first = ready_lastpos (&ready);
4432 for (i = 0; i < ready.n_ready; i++)
4433 {
4434 rtx_insn *insn = first[i];
4435 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
4436 INSN_TICK (insn) = INVALID_TICK;
4437 }
4438 }
4439 for (i = 0; i <= max_insn_queue_index; i++)
4440 {
4441 int q = NEXT_Q_AFTER (q_ptr, i);
4442
4443 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4444 {
4445 rtx_insn *x = link->insn ();
4446 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4447 INSN_TICK (x) = INVALID_TICK;
4448 }
4449 free_INSN_LIST_list (&insn_queue[q]);
4450 }
4451
4452 free (ready.vec);
4453 ready = save->ready;
4454
4455 if (ready.n_ready > 0)
4456 {
4457 rtx_insn **first = ready_lastpos (&ready);
4458 for (i = 0; i < ready.n_ready; i++)
4459 {
4460 rtx_insn *insn = first[i];
4461 QUEUE_INDEX (insn) = QUEUE_READY;
4462 TODO_SPEC (insn) = recompute_todo_spec (insn, true);
4463 INSN_TICK (insn) = save->clock_var;
4464 }
4465 }
4466
4467 q_ptr = 0;
4468 q_size = save->q_size;
4469 for (i = 0; i <= max_insn_queue_index; i++)
4470 {
4471 int q = NEXT_Q_AFTER (q_ptr, i);
4472
4473 insn_queue[q] = save->insn_queue[q];
4474
4475 for (rtx_insn_list *link = insn_queue[q]; link; link = link->next ())
4476 {
4477 rtx_insn *x = link->insn ();
4478 QUEUE_INDEX (x) = i;
4479 TODO_SPEC (x) = recompute_todo_spec (x, true);
4480 INSN_TICK (x) = save->clock_var + i;
4481 }
4482 }
4483 free (save->insn_queue);
4484
4485 toggle_cancelled_flags (true);
4486
4487 clock_var = save->clock_var;
4488 last_clock_var = save->last_clock_var;
4489 cycle_issued_insns = save->cycle_issued_insns;
4490 last_scheduled_insn = save->last_scheduled_insn;
4491 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
4492 nonscheduled_insns_begin = save->nonscheduled_insns_begin;
4493
4494 *psched_block = save->sched_block;
4495
4496 memcpy (curr_state, save->curr_state, dfa_state_size);
4497 free (save->curr_state);
4498
4499 mark_backtrack_feeds (save->delay_pair->i2, 0);
4500
4501 gcc_assert (next_cycle_replace_deps.is_empty ());
4502 next_cycle_replace_deps = save->next_cycle_deps.copy ();
4503 next_cycle_apply = save->next_cycle_apply.copy ();
4504
4505 free (save);
4506
4507 for (save = backtrack_queue; save; save = save->next)
4508 {
4509 mark_backtrack_feeds (save->delay_pair->i2, 1);
4510 }
4511 }
4512
4513 /* Discard all data associated with the topmost entry in the backtrack
4514 queue. If RESET_TICK is false, we just want to free the data. If true,
4515 we are doing this because we discovered a reason to backtrack. In the
4516 latter case, also reset the INSN_TICK for the shadow insn. */
4517 static void
4518 free_topmost_backtrack_point (bool reset_tick)
4519 {
4520 struct haifa_saved_data *save = backtrack_queue;
4521 int i;
4522
4523 backtrack_queue = save->next;
4524
4525 if (reset_tick)
4526 {
4527 struct delay_pair *pair = save->delay_pair;
4528 while (pair)
4529 {
4530 INSN_TICK (pair->i2) = INVALID_TICK;
4531 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
4532 pair = pair->next_same_i1;
4533 }
4534 undo_replacements_for_backtrack (save);
4535 }
4536 else
4537 {
4538 save->replacement_deps.release ();
4539 save->replace_apply.release ();
4540 }
4541
4542 if (targetm.sched.free_sched_context)
4543 targetm.sched.free_sched_context (save->be_saved_data);
4544 if (current_sched_info->restore_state)
4545 free (save->fe_saved_data);
4546 for (i = 0; i <= max_insn_queue_index; i++)
4547 free_INSN_LIST_list (&save->insn_queue[i]);
4548 free (save->insn_queue);
4549 free (save->curr_state);
4550 free (save->ready.vec);
4551 free (save);
4552 }
4553
4554 /* Free the entire backtrack queue. */
4555 static void
4556 free_backtrack_queue (void)
4557 {
4558 while (backtrack_queue)
4559 free_topmost_backtrack_point (false);
4560 }
4561
4562 /* Apply a replacement described by DESC. If IMMEDIATELY is false, we
4563 may have to postpone the replacement until the start of the next cycle,
4564 at which point we will be called again with IMMEDIATELY true. This is
4565 only done, however, for machines which have instruction packets with
4566 explicit parallelism. */
4567 static void
4568 apply_replacement (dep_t dep, bool immediately)
4569 {
4570 struct dep_replacement *desc = DEP_REPLACE (dep);
4571 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4572 {
4573 next_cycle_replace_deps.safe_push (dep);
4574 next_cycle_apply.safe_push (1);
4575 }
4576 else
4577 {
4578 bool success;
4579
4580 if (QUEUE_INDEX (desc->insn) == QUEUE_SCHEDULED)
4581 return;
4582
4583 if (sched_verbose >= 5)
4584 fprintf (sched_dump, "applying replacement for insn %d\n",
4585 INSN_UID (desc->insn));
4586
4587 success = validate_change (desc->insn, desc->loc, desc->newval, 0);
4588 gcc_assert (success);
4589
4590 update_insn_after_change (desc->insn);
4591 if ((TODO_SPEC (desc->insn) & (HARD_DEP | DEP_POSTPONED)) == 0)
4592 fix_tick_ready (desc->insn);
4593
4594 if (backtrack_queue != NULL)
4595 {
4596 backtrack_queue->replacement_deps.safe_push (dep);
4597 backtrack_queue->replace_apply.safe_push (1);
4598 }
4599 }
4600 }
4601
4602 /* We have determined that a pattern involved in DEP must be restored.
4603 If IMMEDIATELY is false, we may have to postpone the replacement
4604 until the start of the next cycle, at which point we will be called
4605 again with IMMEDIATELY true. */
4606 static void
4607 restore_pattern (dep_t dep, bool immediately)
4608 {
4609 rtx_insn *next = DEP_CON (dep);
4610 int tick = INSN_TICK (next);
4611
4612 /* If we already scheduled the insn, the modified version is
4613 correct. */
4614 if (QUEUE_INDEX (next) == QUEUE_SCHEDULED)
4615 return;
4616
4617 if (!immediately && targetm.sched.exposed_pipeline && reload_completed)
4618 {
4619 next_cycle_replace_deps.safe_push (dep);
4620 next_cycle_apply.safe_push (0);
4621 return;
4622 }
4623
4624
4625 if (DEP_TYPE (dep) == REG_DEP_CONTROL)
4626 {
4627 if (sched_verbose >= 5)
4628 fprintf (sched_dump, "restoring pattern for insn %d\n",
4629 INSN_UID (next));
4630 haifa_change_pattern (next, ORIG_PAT (next));
4631 }
4632 else
4633 {
4634 struct dep_replacement *desc = DEP_REPLACE (dep);
4635 bool success;
4636
4637 if (sched_verbose >= 5)
4638 fprintf (sched_dump, "restoring pattern for insn %d\n",
4639 INSN_UID (desc->insn));
4640 tick = INSN_TICK (desc->insn);
4641
4642 success = validate_change (desc->insn, desc->loc, desc->orig, 0);
4643 gcc_assert (success);
4644 update_insn_after_change (desc->insn);
4645 if (backtrack_queue != NULL)
4646 {
4647 backtrack_queue->replacement_deps.safe_push (dep);
4648 backtrack_queue->replace_apply.safe_push (0);
4649 }
4650 }
4651 INSN_TICK (next) = tick;
4652 if (TODO_SPEC (next) == DEP_POSTPONED)
4653 return;
4654
4655 if (sd_lists_empty_p (next, SD_LIST_BACK))
4656 TODO_SPEC (next) = 0;
4657 else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4658 TODO_SPEC (next) = HARD_DEP;
4659 }
4660
4661 /* Perform pattern replacements that were queued up until the next
4662 cycle. */
4663 static void
4664 perform_replacements_new_cycle (void)
4665 {
4666 int i;
4667 dep_t dep;
4668 FOR_EACH_VEC_ELT (next_cycle_replace_deps, i, dep)
4669 {
4670 int apply_p = next_cycle_apply[i];
4671 if (apply_p)
4672 apply_replacement (dep, true);
4673 else
4674 restore_pattern (dep, true);
4675 }
4676 next_cycle_replace_deps.truncate (0);
4677 next_cycle_apply.truncate (0);
4678 }
4679
4680 /* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
4681 instructions we've previously encountered, a set bit prevents
4682 recursion. BUDGET is a limit on how far ahead we look; it is
4683 reduced on recursive calls. Return true if we produced a good
4684 estimate, or false if we exceeded the budget. */
4685 static bool
4686 estimate_insn_tick (bitmap processed, rtx_insn *insn, int budget)
4687 {
4688 sd_iterator_def sd_it;
4689 dep_t dep;
4690 int earliest = INSN_TICK (insn);
4691
4692 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
4693 {
4694 rtx_insn *pro = DEP_PRO (dep);
4695 int t;
4696
4697 if (DEP_STATUS (dep) & DEP_CANCELLED)
4698 continue;
4699
4700 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
4701 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
4702 else
4703 {
4704 int cost = dep_cost (dep);
4705 if (cost >= budget)
4706 return false;
4707 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
4708 {
4709 if (!estimate_insn_tick (processed, pro, budget - cost))
4710 return false;
4711 }
4712 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
4713 t = INSN_TICK_ESTIMATE (pro) + cost;
4714 if (earliest == INVALID_TICK || t > earliest)
4715 earliest = t;
4716 }
4717 }
4718 bitmap_set_bit (processed, INSN_LUID (insn));
4719 INSN_TICK_ESTIMATE (insn) = earliest;
4720 return true;
4721 }
4722
4723 /* Examine the pair of insns in P, and estimate (optimistically, assuming
4724 infinite resources) the cycle in which the delayed shadow can be issued.
4725 Return the number of cycles that must pass before the real insn can be
4726 issued in order to meet this constraint. */
4727 static int
4728 estimate_shadow_tick (struct delay_pair *p)
4729 {
4730 bitmap_head processed;
4731 int t;
4732 bool cutoff;
4733 bitmap_initialize (&processed, 0);
4734
4735 cutoff = !estimate_insn_tick (&processed, p->i2,
4736 max_insn_queue_index + pair_delay (p));
4737 bitmap_clear (&processed);
4738 if (cutoff)
4739 return max_insn_queue_index;
4740 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
4741 if (t > 0)
4742 return t;
4743 return 0;
4744 }
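/* A worked example with hypothetical numbers: if the optimistic estimate
   above gives INSN_TICK_ESTIMATE (p->i2) == 12 while clock_var == 5 and
   pair_delay (p) == 4, then t == 12 - (5 + 4 + 1) == 2, so the real insn
   must wait two more cycles to meet the constraint.  Had the estimate
   been 10 or less, the function would return 0 and no extra delay would
   be needed.  */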
4745
4746 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
4747 recursively resolve all its forward dependencies. */
4748 static void
4749 resolve_dependencies (rtx_insn *insn)
4750 {
4751 sd_iterator_def sd_it;
4752 dep_t dep;
4753
4754 /* Don't use sd_lists_empty_p; it ignores debug insns. */
4755 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
4756 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
4757 return;
4758
4759 if (sched_verbose >= 4)
4760 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
4761
4762 if (QUEUE_INDEX (insn) >= 0)
4763 queue_remove (insn);
4764
4765 scheduled_insns.safe_push (insn);
4766
4767 /* Update dependent instructions. */
4768 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
4769 sd_iterator_cond (&sd_it, &dep);)
4770 {
4771 rtx_insn *next = DEP_CON (dep);
4772
4773 if (sched_verbose >= 4)
4774 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
4775 INSN_UID (next));
4776
4777 /* Resolve the dependence between INSN and NEXT.
4778 sd_resolve_dep () moves current dep to another list thus
4779 advancing the iterator. */
4780 sd_resolve_dep (sd_it);
4781
4782 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
4783 {
4784 resolve_dependencies (next);
4785 }
4786 else
4787 /* A check always has only one forward dependence (to the first insn in
4788 the recovery block), so this will be executed only once. */
4789 {
4790 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
4791 }
4792 }
4793 }
4794
4795
4796 /* Return the head and tail pointers of ebb starting at BEG and ending
4797 at END. */
4798 void
4799 get_ebb_head_tail (basic_block beg, basic_block end,
4800 rtx_insn **headp, rtx_insn **tailp)
4801 {
4802 rtx_insn *beg_head = BB_HEAD (beg);
4803 rtx_insn * beg_tail = BB_END (beg);
4804 rtx_insn * end_head = BB_HEAD (end);
4805 rtx_insn * end_tail = BB_END (end);
4806
4807 /* Don't include any notes or labels at the beginning of the BEG
4808 basic block, or notes at the end of the END basic block. */
4809
4810 if (LABEL_P (beg_head))
4811 beg_head = NEXT_INSN (beg_head);
4812
4813 while (beg_head != beg_tail)
4814 if (NOTE_P (beg_head))
4815 beg_head = NEXT_INSN (beg_head);
4816 else if (DEBUG_INSN_P (beg_head))
4817 {
4818 rtx_insn * note, *next;
4819
4820 for (note = NEXT_INSN (beg_head);
4821 note != beg_tail;
4822 note = next)
4823 {
4824 next = NEXT_INSN (note);
4825 if (NOTE_P (note))
4826 {
4827 if (sched_verbose >= 9)
4828 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4829
4830 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
4831
4832 if (BLOCK_FOR_INSN (note) != beg)
4833 df_insn_change_bb (note, beg);
4834 }
4835 else if (!DEBUG_INSN_P (note))
4836 break;
4837 }
4838
4839 break;
4840 }
4841 else
4842 break;
4843
4844 *headp = beg_head;
4845
4846 if (beg == end)
4847 end_head = beg_head;
4848 else if (LABEL_P (end_head))
4849 end_head = NEXT_INSN (end_head);
4850
4851 while (end_head != end_tail)
4852 if (NOTE_P (end_tail))
4853 end_tail = PREV_INSN (end_tail);
4854 else if (DEBUG_INSN_P (end_tail))
4855 {
4856 rtx_insn * note, *prev;
4857
4858 for (note = PREV_INSN (end_tail);
4859 note != end_head;
4860 note = prev)
4861 {
4862 prev = PREV_INSN (note);
4863 if (NOTE_P (note))
4864 {
4865 if (sched_verbose >= 9)
4866 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
4867
4868 reorder_insns_nobb (note, note, end_tail);
4869
4870 if (end_tail == BB_END (end))
4871 BB_END (end) = note;
4872
4873 if (BLOCK_FOR_INSN (note) != end)
4874 df_insn_change_bb (note, end);
4875 }
4876 else if (!DEBUG_INSN_P (note))
4877 break;
4878 }
4879
4880 break;
4881 }
4882 else
4883 break;
4884
4885 *tailp = end_tail;
4886 }
4887
4888 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
4889
4890 int
4891 no_real_insns_p (const rtx_insn *head, const rtx_insn *tail)
4892 {
4893 while (head != NEXT_INSN (tail))
4894 {
4895 if (!NOTE_P (head) && !LABEL_P (head))
4896 return 0;
4897 head = NEXT_INSN (head);
4898 }
4899 return 1;
4900 }
4901
4902 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
4903 previously found among the insns. Insert them just before HEAD. */
4904 rtx_insn *
4905 restore_other_notes (rtx_insn *head, basic_block head_bb)
4906 {
4907 if (note_list != 0)
4908 {
4909 rtx_insn *note_head = note_list;
4910
4911 if (head)
4912 head_bb = BLOCK_FOR_INSN (head);
4913 else
4914 head = NEXT_INSN (bb_note (head_bb));
4915
4916 while (PREV_INSN (note_head))
4917 {
4918 set_block_for_insn (note_head, head_bb);
4919 note_head = PREV_INSN (note_head);
4920 }
4921 /* In the loop above we've missed this note. */
4922 set_block_for_insn (note_head, head_bb);
4923
4924 SET_PREV_INSN (note_head) = PREV_INSN (head);
4925 SET_NEXT_INSN (PREV_INSN (head)) = note_head;
4926 SET_PREV_INSN (head) = note_list;
4927 SET_NEXT_INSN (note_list) = head;
4928
4929 if (BLOCK_FOR_INSN (head) != head_bb)
4930 BB_END (head_bb) = note_list;
4931
4932 head = note_head;
4933 }
4934
4935 return head;
4936 }
4937
4938 /* When we know we are going to discard the schedule due to a failed attempt
4939 at modulo scheduling, undo all replacements. */
4940 static void
4941 undo_all_replacements (void)
4942 {
4943 rtx_insn *insn;
4944 int i;
4945
4946 FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
4947 {
4948 sd_iterator_def sd_it;
4949 dep_t dep;
4950
4951 /* See if we must undo a replacement. */
4952 for (sd_it = sd_iterator_start (insn, SD_LIST_RES_FORW);
4953 sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
4954 {
4955 struct dep_replacement *desc = DEP_REPLACE (dep);
4956 if (desc != NULL)
4957 validate_change (desc->insn, desc->loc, desc->orig, 0);
4958 }
4959 }
4960 }
4961
4962 /* Return first non-scheduled insn in the current scheduling block.
4963 This is mostly used for debug-counter purposes. */
4964 static rtx_insn *
4965 first_nonscheduled_insn (void)
4966 {
4967 rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
4968 ? nonscheduled_insns_begin
4969 : current_sched_info->prev_head);
4970
4971 do
4972 {
4973 insn = next_nonnote_nondebug_insn (insn);
4974 }
4975 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
4976
4977 return insn;
4978 }
4979
4980 /* Move insns that became ready to fire from queue to ready list. */
4981
4982 static void
4983 queue_to_ready (struct ready_list *ready)
4984 {
4985 rtx_insn *insn;
4986 rtx_insn_list *link;
4987 rtx skip_insn;
4988
4989 q_ptr = NEXT_Q (q_ptr);
4990
4991 if (dbg_cnt (sched_insn) == false)
4992 /* If debug counter is activated do not requeue the first
4993 nonscheduled insn. */
4994 skip_insn = first_nonscheduled_insn ();
4995 else
4996 skip_insn = NULL_RTX;
4997
4998 /* Add all pending insns that can be scheduled without stalls to the
4999 ready list. */
5000 for (link = insn_queue[q_ptr]; link; link = link->next ())
5001 {
5002 insn = link->insn ();
5003 q_size -= 1;
5004
5005 if (sched_verbose >= 2)
5006 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5007 (*current_sched_info->print_insn) (insn, 0));
5008
5009 /* If the ready list is full, delay the insn for 1 cycle.
5010 See the comment in schedule_block for the rationale. */
5011 if (!reload_completed
5012 && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
5013 || (sched_pressure == SCHED_PRESSURE_MODEL
5014 /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
5015 instructions too. */
5016 && model_index (insn) > (model_curr_point
5017 + MAX_SCHED_READY_INSNS)))
5018 && !(sched_pressure == SCHED_PRESSURE_MODEL
5019 && model_curr_point < model_num_insns
5020 /* Always allow the next model instruction to issue. */
5021 && model_index (insn) == model_curr_point)
5022 && !SCHED_GROUP_P (insn)
5023 && insn != skip_insn)
5024 {
5025 if (sched_verbose >= 2)
5026 fprintf (sched_dump, "keeping in queue, ready full\n");
5027 queue_insn (insn, 1, "ready full");
5028 }
5029 else
5030 {
5031 ready_add (ready, insn, false);
5032 if (sched_verbose >= 2)
5033 fprintf (sched_dump, "moving to ready without stalls\n");
5034 }
5035 }
5036 free_INSN_LIST_list (&insn_queue[q_ptr]);
5037
5038 /* If there are no ready insns, stall until one is ready and add all
5039 of the pending insns at that point to the ready list. */
5040 if (ready->n_ready == 0)
5041 {
5042 int stalls;
5043
5044 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
5045 {
5046 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5047 {
5048 for (; link; link = link->next ())
5049 {
5050 insn = link->insn ();
5051 q_size -= 1;
5052
5053 if (sched_verbose >= 2)
5054 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
5055 (*current_sched_info->print_insn) (insn, 0));
5056
5057 ready_add (ready, insn, false);
5058 if (sched_verbose >= 2)
5059 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
5060 }
5061 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
5062
5063 advance_one_cycle ();
5064
5065 break;
5066 }
5067
5068 advance_one_cycle ();
5069 }
5070
5071 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
5072 clock_var += stalls;
5073 if (sched_verbose >= 2)
5074 fprintf (sched_dump, ";;\tAdvancing clock by %d cycle[s] to %d\n",
5075 stalls, clock_var);
5076 }
5077 }
5078
5079 /* Used by early_queue_to_ready. Determines whether it is "ok" to
5080 prematurely move INSN from the queue to the ready list. Currently,
5081 if a target defines the hook 'is_costly_dependence', this function
5082 uses the hook to check whether there exist any dependences which are
5083 considered costly by the target, between INSN and other insns that
5084 have already been scheduled. Dependences are checked up to Y cycles
5085 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
5086 controlling this value.
5087 (Other considerations could be taken into account instead (or in
5088 addition) depending on user flags and target hooks.) */
5089
5090 static bool
5091 ok_for_early_queue_removal (rtx insn)
5092 {
5093 if (targetm.sched.is_costly_dependence)
5094 {
5095 rtx prev_insn;
5096 int n_cycles;
5097 int i = scheduled_insns.length ();
5098 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
5099 {
5100 while (i-- > 0)
5101 {
5102 int cost;
5103
5104 prev_insn = scheduled_insns[i];
5105
5106 if (!NOTE_P (prev_insn))
5107 {
5108 dep_t dep;
5109
5110 dep = sd_find_dep_between (prev_insn, insn, true);
5111
5112 if (dep != NULL)
5113 {
5114 cost = dep_cost (dep);
5115
5116 if (targetm.sched.is_costly_dependence (dep, cost,
5117 flag_sched_stalled_insns_dep - n_cycles))
5118 return false;
5119 }
5120 }
5121
5122 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
5123 break;
5124 }
5125
5126 if (i == 0)
5127 break;
5128 }
5129 }
5130
5131 return true;
5132 }
5133
5134
5135 /* Remove insns from the queue, before they become "ready" with respect
5136 to FU latency considerations. */
5137
5138 static int
5139 early_queue_to_ready (state_t state, struct ready_list *ready)
5140 {
5141 rtx_insn *insn;
5142 rtx_insn_list *link;
5143 rtx_insn_list *next_link;
5144 rtx_insn_list *prev_link;
5145 bool move_to_ready;
5146 int cost;
5147 state_t temp_state = alloca (dfa_state_size);
5148 int stalls;
5149 int insns_removed = 0;
5150
5151 /*
5152 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
5153 function:
5154
5155 X == 0: There is no limit on how many queued insns can be removed
5156 prematurely. (flag_sched_stalled_insns = -1).
5157
5158 X >= 1: Only X queued insns can be removed prematurely in each
5159 invocation. (flag_sched_stalled_insns = X).
5160
5161 Otherwise: Early queue removal is disabled.
5162 (flag_sched_stalled_insns = 0)
5163 */
5164
5165 if (! flag_sched_stalled_insns)
5166 return 0;
5167
5168 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
5169 {
5170 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
5171 {
5172 if (sched_verbose > 6)
5173 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
5174
5175 prev_link = 0;
5176 while (link)
5177 {
5178 next_link = link->next ();
5179 insn = link->insn ();
5180 if (insn && sched_verbose > 6)
5181 print_rtl_single (sched_dump, insn);
5182
5183 memcpy (temp_state, state, dfa_state_size);
5184 if (recog_memoized (insn) < 0)
5185 /* Use a non-negative cost to indicate that the insn is not
5186 ready yet, avoiding an infinite Q->R->Q->R... cycle. */
5187 cost = 0;
5188 else
5189 cost = state_transition (temp_state, insn);
5190
5191 if (sched_verbose >= 6)
5192 fprintf (sched_dump, "transition cost = %d\n", cost);
5193
5194 move_to_ready = false;
5195 if (cost < 0)
5196 {
5197 move_to_ready = ok_for_early_queue_removal (insn);
5198 if (move_to_ready == true)
5199 {
5200 /* move from Q to R */
5201 q_size -= 1;
5202 ready_add (ready, insn, false);
5203
5204 if (prev_link)
5205 XEXP (prev_link, 1) = next_link;
5206 else
5207 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
5208
5209 free_INSN_LIST_node (link);
5210
5211 if (sched_verbose >= 2)
5212 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
5213 (*current_sched_info->print_insn) (insn, 0));
5214
5215 insns_removed++;
5216 if (insns_removed == flag_sched_stalled_insns)
5217 /* Remove no more than flag_sched_stalled_insns insns
5218 from Q at a time. */
5219 return insns_removed;
5220 }
5221 }
5222
5223 if (move_to_ready == false)
5224 prev_link = link;
5225
5226 link = next_link;
5227 } /* while link */
5228 } /* if link */
5229
5230 } /* for stalls.. */
5231
5232 return insns_removed;
5233 }
5234
5235
5236 /* Print the ready list for debugging purposes.
5237    If READY_TRY is non-NULL, then only print the insns that max_issue
5238    will consider.  */
5239 static void
5240 debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
5241 {
5242 rtx_insn **p;
5243 int i;
5244
5245 if (ready->n_ready == 0)
5246 {
5247 fprintf (sched_dump, "\n");
5248 return;
5249 }
5250
5251 p = ready_lastpos (ready);
5252 for (i = 0; i < ready->n_ready; i++)
5253 {
5254 if (ready_try != NULL && ready_try[ready->n_ready - i - 1])
5255 continue;
5256
5257 fprintf (sched_dump, " %s:%d",
5258 (*current_sched_info->print_insn) (p[i], 0),
5259 INSN_LUID (p[i]));
5260 if (sched_pressure != SCHED_PRESSURE_NONE)
5261 fprintf (sched_dump, "(cost=%d",
5262 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
5263 fprintf (sched_dump, ":prio=%d", INSN_PRIORITY (p[i]));
5264 if (INSN_TICK (p[i]) > clock_var)
5265 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
5266 if (sched_pressure != SCHED_PRESSURE_NONE)
5267 fprintf (sched_dump, ")");
5268 }
5269 fprintf (sched_dump, "\n");
5270 }
5271
5272 /* Print the ready list. Callable from debugger. */
5273 static void
5274 debug_ready_list (struct ready_list *ready)
5275 {
5276 debug_ready_list_1 (ready, NULL);
5277 }
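/* For reference, a hypothetical line emitted by the printers above when
   pressure-sensitive scheduling is enabled looks like

     insn_name:27(cost=0:prio=4:delay=1)

   i.e. the print_insn text, the LUID, the register-pressure excess cost,
   the priority, and (only when the insn is not yet ready) its delay.
   When pressure-sensitive scheduling is off, the parenthesized cost and
   the closing ")" are omitted.  */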
5278
5279 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
5280 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
5281 replaces the epilogue note in the correct basic block. */
5282 void
5283 reemit_notes (rtx_insn *insn)
5284 {
5285 rtx note;
5286 rtx_insn *last = insn;
5287
5288 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
5289 {
5290 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
5291 {
5292 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
5293
5294 last = emit_note_before (note_type, last);
5295 remove_note (insn, note);
5296 }
5297 }
5298 }
5299
5300 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
5301 static void
5302 move_insn (rtx_insn *insn, rtx_insn *last, rtx nt)
5303 {
5304 if (PREV_INSN (insn) != last)
5305 {
5306 basic_block bb;
5307 rtx_insn *note;
5308 int jump_p = 0;
5309
5310 bb = BLOCK_FOR_INSN (insn);
5311
5312 /* BB_HEAD is either LABEL or NOTE. */
5313 gcc_assert (BB_HEAD (bb) != insn);
5314
5315 if (BB_END (bb) == insn)
5316 /* If this is last instruction in BB, move end marker one
5317 instruction up. */
5318 {
5319 /* Jumps are always placed at the end of basic block. */
5320 jump_p = control_flow_insn_p (insn);
5321
5322 gcc_assert (!jump_p
5323 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
5324 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
5325 || (common_sched_info->sched_pass_id
5326 == SCHED_EBB_PASS));
5327
5328 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
5329
5330 BB_END (bb) = PREV_INSN (insn);
5331 }
5332
5333 gcc_assert (BB_END (bb) != last);
5334
5335 if (jump_p)
5336 /* We move the block note along with jump. */
5337 {
5338 gcc_assert (nt);
5339
5340 note = NEXT_INSN (insn);
5341 while (NOTE_NOT_BB_P (note) && note != nt)
5342 note = NEXT_INSN (note);
5343
5344 if (note != nt
5345 && (LABEL_P (note)
5346 || BARRIER_P (note)))
5347 note = NEXT_INSN (note);
5348
5349 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5350 }
5351 else
5352 note = insn;
5353
5354 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
5355 SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
5356
5357 SET_NEXT_INSN (note) = NEXT_INSN (last);
5358 SET_PREV_INSN (NEXT_INSN (last)) = note;
5359
5360 SET_NEXT_INSN (last) = insn;
5361 SET_PREV_INSN (insn) = last;
5362
5363 bb = BLOCK_FOR_INSN (last);
5364
5365 if (jump_p)
5366 {
5367 fix_jump_move (insn);
5368
5369 if (BLOCK_FOR_INSN (insn) != bb)
5370 move_block_after_check (insn);
5371
5372 gcc_assert (BB_END (bb) == last);
5373 }
5374
5375 df_insn_change_bb (insn, bb);
5376
5377 /* Update BB_END, if needed. */
5378 if (BB_END (bb) == last)
5379 BB_END (bb) = insn;
5380 }
5381
5382 SCHED_GROUP_P (insn) = 0;
5383 }
5384
5385 /* Return true if scheduling INSN will finish current clock cycle. */
5386 static bool
5387 insn_finishes_cycle_p (rtx_insn *insn)
5388 {
5389 if (SCHED_GROUP_P (insn))
5390     /* After issuing INSN, the rest of the sched_group will be forced to
5391        issue in order.  Don't make any plans for the rest of the cycle.  */
5392 return true;
5393
5394 /* Finishing the block will, apparently, finish the cycle. */
5395 if (current_sched_info->insn_finishes_block_p
5396 && current_sched_info->insn_finishes_block_p (insn))
5397 return true;
5398
5399 return false;
5400 }
5401
5402 /* Define type for target data used in multipass scheduling. */
5403 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
5404 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
5405 #endif
5406 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
5407
5408 /* The following structure describes an entry of the stack of choices.  */
5409 struct choice_entry
5410 {
5411 /* Ordinal number of the issued insn in the ready queue. */
5412 int index;
5413   /* The number of remaining insns whose issue we should still try.  */
5414 int rest;
5415 /* The number of issued essential insns. */
5416 int n;
5417 /* State after issuing the insn. */
5418 state_t state;
5419 /* Target-specific data. */
5420 first_cycle_multipass_data_t target_data;
5421 };
5422
5423 /* The following array is used to implement a stack of choices used in
5424 function max_issue. */
5425 static struct choice_entry *choice_stack;
5426
5427 /* This holds the value of the target dfa_lookahead hook. */
5428 int dfa_lookahead;
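/* A target enables the multipass lookahead machinery below by providing
   the first_cycle_multipass_dfa_lookahead hook.  A purely illustrative
   sketch -- the function name is made up and such code belongs in a
   target back end, not in this file:

     static int
     example_sched_dfa_lookahead (void)
     {
       return 4;
     }

     #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
       example_sched_dfa_lookahead

   Returning 4 roughly means that max_issue () below may search up to four
   issue attempts deep on each branch of its lookahead.  */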
5429
5430 /* The following variable is the maximal number of tries of issuing
5431    insns for the first cycle multipass insn scheduling.  We define it as
5432    constant * (DFA_LOOKAHEAD ** ISSUE_RATE).  We would not need this
5433    constraint if all real insns (those with non-negative codes) had
5434    reservations, because in that case the algorithm complexity would be
5435    O(DFA_LOOKAHEAD ** ISSUE_RATE).  Unfortunately, the DFA descriptions
5436    might be incomplete and such insns might occur.  For such
5437    descriptions, the complexity of the algorithm (without the
5438    constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue length.  */
5439 static int max_lookahead_tries;
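/* A worked example of the bound above (the numbers are hypothetical):
   with dfa_lookahead == 4 and issue_rate == 2, the initialization in
   max_issue () computes max_lookahead_tries = 100 * 4 * 4 = 1600, so a
   single call gives up the search after roughly 1600 attempted issues.  */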
5440
5441 /* The following is the value of the hook
5442    `first_cycle_multipass_dfa_lookahead' at the last call of
5443    `max_issue'.  */
5444 static int cached_first_cycle_multipass_dfa_lookahead = 0;
5445
5446 /* The following is the value of `issue_rate' at the last call of
5447    `sched_init'.  */
5448 static int cached_issue_rate = 0;
5449
5450 /* The following function returns the maximal (or close to maximal)
5451    number of insns which can be issued on the same cycle, one of which
5452    is the insn with the best rank (the first insn in READY).  To do
5453    this, the function tries different samples of ready insns.  READY
5454    is the current queue `ready'.  The global array READY_TRY reflects
5455    which insns have already been issued in this try.  The function
5456    stops immediately if it reaches a solution in which all instructions
5457    can be issued.  INDEX will contain the index of the best insn in
5458    READY.  This function is used only for first cycle multipass scheduling.
5459
5460 PRIVILEGED_N >= 0
5461
5462    This function expects recognized insns only.  All USEs,
5463    CLOBBERs, etc. must be filtered elsewhere.  */
5464 int
5465 max_issue (struct ready_list *ready, int privileged_n, state_t state,
5466 bool first_cycle_insn_p, int *index)
5467 {
5468 int n, i, all, n_ready, best, delay, tries_num;
5469 int more_issue;
5470 struct choice_entry *top;
5471 rtx_insn *insn;
5472
5473 n_ready = ready->n_ready;
5474 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
5475 && privileged_n <= n_ready);
5476
5477 /* Init MAX_LOOKAHEAD_TRIES. */
5478 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
5479 {
5480 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
5481 max_lookahead_tries = 100;
5482 for (i = 0; i < issue_rate; i++)
5483 max_lookahead_tries *= dfa_lookahead;
5484 }
5485
5486 /* Init max_points. */
5487 more_issue = issue_rate - cycle_issued_insns;
5488 gcc_assert (more_issue >= 0);
5489
5490 /* The number of the issued insns in the best solution. */
5491 best = 0;
5492
5493 top = choice_stack;
5494
5495 /* Set initial state of the search. */
5496 memcpy (top->state, state, dfa_state_size);
5497 top->rest = dfa_lookahead;
5498 top->n = 0;
5499 if (targetm.sched.first_cycle_multipass_begin)
5500 targetm.sched.first_cycle_multipass_begin (&top->target_data,
5501 ready_try, n_ready,
5502 first_cycle_insn_p);
5503
5504   /* Count the number of insns to search among.  */
5505 for (all = i = 0; i < n_ready; i++)
5506 if (!ready_try [i])
5507 all++;
5508
5509 if (sched_verbose >= 2)
5510 {
5511 fprintf (sched_dump, ";;\t\tmax_issue among %d insns:", all);
5512 debug_ready_list_1 (ready, ready_try);
5513 }
5514
5515 /* I is the index of the insn to try next. */
5516 i = 0;
5517 tries_num = 0;
5518 for (;;)
5519 {
5520 if (/* If we've reached a dead end or searched enough of what we have
5521 been asked... */
5522 top->rest == 0
5523 /* or have nothing else to try... */
5524 || i >= n_ready
5525 /* or should not issue more. */
5526 || top->n >= more_issue)
5527 {
5528 /* ??? (... || i == n_ready). */
5529 gcc_assert (i <= n_ready);
5530
5531 /* We should not issue more than issue_rate instructions. */
5532 gcc_assert (top->n <= more_issue);
5533
5534 if (top == choice_stack)
5535 break;
5536
5537 if (best < top - choice_stack)
5538 {
5539 if (privileged_n)
5540 {
5541 n = privileged_n;
5542 /* Try to find issued privileged insn. */
5543 while (n && !ready_try[--n])
5544 ;
5545 }
5546
5547 if (/* If all insns are equally good... */
5548 privileged_n == 0
5549 /* Or a privileged insn will be issued. */
5550 || ready_try[n])
5551 /* Then we have a solution. */
5552 {
5553 best = top - choice_stack;
5554 /* This is the index of the insn issued first in this
5555 solution. */
5556 *index = choice_stack [1].index;
5557 if (top->n == more_issue || best == all)
5558 break;
5559 }
5560 }
5561
5562 /* Set ready-list index to point to the last insn
5563 ('i++' below will advance it to the next insn). */
5564 i = top->index;
5565
5566 /* Backtrack. */
5567 ready_try [i] = 0;
5568
5569 if (targetm.sched.first_cycle_multipass_backtrack)
5570 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
5571 ready_try, n_ready);
5572
5573 top--;
5574 memcpy (state, top->state, dfa_state_size);
5575 }
5576 else if (!ready_try [i])
5577 {
5578 tries_num++;
5579 if (tries_num > max_lookahead_tries)
5580 break;
5581 insn = ready_element (ready, i);
5582 delay = state_transition (state, insn);
5583 if (delay < 0)
5584 {
5585 if (state_dead_lock_p (state)
5586 || insn_finishes_cycle_p (insn))
5587 /* We won't issue any more instructions in the next
5588 choice_state. */
5589 top->rest = 0;
5590 else
5591 top->rest--;
5592
5593 n = top->n;
5594 if (memcmp (top->state, state, dfa_state_size) != 0)
5595 n++;
5596
5597 /* Advance to the next choice_entry. */
5598 top++;
5599 /* Initialize it. */
5600 top->rest = dfa_lookahead;
5601 top->index = i;
5602 top->n = n;
5603 memcpy (top->state, state, dfa_state_size);
5604 ready_try [i] = 1;
5605
5606 if (targetm.sched.first_cycle_multipass_issue)
5607 targetm.sched.first_cycle_multipass_issue (&top->target_data,
5608 ready_try, n_ready,
5609 insn,
5610 &((top - 1)
5611 ->target_data));
5612
5613 i = -1;
5614 }
5615 }
5616
5617 /* Increase ready-list index. */
5618 i++;
5619 }
5620
5621 if (targetm.sched.first_cycle_multipass_end)
5622 targetm.sched.first_cycle_multipass_end (best != 0
5623 ? &choice_stack[1].target_data
5624 : NULL);
5625
5626 /* Restore the original state of the DFA. */
5627 memcpy (state, choice_stack->state, dfa_state_size);
5628
5629 return best;
5630 }
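/* A small, hypothetical trace of the search above: suppose issue_rate == 2,
   no insn has been issued yet this cycle, three insns A, B and C are ready,
   and the DFA allows A and C -- but not A and B -- to issue together.  The
   search issues A (depth 1), fails the state transition for B, succeeds
   with C (depth 2 == more_issue) and stops with best == 2 and *index set
   to A's position in READY.  Had no pair fit, it would have backtracked
   and reported best == 1.  */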
5631
5632 /* The following function chooses an insn from READY and modifies
5633    READY.  This function is used only for first cycle
5634    multipass scheduling.
5635 Return:
5636 -1 if cycle should be advanced,
5637 0 if INSN_PTR is set to point to the desirable insn,
5638 1 if choose_ready () should be restarted without advancing the cycle. */
5639 static int
5640 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
5641 rtx_insn **insn_ptr)
5642 {
5643 int lookahead;
5644
5645 if (dbg_cnt (sched_insn) == false)
5646 {
5647 if (nonscheduled_insns_begin == NULL_RTX)
5648 nonscheduled_insns_begin = current_sched_info->prev_head;
5649
5650 rtx_insn *insn = first_nonscheduled_insn ();
5651
5652 if (QUEUE_INDEX (insn) == QUEUE_READY)
5653 /* INSN is in the ready_list. */
5654 {
5655 ready_remove_insn (insn);
5656 *insn_ptr = insn;
5657 return 0;
5658 }
5659
5660 /* INSN is in the queue. Advance cycle to move it to the ready list. */
5661 gcc_assert (QUEUE_INDEX (insn) >= 0);
5662 return -1;
5663 }
5664
5665 lookahead = 0;
5666
5667 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
5668 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
5669 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
5670 || DEBUG_INSN_P (ready_element (ready, 0)))
5671 {
5672 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
5673 *insn_ptr = ready_remove_first_dispatch (ready);
5674 else
5675 *insn_ptr = ready_remove_first (ready);
5676
5677 return 0;
5678 }
5679 else
5680 {
5681 /* Try to choose the best insn. */
5682 int index = 0, i;
5683 rtx_insn *insn;
5684
5685 insn = ready_element (ready, 0);
5686 if (INSN_CODE (insn) < 0)
5687 {
5688 *insn_ptr = ready_remove_first (ready);
5689 return 0;
5690 }
5691
5692 /* Filter the search space. */
5693 for (i = 0; i < ready->n_ready; i++)
5694 {
5695 ready_try[i] = 0;
5696
5697 insn = ready_element (ready, i);
5698
5699 /* If this insn is recognizable we should have already
5700 recognized it earlier.
5701 ??? Not very clear where this is supposed to be done.
5702 See dep_cost_1. */
5703 gcc_checking_assert (INSN_CODE (insn) >= 0
5704 || recog_memoized (insn) < 0);
5705 if (INSN_CODE (insn) < 0)
5706 {
5707 /* Non-recognized insns at position 0 are handled above. */
5708 gcc_assert (i > 0);
5709 ready_try[i] = 1;
5710 continue;
5711 }
5712
5713 if (targetm.sched.first_cycle_multipass_dfa_lookahead_guard)
5714 {
5715 ready_try[i]
5716 = (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
5717 (insn, i));
5718
5719 if (ready_try[i] < 0)
5720 /* Queue instruction for several cycles.
5721 We need to restart choose_ready as we have changed
5722 the ready list. */
5723 {
5724 change_queue_index (insn, -ready_try[i]);
5725 return 1;
5726 }
5727
5728 /* Make sure that we didn't end up with 0'th insn filtered out.
5729 Don't be tempted to make life easier for backends and just
5730 requeue 0'th insn if (ready_try[0] == 0) and restart
5731 choose_ready. Backends should be very considerate about
5732 requeueing instructions -- especially the highest priority
5733 one at position 0. */
5734 gcc_assert (ready_try[i] == 0 || i > 0);
5735 if (ready_try[i])
5736 continue;
5737 }
5738
5739 gcc_assert (ready_try[i] == 0);
5740 /* INSN made it through the scrutiny of filters! */
5741 }
5742
5743 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
5744 {
5745 *insn_ptr = ready_remove_first (ready);
5746 if (sched_verbose >= 4)
5747 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
5748 (*current_sched_info->print_insn) (*insn_ptr, 0));
5749 return 0;
5750 }
5751 else
5752 {
5753 if (sched_verbose >= 4)
5754 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
5755 (*current_sched_info->print_insn)
5756 (ready_element (ready, index), 0));
5757
5758 *insn_ptr = ready_remove (ready, index);
5759 return 0;
5760 }
5761 }
5762 }
5763
5764 /* This function is called when we have successfully scheduled a
5765 block. It uses the schedule stored in the scheduled_insns vector
5766 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
5767 append the scheduled insns; TAIL is the insn after the scheduled
5768 block. TARGET_BB is the argument passed to schedule_block. */
5769
5770 static void
5771 commit_schedule (rtx_insn *prev_head, rtx_insn *tail, basic_block *target_bb)
5772 {
5773 unsigned int i;
5774 rtx_insn *insn;
5775
5776 last_scheduled_insn = prev_head;
5777 for (i = 0;
5778 scheduled_insns.iterate (i, &insn);
5779 i++)
5780 {
5781 if (control_flow_insn_p (last_scheduled_insn)
5782 || current_sched_info->advance_target_bb (*target_bb, insn))
5783 {
5784 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
5785
5786 if (sched_verbose)
5787 {
5788 rtx_insn *x;
5789
5790 x = next_real_insn (last_scheduled_insn);
5791 gcc_assert (x);
5792 dump_new_block_header (1, *target_bb, x, tail);
5793 }
5794
5795 last_scheduled_insn = bb_note (*target_bb);
5796 }
5797
5798 if (current_sched_info->begin_move_insn)
5799 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
5800 move_insn (insn, last_scheduled_insn,
5801 current_sched_info->next_tail);
5802 if (!DEBUG_INSN_P (insn))
5803 reemit_notes (insn);
5804 last_scheduled_insn = insn;
5805 }
5806
5807 scheduled_insns.truncate (0);
5808 }
5809
5810 /* Examine all insns on the ready list and queue those which can't be
5811 issued in this cycle. TEMP_STATE is temporary scheduler state we
5812 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
5813 have been issued for the current cycle, which means it is valid to
5814 issue an asm statement.
5815
5816 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
5817    leave those for which SHADOW_P is true.  If MODULO_EPILOGUE_P is true,
5818 we only leave insns which have an INSN_EXACT_TICK. */
5819
5820 static void
5821 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
5822 bool shadows_only_p, bool modulo_epilogue_p)
5823 {
5824 int i, pass;
5825 bool sched_group_found = false;
5826 int min_cost_group = 1;
5827
5828 for (i = 0; i < ready.n_ready; i++)
5829 {
5830 rtx_insn *insn = ready_element (&ready, i);
5831 if (SCHED_GROUP_P (insn))
5832 {
5833 sched_group_found = true;
5834 break;
5835 }
5836 }
5837
5838 /* Make two passes if there's a SCHED_GROUP_P insn; make sure to handle
5839 such an insn first and note its cost, then schedule all other insns
5840 for one cycle later. */
5841 for (pass = sched_group_found ? 0 : 1; pass < 2; )
5842 {
5843 int n = ready.n_ready;
5844 for (i = 0; i < n; i++)
5845 {
5846 rtx_insn *insn = ready_element (&ready, i);
5847 int cost = 0;
5848 const char *reason = "resource conflict";
5849
5850 if (DEBUG_INSN_P (insn))
5851 continue;
5852
5853 if (sched_group_found && !SCHED_GROUP_P (insn))
5854 {
5855 if (pass == 0)
5856 continue;
5857 cost = min_cost_group;
5858 reason = "not in sched group";
5859 }
5860 else if (modulo_epilogue_p
5861 && INSN_EXACT_TICK (insn) == INVALID_TICK)
5862 {
5863 cost = max_insn_queue_index;
5864 reason = "not an epilogue insn";
5865 }
5866 else if (shadows_only_p && !SHADOW_P (insn))
5867 {
5868 cost = 1;
5869 reason = "not a shadow";
5870 }
5871 else if (recog_memoized (insn) < 0)
5872 {
5873 if (!first_cycle_insn_p
5874 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
5875 || asm_noperands (PATTERN (insn)) >= 0))
5876 cost = 1;
5877 reason = "asm";
5878 }
5879 else if (sched_pressure != SCHED_PRESSURE_NONE)
5880 {
5881 if (sched_pressure == SCHED_PRESSURE_MODEL
5882 && INSN_TICK (insn) <= clock_var)
5883 {
5884 memcpy (temp_state, curr_state, dfa_state_size);
5885 if (state_transition (temp_state, insn) >= 0)
5886 INSN_TICK (insn) = clock_var + 1;
5887 }
5888 cost = 0;
5889 }
5890 else
5891 {
5892 int delay_cost = 0;
5893
5894 if (delay_htab)
5895 {
5896 struct delay_pair *delay_entry;
5897 delay_entry
5898 = delay_htab->find_with_hash (insn,
5899 htab_hash_pointer (insn));
5900 while (delay_entry && delay_cost == 0)
5901 {
5902 delay_cost = estimate_shadow_tick (delay_entry);
5903 if (delay_cost > max_insn_queue_index)
5904 delay_cost = max_insn_queue_index;
5905 delay_entry = delay_entry->next_same_i1;
5906 }
5907 }
5908
5909 memcpy (temp_state, curr_state, dfa_state_size);
5910 cost = state_transition (temp_state, insn);
5911 if (cost < 0)
5912 cost = 0;
5913 else if (cost == 0)
5914 cost = 1;
5915 if (cost < delay_cost)
5916 {
5917 cost = delay_cost;
5918 reason = "shadow tick";
5919 }
5920 }
5921 if (cost >= 1)
5922 {
5923 if (SCHED_GROUP_P (insn) && cost > min_cost_group)
5924 min_cost_group = cost;
5925 ready_remove (&ready, i);
5926 queue_insn (insn, cost, reason);
5927 if (i + 1 < n)
5928 break;
5929 }
5930 }
5931 if (i == n)
5932 pass++;
5933 }
5934 }
5935
5936 /* Called when we detect that the schedule is impossible. We examine the
5937 backtrack queue to find the earliest insn that caused this condition. */
5938
5939 static struct haifa_saved_data *
5940 verify_shadows (void)
5941 {
5942 struct haifa_saved_data *save, *earliest_fail = NULL;
5943 for (save = backtrack_queue; save; save = save->next)
5944 {
5945 int t;
5946 struct delay_pair *pair = save->delay_pair;
5947 rtx_insn *i1 = pair->i1;
5948
5949 for (; pair; pair = pair->next_same_i1)
5950 {
5951 rtx_insn *i2 = pair->i2;
5952
5953 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
5954 continue;
5955
5956 t = INSN_TICK (i1) + pair_delay (pair);
5957 if (t < clock_var)
5958 {
5959 if (sched_verbose >= 2)
5960 fprintf (sched_dump,
5961 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
5962 ", not ready\n",
5963 INSN_UID (pair->i1), INSN_UID (pair->i2),
5964 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
5965 earliest_fail = save;
5966 break;
5967 }
5968 if (QUEUE_INDEX (i2) >= 0)
5969 {
5970 int queued_for = INSN_TICK (i2);
5971
5972 if (t < queued_for)
5973 {
5974 if (sched_verbose >= 2)
5975 fprintf (sched_dump,
5976 ";;\t\tfailed delay requirements for %d/%d"
5977 " (%d->%d), queued too late\n",
5978 INSN_UID (pair->i1), INSN_UID (pair->i2),
5979 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
5980 earliest_fail = save;
5981 break;
5982 }
5983 }
5984 }
5985 }
5986
5987 return earliest_fail;
5988 }
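/* A numeric illustration of the check above (all values hypothetical):
   if a delay pair requires I2 to issue 3 cycles after I1, I1 was
   scheduled at tick 10, and clock_var has already advanced to 14, then
   t = 10 + 3 = 13 < 14 and the backtrack point saved for I1 is flagged
   as failed.  The same happens if I2 is still queued for a tick later
   than t.  */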
5989
5990 /* Print instructions together with useful scheduling information between
5991 HEAD and TAIL (inclusive). */
5992 static void
5993 dump_insn_stream (rtx_insn *head, rtx_insn *tail)
5994 {
5995 fprintf (sched_dump, ";;\t| insn | prio |\n");
5996
5997 rtx_insn *next_tail = NEXT_INSN (tail);
5998 for (rtx_insn *insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5999 {
6000 int priority = NOTE_P (insn) ? 0 : INSN_PRIORITY (insn);
6001 const char *pattern = (NOTE_P (insn)
6002 ? "note"
6003 : str_pattern_slim (PATTERN (insn)));
6004
6005 fprintf (sched_dump, ";;\t| %4d | %4d | %-30s ",
6006 INSN_UID (insn), priority, pattern);
6007
6008 if (sched_verbose >= 4)
6009 {
6010 if (NOTE_P (insn) || recog_memoized (insn) < 0)
6011 fprintf (sched_dump, "nothing");
6012 else
6013 print_reservation (sched_dump, insn);
6014 }
6015 fprintf (sched_dump, "\n");
6016 }
6017 }
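/* For reference, a hypothetical fragment of the dump produced above:

     ;;	| insn | prio |
     ;;	|   23 |    6 | (set (reg:SI 90) (mem:SI ...))
     ;;	|   24 |    5 | (set (reg:SI 91) (plus:SI ...))

   With -fsched-verbose=4 or higher, the reservation used by each insn is
   appended to its row.  */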
6018
6019 /* Use forward list scheduling to rearrange insns of block pointed to by
6020 TARGET_BB, possibly bringing insns from subsequent blocks in the same
6021 region. */
6022
6023 bool
6024 schedule_block (basic_block *target_bb, state_t init_state)
6025 {
6026 int i;
6027 bool success = modulo_ii == 0;
6028 struct sched_block_state ls;
6029 state_t temp_state = NULL; /* It is used for multipass scheduling. */
6030 int sort_p, advance, start_clock_var;
6031
6032 /* Head/tail info for this block. */
6033 rtx_insn *prev_head = current_sched_info->prev_head;
6034 rtx_insn *next_tail = current_sched_info->next_tail;
6035 rtx_insn *head = NEXT_INSN (prev_head);
6036 rtx_insn *tail = PREV_INSN (next_tail);
6037
6038 if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
6039 && sched_pressure != SCHED_PRESSURE_MODEL)
6040 find_modifiable_mems (head, tail);
6041
6042 /* We used to have code to avoid getting parameters moved from hard
6043 argument registers into pseudos.
6044
6045 However, it was removed when it proved to be of marginal benefit
6046 and caused problems because schedule_block and compute_forward_dependences
6047 had different notions of what the "head" insn was. */
6048
6049 gcc_assert (head != tail || INSN_P (head));
6050
6051 haifa_recovery_bb_recently_added_p = false;
6052
6053 backtrack_queue = NULL;
6054
6055 /* Debug info. */
6056 if (sched_verbose)
6057 {
6058 dump_new_block_header (0, *target_bb, head, tail);
6059
6060 if (sched_verbose >= 2)
6061 {
6062 dump_insn_stream (head, tail);
6063 memset (&rank_for_schedule_stats, 0,
6064 sizeof (rank_for_schedule_stats));
6065 }
6066 }
6067
6068 if (init_state == NULL)
6069 state_reset (curr_state);
6070 else
6071 memcpy (curr_state, init_state, dfa_state_size);
6072
6073 /* Clear the ready list. */
6074 ready.first = ready.veclen - 1;
6075 ready.n_ready = 0;
6076 ready.n_debug = 0;
6077
6078 /* It is used for first cycle multipass scheduling. */
6079 temp_state = alloca (dfa_state_size);
6080
6081 if (targetm.sched.init)
6082 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
6083
6084 /* We start inserting insns after PREV_HEAD. */
6085 last_scheduled_insn = prev_head;
6086 last_nondebug_scheduled_insn = NULL_RTX;
6087 nonscheduled_insns_begin = NULL;
6088
6089 gcc_assert ((NOTE_P (last_scheduled_insn)
6090 || DEBUG_INSN_P (last_scheduled_insn))
6091 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
6092
6093 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
6094 queue. */
6095 q_ptr = 0;
6096 q_size = 0;
6097
6098 insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
6099 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
6100
6101 /* Start just before the beginning of time. */
6102 clock_var = -1;
6103
6104   /* We need the queue and ready lists and clock_var to be initialized
6105      in try_ready () (which is called through init_ready_list ()).  */
6106 (*current_sched_info->init_ready_list) ();
6107
6108 if (sched_pressure)
6109 sched_pressure_start_bb (*target_bb);
6110
6111 /* The algorithm is O(n^2) in the number of ready insns at any given
6112 time in the worst case. Before reload we are more likely to have
6113 big lists so truncate them to a reasonable size. */
6114 if (!reload_completed
6115 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
6116 {
6117 ready_sort (&ready);
6118
6119 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
6120 If there are debug insns, we know they're first. */
6121 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
6122 if (!SCHED_GROUP_P (ready_element (&ready, i)))
6123 break;
6124
6125 if (sched_verbose >= 2)
6126 {
6127 fprintf (sched_dump,
6128 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
6129 fprintf (sched_dump,
6130 ";;\t\t before reload => truncated to %d insns\n", i);
6131 }
6132
6133       /* Delay all insns past it for 1 cycle.  If the debug counter is
6134 	 activated, make an exception for the insn right after
6135 	 nonscheduled_insns_begin.  */
6136 {
6137 rtx_insn *skip_insn;
6138
6139 if (dbg_cnt (sched_insn) == false)
6140 skip_insn = first_nonscheduled_insn ();
6141 else
6142 skip_insn = NULL;
6143
6144 while (i < ready.n_ready)
6145 {
6146 rtx_insn *insn;
6147
6148 insn = ready_remove (&ready, i);
6149
6150 if (insn != skip_insn)
6151 queue_insn (insn, 1, "list truncated");
6152 }
6153 if (skip_insn)
6154 ready_add (&ready, skip_insn, true);
6155 }
6156 }
6157
6158 /* Now we can restore basic block notes and maintain precise cfg. */
6159 restore_bb_notes (*target_bb);
6160
6161 last_clock_var = -1;
6162
6163 advance = 0;
6164
6165 gcc_assert (scheduled_insns.length () == 0);
6166 sort_p = TRUE;
6167 must_backtrack = false;
6168 modulo_insns_scheduled = 0;
6169
6170 ls.modulo_epilogue = false;
6171 ls.first_cycle_insn_p = true;
6172
6173 /* Loop until all the insns in BB are scheduled. */
6174 while ((*current_sched_info->schedule_more_p) ())
6175 {
6176 perform_replacements_new_cycle ();
6177 do
6178 {
6179 start_clock_var = clock_var;
6180
6181 clock_var++;
6182
6183 advance_one_cycle ();
6184
6185 /* Add to the ready list all pending insns that can be issued now.
6186 If there are no ready insns, increment clock until one
6187 is ready and add all pending insns at that point to the ready
6188 list. */
6189 queue_to_ready (&ready);
6190
6191 gcc_assert (ready.n_ready);
6192
6193 if (sched_verbose >= 2)
6194 {
6195 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready:");
6196 debug_ready_list (&ready);
6197 }
6198 advance -= clock_var - start_clock_var;
6199 }
6200 while (advance > 0);
6201
6202 if (ls.modulo_epilogue)
6203 {
6204 int stage = clock_var / modulo_ii;
6205 if (stage > modulo_last_stage * 2 + 2)
6206 {
6207 if (sched_verbose >= 2)
6208 fprintf (sched_dump,
6209 			 ";;\t\tmodulo scheduling succeeded at II %d\n",
6210 modulo_ii);
6211 success = true;
6212 goto end_schedule;
6213 }
6214 }
6215 else if (modulo_ii > 0)
6216 {
6217 int stage = clock_var / modulo_ii;
6218 if (stage > modulo_max_stages)
6219 {
6220 if (sched_verbose >= 2)
6221 fprintf (sched_dump,
6222 ";;\t\tfailing schedule due to excessive stages\n");
6223 goto end_schedule;
6224 }
6225 if (modulo_n_insns == modulo_insns_scheduled
6226 && stage > modulo_last_stage)
6227 {
6228 if (sched_verbose >= 2)
6229 fprintf (sched_dump,
6230 ";;\t\tfound kernel after %d stages, II %d\n",
6231 stage, modulo_ii);
6232 ls.modulo_epilogue = true;
6233 }
6234 }
6235
6236 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
6237 if (ready.n_ready == 0)
6238 continue;
6239 if (must_backtrack)
6240 goto do_backtrack;
6241
6242 ls.shadows_only_p = false;
6243 cycle_issued_insns = 0;
6244 ls.can_issue_more = issue_rate;
6245 for (;;)
6246 {
6247 rtx_insn *insn;
6248 int cost;
6249 bool asm_p;
6250
6251 if (sort_p && ready.n_ready > 0)
6252 {
6253 /* Sort the ready list based on priority. This must be
6254 done every iteration through the loop, as schedule_insn
6255 may have readied additional insns that will not be
6256 sorted correctly. */
6257 ready_sort (&ready);
6258
6259 if (sched_verbose >= 2)
6260 {
6261 fprintf (sched_dump,
6262 ";;\t\tReady list after ready_sort: ");
6263 debug_ready_list (&ready);
6264 }
6265 }
6266
6267 	  /* We don't want md sched reorder to even see debug insns, so put
6268 	     them out right away.  */
6269 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
6270 && (*current_sched_info->schedule_more_p) ())
6271 {
6272 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
6273 {
6274 rtx_insn *insn = ready_remove_first (&ready);
6275 gcc_assert (DEBUG_INSN_P (insn));
6276 (*current_sched_info->begin_schedule_ready) (insn);
6277 scheduled_insns.safe_push (insn);
6278 last_scheduled_insn = insn;
6279 advance = schedule_insn (insn);
6280 gcc_assert (advance == 0);
6281 if (ready.n_ready > 0)
6282 ready_sort (&ready);
6283 }
6284 }
6285
6286 if (ls.first_cycle_insn_p && !ready.n_ready)
6287 break;
6288
6289 resume_after_backtrack:
6290 /* Allow the target to reorder the list, typically for
6291 better instruction bundling. */
6292 if (sort_p
6293 && (ready.n_ready == 0
6294 || !SCHED_GROUP_P (ready_element (&ready, 0))))
6295 {
6296 if (ls.first_cycle_insn_p && targetm.sched.reorder)
6297 ls.can_issue_more
6298 = targetm.sched.reorder (sched_dump, sched_verbose,
6299 ready_lastpos (&ready),
6300 &ready.n_ready, clock_var);
6301 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
6302 ls.can_issue_more
6303 = targetm.sched.reorder2 (sched_dump, sched_verbose,
6304 ready.n_ready
6305 ? ready_lastpos (&ready) : NULL,
6306 &ready.n_ready, clock_var);
6307 }
6308
6309 restart_choose_ready:
6310 if (sched_verbose >= 2)
6311 {
6312 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
6313 clock_var);
6314 debug_ready_list (&ready);
6315 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6316 print_curr_reg_pressure ();
6317 }
6318
6319 if (ready.n_ready == 0
6320 && ls.can_issue_more
6321 && reload_completed)
6322 {
6323 /* Allow scheduling insns directly from the queue in case
6324 there's nothing better to do (ready list is empty) but
6325 there are still vacant dispatch slots in the current cycle. */
6326 if (sched_verbose >= 6)
6327 		fprintf (sched_dump, ";;\t\tSecond chance\n");
6328 memcpy (temp_state, curr_state, dfa_state_size);
6329 if (early_queue_to_ready (temp_state, &ready))
6330 ready_sort (&ready);
6331 }
6332
6333 if (ready.n_ready == 0
6334 || !ls.can_issue_more
6335 || state_dead_lock_p (curr_state)
6336 || !(*current_sched_info->schedule_more_p) ())
6337 break;
6338
6339 /* Select and remove the insn from the ready list. */
6340 if (sort_p)
6341 {
6342 int res;
6343
6344 insn = NULL;
6345 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
6346
6347 if (res < 0)
6348 /* Finish cycle. */
6349 break;
6350 if (res > 0)
6351 goto restart_choose_ready;
6352
6353 gcc_assert (insn != NULL_RTX);
6354 }
6355 else
6356 insn = ready_remove_first (&ready);
6357
6358 if (sched_pressure != SCHED_PRESSURE_NONE
6359 && INSN_TICK (insn) > clock_var)
6360 {
6361 ready_add (&ready, insn, true);
6362 advance = 1;
6363 break;
6364 }
6365
6366 if (targetm.sched.dfa_new_cycle
6367 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
6368 insn, last_clock_var,
6369 clock_var, &sort_p))
6370 /* SORT_P is used by the target to override sorting
6371 of the ready list. This is needed when the target
6372 has modified its internal structures expecting that
6373 the insn will be issued next. As we need the insn
6374 to have the highest priority (so it will be returned by
6375 the ready_remove_first call above), we invoke
6376 ready_add (&ready, insn, true).
6377 But, still, there is one issue: INSN can be later
6378 discarded by scheduler's front end through
6379 current_sched_info->can_schedule_ready_p, hence, won't
6380 be issued next. */
6381 {
6382 ready_add (&ready, insn, true);
6383 break;
6384 }
6385
6386 sort_p = TRUE;
6387
6388 if (current_sched_info->can_schedule_ready_p
6389 && ! (*current_sched_info->can_schedule_ready_p) (insn))
6390 /* We normally get here only if we don't want to move
6391 insn from the split block. */
6392 {
6393 TODO_SPEC (insn) = DEP_POSTPONED;
6394 goto restart_choose_ready;
6395 }
6396
6397 if (delay_htab)
6398 {
6399 /* If this insn is the first part of a delay-slot pair, record a
6400 backtrack point. */
6401 struct delay_pair *delay_entry;
6402 delay_entry
6403 = delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
6404 if (delay_entry)
6405 {
6406 save_backtrack_point (delay_entry, ls);
6407 if (sched_verbose >= 2)
6408 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
6409 }
6410 }
6411
6412 /* DECISION is made. */
6413
6414 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
6415 {
6416 modulo_insns_scheduled++;
6417 modulo_last_stage = clock_var / modulo_ii;
6418 }
6419 if (TODO_SPEC (insn) & SPECULATIVE)
6420 generate_recovery_code (insn);
6421
6422 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6423 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
6424
6425 /* Update counters, etc in the scheduler's front end. */
6426 (*current_sched_info->begin_schedule_ready) (insn);
6427 scheduled_insns.safe_push (insn);
6428 gcc_assert (NONDEBUG_INSN_P (insn));
6429 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
6430
6431 if (recog_memoized (insn) >= 0)
6432 {
6433 memcpy (temp_state, curr_state, dfa_state_size);
6434 cost = state_transition (curr_state, insn);
6435 if (sched_pressure != SCHED_PRESSURE_WEIGHTED)
6436 gcc_assert (cost < 0);
6437 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
6438 cycle_issued_insns++;
6439 asm_p = false;
6440 }
6441 else
6442 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6443 || asm_noperands (PATTERN (insn)) >= 0);
6444
6445 if (targetm.sched.variable_issue)
6446 ls.can_issue_more =
6447 targetm.sched.variable_issue (sched_dump, sched_verbose,
6448 insn, ls.can_issue_more);
6449 /* A naked CLOBBER or USE generates no instruction, so do
6450 not count them against the issue rate. */
6451 else if (GET_CODE (PATTERN (insn)) != USE
6452 && GET_CODE (PATTERN (insn)) != CLOBBER)
6453 ls.can_issue_more--;
6454 advance = schedule_insn (insn);
6455
6456 if (SHADOW_P (insn))
6457 ls.shadows_only_p = true;
6458
6459 /* After issuing an asm insn we should start a new cycle. */
6460 if (advance == 0 && asm_p)
6461 advance = 1;
6462
6463 if (must_backtrack)
6464 break;
6465
6466 if (advance != 0)
6467 break;
6468
6469 ls.first_cycle_insn_p = false;
6470 if (ready.n_ready > 0)
6471 prune_ready_list (temp_state, false, ls.shadows_only_p,
6472 ls.modulo_epilogue);
6473 }
6474
6475 do_backtrack:
6476 if (!must_backtrack)
6477 for (i = 0; i < ready.n_ready; i++)
6478 {
6479 rtx_insn *insn = ready_element (&ready, i);
6480 if (INSN_EXACT_TICK (insn) == clock_var)
6481 {
6482 must_backtrack = true;
6483 clock_var++;
6484 break;
6485 }
6486 }
6487 if (must_backtrack && modulo_ii > 0)
6488 {
6489 if (modulo_backtracks_left == 0)
6490 goto end_schedule;
6491 modulo_backtracks_left--;
6492 }
6493 while (must_backtrack)
6494 {
6495 struct haifa_saved_data *failed;
6496 rtx_insn *failed_insn;
6497
6498 must_backtrack = false;
6499 failed = verify_shadows ();
6500 gcc_assert (failed);
6501
6502 failed_insn = failed->delay_pair->i1;
6503 /* Clear these queues. */
6504 perform_replacements_new_cycle ();
6505 toggle_cancelled_flags (false);
6506 unschedule_insns_until (failed_insn);
6507 while (failed != backtrack_queue)
6508 free_topmost_backtrack_point (true);
6509 restore_last_backtrack_point (&ls);
6510 if (sched_verbose >= 2)
6511 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
6512 /* Delay by at least a cycle. This could cause additional
6513 backtracking. */
6514 queue_insn (failed_insn, 1, "backtracked");
6515 advance = 0;
6516 if (must_backtrack)
6517 continue;
6518 if (ready.n_ready > 0)
6519 goto resume_after_backtrack;
6520 else
6521 {
6522 if (clock_var == 0 && ls.first_cycle_insn_p)
6523 goto end_schedule;
6524 advance = 1;
6525 break;
6526 }
6527 }
6528 ls.first_cycle_insn_p = true;
6529 }
6530 if (ls.modulo_epilogue)
6531 success = true;
6532 end_schedule:
6533 if (!ls.first_cycle_insn_p || advance)
6534 advance_one_cycle ();
6535 perform_replacements_new_cycle ();
6536 if (modulo_ii > 0)
6537 {
6538       /* Debug insns can be on the ready list even when they still have
6539 	 unresolved dependencies.  To keep our view of the world
6540 	 consistent, remove such "ready" insns.  */
6541 restart_debug_insn_loop:
6542 for (i = ready.n_ready - 1; i >= 0; i--)
6543 {
6544 rtx_insn *x;
6545
6546 x = ready_element (&ready, i);
6547 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
6548 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
6549 {
6550 ready_remove (&ready, i);
6551 goto restart_debug_insn_loop;
6552 }
6553 }
6554 for (i = ready.n_ready - 1; i >= 0; i--)
6555 {
6556 rtx_insn *x;
6557
6558 x = ready_element (&ready, i);
6559 resolve_dependencies (x);
6560 }
6561 for (i = 0; i <= max_insn_queue_index; i++)
6562 {
6563 rtx_insn_list *link;
6564 while ((link = insn_queue[i]) != NULL)
6565 {
6566 rtx_insn *x = link->insn ();
6567 insn_queue[i] = link->next ();
6568 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6569 free_INSN_LIST_node (link);
6570 resolve_dependencies (x);
6571 }
6572 }
6573 }
6574
6575 if (!success)
6576 undo_all_replacements ();
6577
6578 /* Debug info. */
6579 if (sched_verbose)
6580 {
6581 fprintf (sched_dump, ";;\tReady list (final): ");
6582 debug_ready_list (&ready);
6583 }
6584
6585 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
6586 /* Sanity check -- queue must be empty now. Meaningless if region has
6587 multiple bbs. */
6588 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
6589 else if (modulo_ii == 0)
6590 {
6591 /* We must maintain QUEUE_INDEX between blocks in region. */
6592 for (i = ready.n_ready - 1; i >= 0; i--)
6593 {
6594 rtx_insn *x;
6595
6596 x = ready_element (&ready, i);
6597 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6598 TODO_SPEC (x) = HARD_DEP;
6599 }
6600
6601 if (q_size)
6602 for (i = 0; i <= max_insn_queue_index; i++)
6603 {
6604 rtx_insn_list *link;
6605 for (link = insn_queue[i]; link; link = link->next ())
6606 {
6607 rtx_insn *x;
6608
6609 x = link->insn ();
6610 QUEUE_INDEX (x) = QUEUE_NOWHERE;
6611 TODO_SPEC (x) = HARD_DEP;
6612 }
6613 free_INSN_LIST_list (&insn_queue[i]);
6614 }
6615 }
6616
6617 if (sched_pressure == SCHED_PRESSURE_MODEL)
6618 model_end_schedule ();
6619
6620 if (success)
6621 {
6622 commit_schedule (prev_head, tail, target_bb);
6623 if (sched_verbose)
6624 fprintf (sched_dump, ";; total time = %d\n", clock_var);
6625 }
6626 else
6627 last_scheduled_insn = tail;
6628
6629 scheduled_insns.truncate (0);
6630
6631 if (!current_sched_info->queue_must_finish_empty
6632 || haifa_recovery_bb_recently_added_p)
6633 {
6634       /* INSN_TICK (minimum clock tick at which the insn becomes
6635 	 ready) may not be correct for insns in the subsequent
6636 	 blocks of the region.  We should use a correct value of
6637 `clock_var' or modify INSN_TICK. It is better to keep
6638 clock_var value equal to 0 at the start of a basic block.
6639 Therefore we modify INSN_TICK here. */
6640 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
6641 }
6642
6643 if (targetm.sched.finish)
6644 {
6645 targetm.sched.finish (sched_dump, sched_verbose);
6646 /* Target might have added some instructions to the scheduled block
6647 in its md_finish () hook. These new insns don't have any data
6648 initialized and to identify them we extend h_i_d so that they'll
6649 get zero luids. */
6650 sched_extend_luids ();
6651 }
6652
6653 /* Update head/tail boundaries. */
6654 head = NEXT_INSN (prev_head);
6655 tail = last_scheduled_insn;
6656
6657 if (sched_verbose)
6658 {
6659 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n",
6660 INSN_UID (head), INSN_UID (tail));
6661
6662 if (sched_verbose >= 2)
6663 {
6664 dump_insn_stream (head, tail);
6665 print_rank_for_schedule_stats (";; TOTAL ", &rank_for_schedule_stats);
6666 }
6667
6668 fprintf (sched_dump, "\n");
6669 }
6670
6671 head = restore_other_notes (head, NULL);
6672
6673 current_sched_info->head = head;
6674 current_sched_info->tail = tail;
6675
6676 free_backtrack_queue ();
6677
6678 return success;
6679 }
6680 \f
6681 /* Set_priorities: compute priority of each insn in the block. */
6682
6683 int
6684 set_priorities (rtx_insn *head, rtx_insn *tail)
6685 {
6686 rtx_insn *insn;
6687 int n_insn;
6688 int sched_max_insns_priority =
6689 current_sched_info->sched_max_insns_priority;
6690 rtx_insn *prev_head;
6691
6692 if (head == tail && ! INSN_P (head))
6693 gcc_unreachable ();
6694
6695 n_insn = 0;
6696
6697 prev_head = PREV_INSN (head);
6698 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
6699 {
6700 if (!INSN_P (insn))
6701 continue;
6702
6703 n_insn++;
6704 (void) priority (insn);
6705
6706 gcc_assert (INSN_PRIORITY_KNOWN (insn));
6707
6708 sched_max_insns_priority = MAX (sched_max_insns_priority,
6709 INSN_PRIORITY (insn));
6710 }
6711
6712 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
6713
6714 return n_insn;
6715 }
6716
6717 /* Set dump and sched_verbose for the desired debugging output. If no
6718 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
6719 For -fsched-verbose=N, N>=10, print everything to stderr. */
6720 void
6721 setup_sched_dump (void)
6722 {
6723 sched_verbose = sched_verbose_param;
6724 if (sched_verbose_param == 0 && dump_file)
6725 sched_verbose = 1;
6726 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
6727 ? stderr : dump_file);
6728 }
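/* A few illustrative combinations of the logic above (assuming the RTL
   scheduling dump is requested with -fdump-rtl-sched2 or similar):

     -fsched-verbose=0  with a dump file    -> verbosity 1,  dump file
     -fsched-verbose=4  with a dump file    -> verbosity 4,  dump file
     -fsched-verbose=4  without a dump file -> verbosity 4,  stderr
     -fsched-verbose=12 with a dump file    -> verbosity 12, stderr  */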
6729
6730 /* Allocate data for register pressure sensitive scheduling. */
6731 static void
6732 alloc_global_sched_pressure_data (void)
6733 {
6734 if (sched_pressure != SCHED_PRESSURE_NONE)
6735 {
6736 int i, max_regno = max_reg_num ();
6737
6738 if (sched_dump != NULL)
6739 /* We need info about pseudos for rtl dumps about pseudo
6740 classes and costs. */
6741 regstat_init_n_sets_and_refs ();
6742 ira_set_pseudo_classes (true, sched_verbose ? sched_dump : NULL);
6743 sched_regno_pressure_class
6744 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
6745 for (i = 0; i < max_regno; i++)
6746 sched_regno_pressure_class[i]
6747 = (i < FIRST_PSEUDO_REGISTER
6748 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
6749 : ira_pressure_class_translate[reg_allocno_class (i)]);
6750 curr_reg_live = BITMAP_ALLOC (NULL);
6751 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6752 {
6753 saved_reg_live = BITMAP_ALLOC (NULL);
6754 region_ref_regs = BITMAP_ALLOC (NULL);
6755 }
6756
6757       /* Calculate the number of call-used regs in each register class
6758 	 for which we calculate register pressure.  */
6759 for (int c = 0; c < ira_pressure_classes_num; ++c)
6760 {
6761 enum reg_class cl = ira_pressure_classes[c];
6762
6763 call_used_regs_num[cl] = 0;
6764
6765 for (int i = 0; i < ira_class_hard_regs_num[cl]; ++i)
6766 if (call_used_regs[ira_class_hard_regs[cl][i]])
6767 ++call_used_regs_num[cl];
6768 }
6769 }
6770 }
6771
6772 /* Free data for register pressure sensitive scheduling. Also called
6773 from schedule_region when stopping sched-pressure early. */
6774 void
6775 free_global_sched_pressure_data (void)
6776 {
6777 if (sched_pressure != SCHED_PRESSURE_NONE)
6778 {
6779 if (regstat_n_sets_and_refs != NULL)
6780 regstat_free_n_sets_and_refs ();
6781 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
6782 {
6783 BITMAP_FREE (region_ref_regs);
6784 BITMAP_FREE (saved_reg_live);
6785 }
6786 BITMAP_FREE (curr_reg_live);
6787 free (sched_regno_pressure_class);
6788 }
6789 }
6790
6791 /* Initialize some global state for the scheduler. This function works
6792 with the common data shared between all the schedulers. It is called
6793 from the scheduler specific initialization routine. */
6794
6795 void
6796 sched_init (void)
6797 {
6798   /* Disable speculative loads when cc0 is defined.  */
6799 #ifdef HAVE_cc0
6800 flag_schedule_speculative_load = 0;
6801 #endif
6802
6803 if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
6804 targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
6805
6806 if (live_range_shrinkage_p)
6807 sched_pressure = SCHED_PRESSURE_WEIGHTED;
6808 else if (flag_sched_pressure
6809 && !reload_completed
6810 && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
6811 sched_pressure = ((enum sched_pressure_algorithm)
6812 PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
6813 else
6814 sched_pressure = SCHED_PRESSURE_NONE;
6815
6816 if (sched_pressure != SCHED_PRESSURE_NONE)
6817 ira_setup_eliminable_regset ();
6818
6819 /* Initialize SPEC_INFO. */
6820 if (targetm.sched.set_sched_flags)
6821 {
6822 spec_info = &spec_info_var;
6823 targetm.sched.set_sched_flags (spec_info);
6824
6825 if (spec_info->mask != 0)
6826 {
6827 spec_info->data_weakness_cutoff =
6828 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
6829 spec_info->control_weakness_cutoff =
6830 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
6831 * REG_BR_PROB_BASE) / 100;
6832 }
6833 else
6834 /* So we won't read anything accidentally. */
6835 spec_info = NULL;
6836
6837 }
6838 else
6839 /* So we won't read anything accidentally. */
6840 spec_info = 0;
6841
6842 /* Initialize issue_rate. */
6843 if (targetm.sched.issue_rate)
6844 issue_rate = targetm.sched.issue_rate ();
6845 else
6846 issue_rate = 1;
6847
6848 if (cached_issue_rate != issue_rate)
6849 {
6850 cached_issue_rate = issue_rate;
6851 /* To invalidate max_lookahead_tries: */
6852 cached_first_cycle_multipass_dfa_lookahead = 0;
6853 }
6854
6855 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
6856 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
6857 else
6858 dfa_lookahead = 0;
6859
6860 if (targetm.sched.init_dfa_pre_cycle_insn)
6861 targetm.sched.init_dfa_pre_cycle_insn ();
6862
6863 if (targetm.sched.init_dfa_post_cycle_insn)
6864 targetm.sched.init_dfa_post_cycle_insn ();
6865
6866 dfa_start ();
6867 dfa_state_size = state_size ();
6868
6869 init_alias_analysis ();
6870
6871 if (!sched_no_dce)
6872 df_set_flags (DF_LR_RUN_DCE);
6873 df_note_add_problem ();
6874
6875 /* More problems needed for interloop dep calculation in SMS. */
6876 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
6877 {
6878 df_rd_add_problem ();
6879 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
6880 }
6881
6882 df_analyze ();
6883
6884 /* Do not run DCE after reload, as this can kill nops inserted
6885 by bundling. */
6886 if (reload_completed)
6887 df_clear_flags (DF_LR_RUN_DCE);
6888
6889 regstat_compute_calls_crossed ();
6890
6891 if (targetm.sched.init_global)
6892 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
6893
6894 alloc_global_sched_pressure_data ();
6895
6896 curr_state = xmalloc (dfa_state_size);
6897 }
6898
6899 static void haifa_init_only_bb (basic_block, basic_block);
6900
6901 /* Initialize data structures specific to the Haifa scheduler. */
6902 void
6903 haifa_sched_init (void)
6904 {
6905 setup_sched_dump ();
6906 sched_init ();
6907
6908 scheduled_insns.create (0);
6909
6910 if (spec_info != NULL)
6911 {
6912 sched_deps_info->use_deps_list = 1;
6913 sched_deps_info->generate_spec_deps = 1;
6914 }
6915
6916 /* Initialize luids, dependency caches, target and h_i_d for the
6917 whole function. */
6918 {
6919 bb_vec_t bbs;
6920 bbs.create (n_basic_blocks_for_fn (cfun));
6921 basic_block bb;
6922
6923 sched_init_bbs ();
6924
6925 FOR_EACH_BB_FN (bb, cfun)
6926 bbs.quick_push (bb);
6927 sched_init_luids (bbs);
6928 sched_deps_init (true);
6929 sched_extend_target ();
6930 haifa_init_h_i_d (bbs);
6931
6932 bbs.release ();
6933 }
6934
6935 sched_init_only_bb = haifa_init_only_bb;
6936 sched_split_block = sched_split_block_1;
6937 sched_create_empty_bb = sched_create_empty_bb_1;
6938 haifa_recovery_bb_ever_added_p = false;
6939
6940 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
6941 before_recovery = 0;
6942 after_recovery = 0;
6943
6944 modulo_ii = 0;
6945 }
6946
6947 /* Finish work with the data specific to the Haifa scheduler. */
6948 void
6949 haifa_sched_finish (void)
6950 {
6951 sched_create_empty_bb = NULL;
6952 sched_split_block = NULL;
6953 sched_init_only_bb = NULL;
6954
6955 if (spec_info && spec_info->dump)
6956 {
6957 char c = reload_completed ? 'a' : 'b';
6958
6959 fprintf (spec_info->dump,
6960 ";; %s:\n", current_function_name ());
6961
6962 fprintf (spec_info->dump,
6963 ";; Procedure %cr-begin-data-spec motions == %d\n",
6964 c, nr_begin_data);
6965 fprintf (spec_info->dump,
6966 ";; Procedure %cr-be-in-data-spec motions == %d\n",
6967 c, nr_be_in_data);
6968 fprintf (spec_info->dump,
6969 ";; Procedure %cr-begin-control-spec motions == %d\n",
6970 c, nr_begin_control);
6971 fprintf (spec_info->dump,
6972 ";; Procedure %cr-be-in-control-spec motions == %d\n",
6973 c, nr_be_in_control);
6974 }
6975
6976 scheduled_insns.release ();
6977
6978 /* Finalize h_i_d, dependency caches, and luids for the whole
6979 function. Target will be finalized in md_global_finish (). */
6980 sched_deps_finish ();
6981 sched_finish_luids ();
6982 current_sched_info = NULL;
6983 sched_finish ();
6984 }
6985
6986 /* Free global data used during insn scheduling. This function works with
6987 the common data shared between the schedulers. */
6988
6989 void
6990 sched_finish (void)
6991 {
6992 haifa_finish_h_i_d ();
6993 free_global_sched_pressure_data ();
6994 free (curr_state);
6995
6996 if (targetm.sched.finish_global)
6997 targetm.sched.finish_global (sched_dump, sched_verbose);
6998
6999 end_alias_analysis ();
7000
7001 regstat_free_calls_crossed ();
7002
7003 dfa_finish ();
7004 }
7005
7006 /* Free all delay_pair structures that were recorded. */
7007 void
7008 free_delay_pairs (void)
7009 {
7010 if (delay_htab)
7011 {
7012 delay_htab->empty ();
7013 delay_htab_i2->empty ();
7014 }
7015 }
7016
7017 /* Fix INSN_TICKs of the instructions in the current block as well as
7018 INSN_TICKs of their dependents.
7019 HEAD and TAIL are the begin and the end of the current scheduled block. */
7020 static void
7021 fix_inter_tick (rtx_insn *head, rtx_insn *tail)
7022 {
7023 /* Set of instructions with corrected INSN_TICK. */
7024 bitmap_head processed;
7025   /* ??? It is doubtful whether we should assume that cycle advance
7026      happens on basic block boundaries.  Basically, insns that are
7027      unconditionally ready at the start of the block are preferable to
7028      those which have a one-cycle dependency on an insn from the previous block.  */
7029 int next_clock = clock_var + 1;
7030
7031 bitmap_initialize (&processed, 0);
7032
7033 /* Iterates over scheduled instructions and fix their INSN_TICKs and
7034 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
7035 across different blocks. */
7036 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
7037 {
7038 if (INSN_P (head))
7039 {
7040 int tick;
7041 sd_iterator_def sd_it;
7042 dep_t dep;
7043
7044 tick = INSN_TICK (head);
7045 gcc_assert (tick >= MIN_TICK);
7046
7047 /* Fix INSN_TICK of instruction from just scheduled block. */
7048 if (bitmap_set_bit (&processed, INSN_LUID (head)))
7049 {
7050 tick -= next_clock;
7051
7052 if (tick < MIN_TICK)
7053 tick = MIN_TICK;
7054
7055 INSN_TICK (head) = tick;
7056 }
7057
7058 if (DEBUG_INSN_P (head))
7059 continue;
7060
7061 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
7062 {
7063 rtx_insn *next;
7064
7065 next = DEP_CON (dep);
7066 tick = INSN_TICK (next);
7067
7068 if (tick != INVALID_TICK
7069 /* If NEXT has its INSN_TICK calculated, fix it.
7070 If not - it will be properly calculated from
7071 scratch later in fix_tick_ready. */
7072 && bitmap_set_bit (&processed, INSN_LUID (next)))
7073 {
7074 tick -= next_clock;
7075
7076 if (tick < MIN_TICK)
7077 tick = MIN_TICK;
7078
7079 if (tick > INTER_TICK (next))
7080 INTER_TICK (next) = tick;
7081 else
7082 tick = INTER_TICK (next);
7083
7084 INSN_TICK (next) = tick;
7085 }
7086 }
7087 }
7088 }
7089 bitmap_clear (&processed);
7090 }
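/* A numeric sketch of the rebasing above (values hypothetical): if the
   just-scheduled block finished with clock_var == 12, next_clock is 13;
   an insn whose INSN_TICK was 15 is rebased to 15 - 13 = 2, and any tick
   that would drop below MIN_TICK is clamped to MIN_TICK.  Ticks of
   dependent insns in later blocks are rebased the same way, using
   INTER_TICK to keep the largest rebased value.  */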
7091
7092 /* Check if NEXT is ready to be added to the ready or queue list.
7093 If "yes", add it to the proper list.
7094 Returns:
7095 -1 - is not ready yet,
7096 0 - added to the ready list,
7097 0 < N - queued for N cycles. */
7098 int
7099 try_ready (rtx_insn *next)
7100 {
7101 ds_t old_ts, new_ts;
7102
7103 old_ts = TODO_SPEC (next);
7104
7105 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL | DEP_POSTPONED))
7106 && (old_ts == HARD_DEP
7107 || old_ts == DEP_POSTPONED
7108 || (old_ts & SPECULATIVE)
7109 || old_ts == DEP_CONTROL));
7110
7111 new_ts = recompute_todo_spec (next, false);
7112
7113 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7114 gcc_assert (new_ts == old_ts
7115 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
7116 else if (current_sched_info->new_ready)
7117 new_ts = current_sched_info->new_ready (next, new_ts);
7118
7119   /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn
7120        might have either its original pattern or a changed (speculative)
7121        one.  This is due to changing ebb in region scheduling.
7122      * But if (old_ts & SPECULATIVE), then we are pretty sure that the
7123        insn has a speculative pattern.
7124
7125 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
7126 control-speculative NEXT could have been discarded by sched-rgn.c
7127 (the same case as when discarded by can_schedule_ready_p ()). */
7128
7129 if ((new_ts & SPECULATIVE)
7130 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
7131 need to change anything. */
7132 && new_ts != old_ts)
7133 {
7134 int res;
7135 rtx new_pat;
7136
7137 gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
7138
7139 res = haifa_speculate_insn (next, new_ts, &new_pat);
7140
7141 switch (res)
7142 {
7143 case -1:
7144 /* It would be nice to change DEP_STATUS of all dependences,
7145 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
7146 so we won't reanalyze anything. */
7147 new_ts = HARD_DEP;
7148 break;
7149
7150 case 0:
7151 	  /* We follow the rule that every speculative insn
7152 	     has a non-null ORIG_PAT.  */
7153 if (!ORIG_PAT (next))
7154 ORIG_PAT (next) = PATTERN (next);
7155 break;
7156
7157 case 1:
7158 if (!ORIG_PAT (next))
7159 	    /* If we are going to overwrite the original pattern of insn,
7160 	       save it.  */
7161 ORIG_PAT (next) = PATTERN (next);
7162
7163 res = haifa_change_pattern (next, new_pat);
7164 gcc_assert (res);
7165 break;
7166
7167 default:
7168 gcc_unreachable ();
7169 }
7170 }
7171
7172 /* We need to restore pattern only if (new_ts == 0), because otherwise it is
7173 either correct (new_ts & SPECULATIVE),
7174 or we simply don't care (new_ts & HARD_DEP). */
7175
7176 gcc_assert (!ORIG_PAT (next)
7177 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
7178
7179 TODO_SPEC (next) = new_ts;
7180
7181 if (new_ts & (HARD_DEP | DEP_POSTPONED))
7182 {
7183 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
7184 control-speculative NEXT could have been discarded by sched-rgn.c
7185 (the same case as when discarded by can_schedule_ready_p ()). */
7186 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
7187
7188 change_queue_index (next, QUEUE_NOWHERE);
7189
7190 return -1;
7191 }
7192 else if (!(new_ts & BEGIN_SPEC)
7193 && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
7194 && !IS_SPECULATION_CHECK_P (next))
7195 /* We should change the pattern of every previously speculative
7196 instruction - and we determine whether NEXT was speculative by
7197 checking the ORIG_PAT field. The one exception is speculation
7198 checks, which also have ORIG_PAT set, so skip them. */
7199 {
7200 bool success = haifa_change_pattern (next, ORIG_PAT (next));
7201 gcc_assert (success);
7202 ORIG_PAT (next) = 0;
7203 }
7204
7205 if (sched_verbose >= 2)
7206 {
7207 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
7208 (*current_sched_info->print_insn) (next, 0));
7209
7210 if (spec_info && spec_info->dump)
7211 {
7212 if (new_ts & BEGIN_DATA)
7213 fprintf (spec_info->dump, "; data-spec;");
7214 if (new_ts & BEGIN_CONTROL)
7215 fprintf (spec_info->dump, "; control-spec;");
7216 if (new_ts & BE_IN_CONTROL)
7217 fprintf (spec_info->dump, "; in-control-spec;");
7218 }
7219 if (TODO_SPEC (next) & DEP_CONTROL)
7220 fprintf (sched_dump, " predicated");
7221 fprintf (sched_dump, "\n");
7222 }
7223
7224 adjust_priority (next);
7225
7226 return fix_tick_ready (next);
7227 }
7228
7229 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
7230 static int
7231 fix_tick_ready (rtx_insn *next)
7232 {
7233 int tick, delay;
7234
7235 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
7236 {
7237 int full_p;
7238 sd_iterator_def sd_it;
7239 dep_t dep;
7240
7241 tick = INSN_TICK (next);
7242 /* If tick is not equal to INVALID_TICK, then update
7243 INSN_TICK of NEXT with the most recent resolved dependence
7244 cost. Otherwise, recalculate from scratch. */
7245 full_p = (tick == INVALID_TICK);
7246
7247 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
7248 {
7249 rtx_insn *pro = DEP_PRO (dep);
7250 int tick1;
7251
7252 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
7253
7254 tick1 = INSN_TICK (pro) + dep_cost (dep);
7255 if (tick1 > tick)
7256 tick = tick1;
7257
7258 if (!full_p)
7259 break;
7260 }
7261 }
7262 else
7263 tick = -1;
7264
7265 INSN_TICK (next) = tick;
7266
7267 delay = tick - clock_var;
7268 if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE)
7269 delay = QUEUE_READY;
7270
7271 change_queue_index (next, delay);
7272
7273 return delay;
7274 }
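
/* Worked example (illustrative only, not part of the pass): suppose NEXT
   has two resolved back dependencies, one on P1 with INSN_TICK (P1) == 3
   and dep_cost == 2, and one on P2 with INSN_TICK (P2) == 5 and
   dep_cost == 1.  If INSN_TICK (NEXT) was INVALID_TICK, the whole list is
   walked and tick = MAX (3 + 2, 5 + 1) == 6.  With clock_var == 4 the
   insn is queued for delay == 2 cycles; with clock_var >= 6, or whenever
   register-pressure scheduling is enabled, it goes straight to the ready
   list.  */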
7275
7276 /* Move NEXT to the proper queue list with (DELAY >= 1),
7277 or add it to the ready list (DELAY == QUEUE_READY),
7278 or remove it from the ready and queue lists altogether (DELAY == QUEUE_NOWHERE). */
7279 static void
7280 change_queue_index (rtx_insn *next, int delay)
7281 {
7282 int i = QUEUE_INDEX (next);
7283
7284 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
7285 && delay != 0);
7286 gcc_assert (i != QUEUE_SCHEDULED);
7287
7288 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
7289 || (delay < 0 && delay == i))
7290 /* We have nothing to do. */
7291 return;
7292
7293 /* Remove NEXT from wherever it is now. */
7294 if (i == QUEUE_READY)
7295 ready_remove_insn (next);
7296 else if (i >= 0)
7297 queue_remove (next);
7298
7299 /* Add it to the proper place. */
7300 if (delay == QUEUE_READY)
7301 ready_add (readyp, next, false);
7302 else if (delay >= 1)
7303 queue_insn (next, delay, "change queue index");
7304
7305 if (sched_verbose >= 2)
7306 {
7307 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
7308 (*current_sched_info->print_insn) (next, 0));
7309
7310 if (delay == QUEUE_READY)
7311 fprintf (sched_dump, " into ready\n");
7312 else if (delay >= 1)
7313 fprintf (sched_dump, " into queue with cost=%d\n", delay);
7314 else
7315 fprintf (sched_dump, " removed from ready or queue lists\n");
7316 }
7317 }
7318
7319 static int sched_ready_n_insns = -1;
7320
7321 /* Initialize per region data structures. */
7322 void
7323 sched_extend_ready_list (int new_sched_ready_n_insns)
7324 {
7325 int i;
7326
7327 if (sched_ready_n_insns == -1)
7328 /* At the first call we need to initialize one more choice_stack
7329 entry. */
7330 {
7331 i = 0;
7332 sched_ready_n_insns = 0;
7333 scheduled_insns.reserve (new_sched_ready_n_insns);
7334 }
7335 else
7336 i = sched_ready_n_insns + 1;
7337
7338 ready.veclen = new_sched_ready_n_insns + issue_rate;
7339 ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
7340
7341 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
7342
7343 ready_try = (signed char *) xrecalloc (ready_try, new_sched_ready_n_insns,
7344 sched_ready_n_insns,
7345 sizeof (*ready_try));
7346
7347 /* We allocate +1 element to save initial state in the choice_stack[0]
7348 entry. */
7349 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
7350 new_sched_ready_n_insns + 1);
7351
7352 for (; i <= new_sched_ready_n_insns; i++)
7353 {
7354 choice_stack[i].state = xmalloc (dfa_state_size);
7355
7356 if (targetm.sched.first_cycle_multipass_init)
7357 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
7358 .target_data));
7359 }
7360
7361 sched_ready_n_insns = new_sched_ready_n_insns;
7362 }
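
/* Sizing example (illustrative only): on the first call with
   new_sched_ready_n_insns == 10 and issue_rate == 4, ready.vec is sized
   for 14 entries, ready_try gets 10 zero-initialized slots, and
   choice_stack gets 11 entries -- choice_stack[0] keeps the initial DFA
   state and the rest are used by the multipass lookahead.  A later call
   with a larger value only initializes the newly added entries, starting
   at sched_ready_n_insns + 1.  */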
7363
7364 /* Free per region data structures. */
7365 void
7366 sched_finish_ready_list (void)
7367 {
7368 int i;
7369
7370 free (ready.vec);
7371 ready.vec = NULL;
7372 ready.veclen = 0;
7373
7374 free (ready_try);
7375 ready_try = NULL;
7376
7377 for (i = 0; i <= sched_ready_n_insns; i++)
7378 {
7379 if (targetm.sched.first_cycle_multipass_fini)
7380 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
7381 .target_data));
7382
7383 free (choice_stack [i].state);
7384 }
7385 free (choice_stack);
7386 choice_stack = NULL;
7387
7388 sched_ready_n_insns = -1;
7389 }
7390
7391 static int
7392 haifa_luid_for_non_insn (rtx x)
7393 {
7394 gcc_assert (NOTE_P (x) || LABEL_P (x));
7395
7396 return 0;
7397 }
7398
7399 /* Generates recovery code for INSN. */
7400 static void
7401 generate_recovery_code (rtx_insn *insn)
7402 {
7403 if (TODO_SPEC (insn) & BEGIN_SPEC)
7404 begin_speculative_block (insn);
7405
7406 /* Here we have an insn with no dependencies on
7407 instructions other than CHECK_SPEC ones. */
7408
7409 if (TODO_SPEC (insn) & BE_IN_SPEC)
7410 add_to_speculative_block (insn);
7411 }
7412
7413 /* Helper function.
7414 Tries to add speculative dependencies of type FS between TWIN and
7415 the consumers of INSN's forward dependencies. */
7416 static void
7417 process_insn_forw_deps_be_in_spec (rtx insn, rtx_insn *twin, ds_t fs)
7418 {
7419 sd_iterator_def sd_it;
7420 dep_t dep;
7421
7422 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7423 {
7424 ds_t ds;
7425 rtx_insn *consumer;
7426
7427 consumer = DEP_CON (dep);
7428
7429 ds = DEP_STATUS (dep);
7430
7431 if (/* If we want to create speculative dep. */
7432 fs
7433 /* And we can do that because this is a true dep. */
7434 && (ds & DEP_TYPES) == DEP_TRUE)
7435 {
7436 gcc_assert (!(ds & BE_IN_SPEC));
7437
7438 if (/* If this dep can be overcome with 'begin speculation'. */
7439 ds & BEGIN_SPEC)
7440 /* Then we have a choice: keep the dep 'begin speculative'
7441 or transform it into 'be in speculative'. */
7442 {
7443 if (/* In try_ready we assert that once an insn became ready
7444 it can be removed from the ready (or queue) list only
7445 due to a backend decision. Hence we can't let the
7446 probability of the speculative dep decrease. */
7447 ds_weak (ds) <= ds_weak (fs))
7448 {
7449 ds_t new_ds;
7450
7451 new_ds = (ds & ~BEGIN_SPEC) | fs;
7452
7453 if (/* consumer can 'be in speculative'. */
7454 sched_insn_is_legitimate_for_speculation_p (consumer,
7455 new_ds))
7456 /* Transform it to be in speculative. */
7457 ds = new_ds;
7458 }
7459 }
7460 else
7461 /* Mark the dep as 'be in speculative'. */
7462 ds |= fs;
7463 }
7464
7465 {
7466 dep_def _new_dep, *new_dep = &_new_dep;
7467
7468 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
7469 sd_add_dep (new_dep, false);
7470 }
7471 }
7472 }
7473
7474 /* Generates recovery code for BEGIN speculative INSN. */
7475 static void
7476 begin_speculative_block (rtx_insn *insn)
7477 {
7478 if (TODO_SPEC (insn) & BEGIN_DATA)
7479 nr_begin_data++;
7480 if (TODO_SPEC (insn) & BEGIN_CONTROL)
7481 nr_begin_control++;
7482
7483 create_check_block_twin (insn, false);
7484
7485 TODO_SPEC (insn) &= ~BEGIN_SPEC;
7486 }
7487
7488 static void haifa_init_insn (rtx_insn *);
7489
7490 /* Generates recovery code for BE_IN speculative INSN. */
7491 static void
7492 add_to_speculative_block (rtx_insn *insn)
7493 {
7494 ds_t ts;
7495 sd_iterator_def sd_it;
7496 dep_t dep;
7497 rtx_insn_list *twins = NULL;
7498 rtx_vec_t priorities_roots;
7499
7500 ts = TODO_SPEC (insn);
7501 gcc_assert (!(ts & ~BE_IN_SPEC));
7502
7503 if (ts & BE_IN_DATA)
7504 nr_be_in_data++;
7505 if (ts & BE_IN_CONTROL)
7506 nr_be_in_control++;
7507
7508 TODO_SPEC (insn) &= ~BE_IN_SPEC;
7509 gcc_assert (!TODO_SPEC (insn));
7510
7511 DONE_SPEC (insn) |= ts;
7512
7513 /* First we convert all simple checks to branchy. */
7514 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7515 sd_iterator_cond (&sd_it, &dep);)
7516 {
7517 rtx_insn *check = DEP_PRO (dep);
7518
7519 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
7520 {
7521 create_check_block_twin (check, true);
7522
7523 /* Restart search. */
7524 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7525 }
7526 else
7527 /* Continue search. */
7528 sd_iterator_next (&sd_it);
7529 }
7530
7531 priorities_roots.create (0);
7532 clear_priorities (insn, &priorities_roots);
7533
7534 while (1)
7535 {
7536 rtx_insn *check, *twin;
7537 basic_block rec;
7538
7539 /* Get the first backward dependency of INSN. */
7540 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7541 if (!sd_iterator_cond (&sd_it, &dep))
7542 /* INSN has no backward dependencies left. */
7543 break;
7544
7545 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
7546 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
7547 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
7548
7549 check = DEP_PRO (dep);
7550
7551 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
7552 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
7553
7554 rec = BLOCK_FOR_INSN (check);
7555
7556 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
7557 haifa_init_insn (twin);
7558
7559 sd_copy_back_deps (twin, insn, true);
7560
7561 if (sched_verbose && spec_info->dump)
7562 /* INSN_BB (insn) isn't determined for twin insns yet.
7563 So we can't use current_sched_info->print_insn. */
7564 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7565 INSN_UID (twin), rec->index);
7566
7567 twins = alloc_INSN_LIST (twin, twins);
7568
7569 /* Add dependences between TWIN and all appropriate
7570 instructions from REC. */
7571 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
7572 {
7573 rtx_insn *pro = DEP_PRO (dep);
7574
7575 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
7576
7577 /* INSN might have dependencies on instructions from
7578 several recovery blocks. In this iteration we process those
7579 producers that reside in REC. */
7580 if (BLOCK_FOR_INSN (pro) == rec)
7581 {
7582 dep_def _new_dep, *new_dep = &_new_dep;
7583
7584 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
7585 sd_add_dep (new_dep, false);
7586 }
7587 }
7588
7589 process_insn_forw_deps_be_in_spec (insn, twin, ts);
7590
7591 /* Remove all dependencies between INSN and insns in REC. */
7592 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
7593 sd_iterator_cond (&sd_it, &dep);)
7594 {
7595 rtx_insn *pro = DEP_PRO (dep);
7596
7597 if (BLOCK_FOR_INSN (pro) == rec)
7598 sd_delete_dep (sd_it);
7599 else
7600 sd_iterator_next (&sd_it);
7601 }
7602 }
7603
7604 /* We couldn't have added the dependencies between INSN and TWINS earlier
7605 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
7606 while (twins)
7607 {
7608 rtx_insn *twin;
7609 rtx_insn_list *next_node;
7610
7611 twin = twins->insn ();
7612
7613 {
7614 dep_def _new_dep, *new_dep = &_new_dep;
7615
7616 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
7617 sd_add_dep (new_dep, false);
7618 }
7619
7620 next_node = twins->next ();
7621 free_INSN_LIST_node (twins);
7622 twins = next_node;
7623 }
7624
7625 calc_priorities (priorities_roots);
7626 priorities_roots.release ();
7627 }
7628
7629 /* Extends the array pointed to by P and zero-fills only the new part. */
7630 void *
7631 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
7632 {
7633 gcc_assert (new_nmemb >= old_nmemb);
7634 p = XRESIZEVAR (void, p, new_nmemb * size);
7635 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
7636 return p;
7637 }
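
/* Typical use of xrecalloc (see sched_extend_ready_list above): grow an
   array while keeping its old contents and zeroing only the new tail,
   e.g.

     ready_try = (signed char *) xrecalloc (ready_try, new_n, old_n,
                                            sizeof (*ready_try));

   where NEW_N and OLD_N stand for the new and old element counts; the
   first OLD_N elements are kept and the remaining NEW_N - OLD_N are
   cleared.  */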
7638
7639 /* Helper function.
7640 Find fallthru edge from PRED. */
7641 edge
7642 find_fallthru_edge_from (basic_block pred)
7643 {
7644 edge e;
7645 basic_block succ;
7646
7647 succ = pred->next_bb;
7648 gcc_assert (succ->prev_bb == pred);
7649
7650 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
7651 {
7652 e = find_fallthru_edge (pred->succs);
7653
7654 if (e)
7655 {
7656 gcc_assert (e->dest == succ);
7657 return e;
7658 }
7659 }
7660 else
7661 {
7662 e = find_fallthru_edge (succ->preds);
7663
7664 if (e)
7665 {
7666 gcc_assert (e->src == pred);
7667 return e;
7668 }
7669 }
7670
7671 return NULL;
7672 }
7673
7674 /* Extend per basic block data structures. */
7675 static void
7676 sched_extend_bb (void)
7677 {
7678 /* The following is done to keep current_sched_info->next_tail non-null. */
7679 rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
7680 rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
7681 if (NEXT_INSN (end) == 0
7682 || (!NOTE_P (insn)
7683 && !LABEL_P (insn)
7684 /* Don't emit a NOTE if it would end up before a BARRIER. */
7685 && !BARRIER_P (NEXT_INSN (end))))
7686 {
7687 rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
7688 /* Make note appear outside BB. */
7689 set_block_for_insn (note, NULL);
7690 BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb) = end;
7691 }
7692 }
7693
7694 /* Init per basic block data structures. */
7695 void
7696 sched_init_bbs (void)
7697 {
7698 sched_extend_bb ();
7699 }
7700
7701 /* Initialize BEFORE_RECOVERY variable. */
7702 static void
7703 init_before_recovery (basic_block *before_recovery_ptr)
7704 {
7705 basic_block last;
7706 edge e;
7707
7708 last = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
7709 e = find_fallthru_edge_from (last);
7710
7711 if (e)
7712 {
7713 /* We create two basic blocks:
7714 1. A single-instruction block that is inserted right after E->SRC
7715 and jumps to
7716 2. An empty block placed right before EXIT_BLOCK.
7717 Recovery blocks will be emitted between these two blocks. */
7718
7719 basic_block single, empty;
7720 rtx_insn *x;
7721 rtx label;
7722
7723 /* If the fallthru edge to the exit block we've found comes from the
7724 block we created before, don't do anything more. */
7725 if (last == after_recovery)
7726 return;
7727
7728 adding_bb_to_current_region_p = false;
7729
7730 single = sched_create_empty_bb (last);
7731 empty = sched_create_empty_bb (single);
7732
7733 /* Add new blocks to the root loop. */
7734 if (current_loops != NULL)
7735 {
7736 add_bb_to_loop (single, (*current_loops->larray)[0]);
7737 add_bb_to_loop (empty, (*current_loops->larray)[0]);
7738 }
7739
7740 single->count = last->count;
7741 empty->count = last->count;
7742 single->frequency = last->frequency;
7743 empty->frequency = last->frequency;
7744 BB_COPY_PARTITION (single, last);
7745 BB_COPY_PARTITION (empty, last);
7746
7747 redirect_edge_succ (e, single);
7748 make_single_succ_edge (single, empty, 0);
7749 make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
7750 EDGE_FALLTHRU);
7751
7752 label = block_label (empty);
7753 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
7754 JUMP_LABEL (x) = label;
7755 LABEL_NUSES (label)++;
7756 haifa_init_insn (x);
7757
7758 emit_barrier_after (x);
7759
7760 sched_init_only_bb (empty, NULL);
7761 sched_init_only_bb (single, NULL);
7762 sched_extend_bb ();
7763
7764 adding_bb_to_current_region_p = true;
7765 before_recovery = single;
7766 after_recovery = empty;
7767
7768 if (before_recovery_ptr)
7769 *before_recovery_ptr = before_recovery;
7770
7771 if (sched_verbose >= 2 && spec_info->dump)
7772 fprintf (spec_info->dump,
7773 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
7774 last->index, single->index, empty->index);
7775 }
7776 else
7777 before_recovery = last;
7778 }
7779
7780 /* Returns new recovery block. */
7781 basic_block
7782 sched_create_recovery_block (basic_block *before_recovery_ptr)
7783 {
7784 rtx label;
7785 rtx_insn *barrier;
7786 basic_block rec;
7787
7788 haifa_recovery_bb_recently_added_p = true;
7789 haifa_recovery_bb_ever_added_p = true;
7790
7791 init_before_recovery (before_recovery_ptr);
7792
7793 barrier = get_last_bb_insn (before_recovery);
7794 gcc_assert (BARRIER_P (barrier));
7795
7796 label = emit_label_after (gen_label_rtx (), barrier);
7797
7798 rec = create_basic_block (label, label, before_recovery);
7799
7800 /* A recovery block always ends with an unconditional jump. */
7801 emit_barrier_after (BB_END (rec));
7802
7803 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
7804 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
7805
7806 if (sched_verbose && spec_info->dump)
7807 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
7808 rec->index);
7809
7810 return rec;
7811 }
7812
7813 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
7814 and emit necessary jumps. */
7815 void
7816 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
7817 basic_block second_bb)
7818 {
7819 rtx label;
7820 rtx jump;
7821 int edge_flags;
7822
7823 /* Fix the incoming edge. */
7824 /* ??? Which other flags should be specified? */
7825 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
7826 /* The partition types also compare equal when both blocks are "unpartitioned". */
7827 edge_flags = EDGE_CROSSING;
7828 else
7829 edge_flags = 0;
7830
7831 make_edge (first_bb, rec, edge_flags);
7832 label = block_label (second_bb);
7833 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
7834 JUMP_LABEL (jump) = label;
7835 LABEL_NUSES (label)++;
7836
7837 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
7838 /* The partition types also compare equal when both blocks are "unpartitioned". */
7839 {
7840 /* Rewritten from cfgrtl.c. */
7841 if (flag_reorder_blocks_and_partition
7842 && targetm_common.have_named_sections)
7843 {
7844 /* We don't need the same note for the check because
7845 any_condjump_p (check) == true. */
7846 CROSSING_JUMP_P (jump) = 1;
7847 }
7848 edge_flags = EDGE_CROSSING;
7849 }
7850 else
7851 edge_flags = 0;
7852
7853 make_single_succ_edge (rec, second_bb, edge_flags);
7854 if (dom_info_available_p (CDI_DOMINATORS))
7855 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
7856 }
7857
7858 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
7859 INSN is a simple check that should be converted to a branchy one. */
7860 static void
7861 create_check_block_twin (rtx_insn *insn, bool mutate_p)
7862 {
7863 basic_block rec;
7864 rtx_insn *label, *check, *twin;
7865 rtx check_pat;
7866 ds_t fs;
7867 sd_iterator_def sd_it;
7868 dep_t dep;
7869 dep_def _new_dep, *new_dep = &_new_dep;
7870 ds_t todo_spec;
7871
7872 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
7873
7874 if (!mutate_p)
7875 todo_spec = TODO_SPEC (insn);
7876 else
7877 {
7878 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
7879 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
7880
7881 todo_spec = CHECK_SPEC (insn);
7882 }
7883
7884 todo_spec &= SPECULATIVE;
7885
7886 /* Create recovery block. */
7887 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
7888 {
7889 rec = sched_create_recovery_block (NULL);
7890 label = BB_HEAD (rec);
7891 }
7892 else
7893 {
7894 rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
7895 label = NULL;
7896 }
7897
7898 /* Emit CHECK. */
7899 check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);
7900
7901 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7902 {
7903 /* To keep mem_reg alive at the beginning of second_bb,
7904 we emit the check BEFORE insn, so that after splitting,
7905 insn will be at the beginning of second_bb, which will
7906 provide us with the correct life information. */
7907 check = emit_jump_insn_before (check_pat, insn);
7908 JUMP_LABEL (check) = label;
7909 LABEL_NUSES (label)++;
7910 }
7911 else
7912 check = emit_insn_before (check_pat, insn);
7913
7914 /* Extend data structures. */
7915 haifa_init_insn (check);
7916
7917 /* CHECK is being added to current region. Extend ready list. */
7918 gcc_assert (sched_ready_n_insns != -1);
7919 sched_extend_ready_list (sched_ready_n_insns + 1);
7920
7921 if (current_sched_info->add_remove_insn)
7922 current_sched_info->add_remove_insn (insn, 0);
7923
7924 RECOVERY_BLOCK (check) = rec;
7925
7926 if (sched_verbose && spec_info->dump)
7927 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
7928 (*current_sched_info->print_insn) (check, 0));
7929
7930 gcc_assert (ORIG_PAT (insn));
7931
7932 /* Initialize TWIN (twin is a duplicate of original instruction
7933 in the recovery block). */
7934 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7935 {
7936 sd_iterator_def sd_it;
7937 dep_t dep;
7938
7939 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7940 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
7941 {
7942 struct _dep _dep2, *dep2 = &_dep2;
7943
7944 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
7945
7946 sd_add_dep (dep2, true);
7947 }
7948
7949 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
7950 haifa_init_insn (twin);
7951
7952 if (sched_verbose && spec_info->dump)
7953 /* INSN_BB (insn) isn't determined for twin insns yet.
7954 So we can't use current_sched_info->print_insn. */
7955 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
7956 INSN_UID (twin), rec->index);
7957 }
7958 else
7959 {
7960 ORIG_PAT (check) = ORIG_PAT (insn);
7961 HAS_INTERNAL_DEP (check) = 1;
7962 twin = check;
7963 /* ??? We probably should change all OUTPUT dependencies to
7964 (TRUE | OUTPUT). */
7965 }
7966
7967 /* Copy all resolved back dependencies of INSN to TWIN. This will
7968 provide correct value for INSN_TICK (TWIN). */
7969 sd_copy_back_deps (twin, insn, true);
7970
7971 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
7972 /* In case of branchy check, fix CFG. */
7973 {
7974 basic_block first_bb, second_bb;
7975 rtx_insn *jump;
7976
7977 first_bb = BLOCK_FOR_INSN (check);
7978 second_bb = sched_split_block (first_bb, check);
7979
7980 sched_create_recovery_edges (first_bb, rec, second_bb);
7981
7982 sched_init_only_bb (second_bb, first_bb);
7983 sched_init_only_bb (rec, EXIT_BLOCK_PTR_FOR_FN (cfun));
7984
7985 jump = BB_END (rec);
7986 haifa_init_insn (jump);
7987 }
7988
7989 /* Move backward dependences from INSN to CHECK and
7990 move forward dependences from INSN to TWIN. */
7991
7992 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
7993 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
7994 {
7995 rtx_insn *pro = DEP_PRO (dep);
7996 ds_t ds;
7997
7998 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
7999 check --TRUE--> producer ??? or ANTI ???
8000 twin --TRUE--> producer
8001 twin --ANTI--> check
8002
8003 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
8004 check --ANTI--> producer
8005 twin --ANTI--> producer
8006 twin --ANTI--> check
8007
8008 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
8009 check ~~TRUE~~> producer
8010 twin ~~TRUE~~> producer
8011 twin --ANTI--> check */
8012
8013 ds = DEP_STATUS (dep);
8014
8015 if (ds & BEGIN_SPEC)
8016 {
8017 gcc_assert (!mutate_p);
8018 ds &= ~BEGIN_SPEC;
8019 }
8020
8021 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
8022 sd_add_dep (new_dep, false);
8023
8024 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8025 {
8026 DEP_CON (new_dep) = twin;
8027 sd_add_dep (new_dep, false);
8028 }
8029 }
8030
8031 /* Second, remove backward dependencies of INSN. */
8032 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
8033 sd_iterator_cond (&sd_it, &dep);)
8034 {
8035 if ((DEP_STATUS (dep) & BEGIN_SPEC)
8036 || mutate_p)
8037 /* We can delete this dep because we overcome it with
8038 BEGIN_SPECULATION. */
8039 sd_delete_dep (sd_it);
8040 else
8041 sd_iterator_next (&sd_it);
8042 }
8043
8044 /* Future Speculations. Determine what BE_IN speculations will be like. */
8045 fs = 0;
8046
8047 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
8048 here. */
8049
8050 gcc_assert (!DONE_SPEC (insn));
8051
8052 if (!mutate_p)
8053 {
8054 ds_t ts = TODO_SPEC (insn);
8055
8056 DONE_SPEC (insn) = ts & BEGIN_SPEC;
8057 CHECK_SPEC (check) = ts & BEGIN_SPEC;
8058
8059 /* The weakness of future speculations depends solely on the initial
8060 BEGIN speculation. */
8061 if (ts & BEGIN_DATA)
8062 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
8063 if (ts & BEGIN_CONTROL)
8064 fs = set_dep_weak (fs, BE_IN_CONTROL,
8065 get_dep_weak (ts, BEGIN_CONTROL));
8066 }
8067 else
8068 CHECK_SPEC (check) = CHECK_SPEC (insn);
8069
8070 /* Future speculations: call the helper. */
8071 process_insn_forw_deps_be_in_spec (insn, twin, fs);
8072
8073 if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
8074 {
8075 /* Which types of dependencies we should use here is, in
8076 general, a machine-dependent question... But, for now,
8077 it is not. */
8078
8079 if (!mutate_p)
8080 {
8081 init_dep (new_dep, insn, check, REG_DEP_TRUE);
8082 sd_add_dep (new_dep, false);
8083
8084 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
8085 sd_add_dep (new_dep, false);
8086 }
8087 else
8088 {
8089 if (spec_info->dump)
8090 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
8091 (*current_sched_info->print_insn) (insn, 0));
8092
8093 /* Remove all dependencies of the INSN. */
8094 {
8095 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
8096 | SD_LIST_BACK
8097 | SD_LIST_RES_BACK));
8098 while (sd_iterator_cond (&sd_it, &dep))
8099 sd_delete_dep (sd_it);
8100 }
8101
8102 /* If the former check (INSN) was already moved to the ready (or queue)
8103 list, add the new check (CHECK) there too. */
8104 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
8105 try_ready (check);
8106
8107 /* Remove old check from instruction stream and free its
8108 data. */
8109 sched_remove_insn (insn);
8110 }
8111
8112 init_dep (new_dep, check, twin, REG_DEP_ANTI);
8113 sd_add_dep (new_dep, false);
8114 }
8115 else
8116 {
8117 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
8118 sd_add_dep (new_dep, false);
8119 }
8120
8121 if (!mutate_p)
8122 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
8123 because it'll be done later in add_to_speculative_block. */
8124 {
8125 rtx_vec_t priorities_roots = rtx_vec_t ();
8126
8127 clear_priorities (twin, &priorities_roots);
8128 calc_priorities (priorities_roots);
8129 priorities_roots.release ();
8130 }
8131 }
8132
8133 /* Removes dependencies between instructions in the recovery block REC
8134 and ordinary region instructions. It keeps inner dependencies so they
8135 won't need to be recomputed. */
8136 static void
8137 fix_recovery_deps (basic_block rec)
8138 {
8139 rtx_insn *note, *insn, *jump;
8140 rtx_insn_list *ready_list = 0;
8141 bitmap_head in_ready;
8142 rtx_insn_list *link;
8143
8144 bitmap_initialize (&in_ready, 0);
8145
8146 /* NOTE - a basic block note. */
8147 note = NEXT_INSN (BB_HEAD (rec));
8148 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8149 insn = BB_END (rec);
8150 gcc_assert (JUMP_P (insn));
8151 insn = PREV_INSN (insn);
8152
8153 do
8154 {
8155 sd_iterator_def sd_it;
8156 dep_t dep;
8157
8158 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
8159 sd_iterator_cond (&sd_it, &dep);)
8160 {
8161 rtx_insn *consumer = DEP_CON (dep);
8162
8163 if (BLOCK_FOR_INSN (consumer) != rec)
8164 {
8165 sd_delete_dep (sd_it);
8166
8167 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
8168 ready_list = alloc_INSN_LIST (consumer, ready_list);
8169 }
8170 else
8171 {
8172 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
8173
8174 sd_iterator_next (&sd_it);
8175 }
8176 }
8177
8178 insn = PREV_INSN (insn);
8179 }
8180 while (insn != note);
8181
8182 bitmap_clear (&in_ready);
8183
8184 /* Try to add instructions to the ready or queue list. */
8185 for (link = ready_list; link; link = link->next ())
8186 try_ready (link->insn ());
8187 free_INSN_LIST_list (&ready_list);
8188
8189 /* Fixing jump's dependences. */
8190 insn = BB_HEAD (rec);
8191 jump = BB_END (rec);
8192
8193 gcc_assert (LABEL_P (insn));
8194 insn = NEXT_INSN (insn);
8195
8196 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
8197 add_jump_dependencies (insn, jump);
8198 }
8199
8200 /* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
8201 instruction data. */
8202 static bool
8203 haifa_change_pattern (rtx_insn *insn, rtx new_pat)
8204 {
8205 int t;
8206
8207 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
8208 if (!t)
8209 return false;
8210
8211 update_insn_after_change (insn);
8212 return true;
8213 }
8214
8215 /* -1 - can't speculate,
8216 0 - for speculation with REQUEST mode it is OK to use
8217 current instruction pattern,
8218 1 - need to change pattern for *NEW_PAT to be speculative. */
8219 int
8220 sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8221 {
8222 gcc_assert (current_sched_info->flags & DO_SPECULATION
8223 && (request & SPECULATIVE)
8224 && sched_insn_is_legitimate_for_speculation_p (insn, request));
8225
8226 if ((request & spec_info->mask) != request)
8227 return -1;
8228
8229 if (request & BE_IN_SPEC
8230 && !(request & BEGIN_SPEC))
8231 return 0;
8232
8233 return targetm.sched.speculate_insn (insn, request, new_pat);
8234 }
8235
8236 static int
8237 haifa_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
8238 {
8239 gcc_assert (sched_deps_info->generate_spec_deps
8240 && !IS_SPECULATION_CHECK_P (insn));
8241
8242 if (HAS_INTERNAL_DEP (insn)
8243 || SCHED_GROUP_P (insn))
8244 return -1;
8245
8246 return sched_speculate_insn (insn, request, new_pat);
8247 }
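
/* Note (summary of the return convention above): the caller in
   try_ready () interprets the result as follows: -1 turns the insn back
   into a HARD_DEP, 0 keeps the current pattern and only records
   ORIG_PAT, and 1 installs *NEW_PAT via haifa_change_pattern ().  */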
8248
8249 /* Print some information about block BB, which starts with HEAD and
8250 ends with TAIL, before scheduling it.
8251 I is zero if the scheduler is about to start with a fresh ebb. */
8252 static void
8253 dump_new_block_header (int i, basic_block bb, rtx_insn *head, rtx_insn *tail)
8254 {
8255 if (!i)
8256 fprintf (sched_dump,
8257 ";; ======================================================\n");
8258 else
8259 fprintf (sched_dump,
8260 ";; =====================ADVANCING TO=====================\n");
8261 fprintf (sched_dump,
8262 ";; -- basic block %d from %d to %d -- %s reload\n",
8263 bb->index, INSN_UID (head), INSN_UID (tail),
8264 (reload_completed ? "after" : "before"));
8265 fprintf (sched_dump,
8266 ";; ======================================================\n");
8267 fprintf (sched_dump, "\n");
8268 }
8269
8270 /* Unlink basic block notes and labels and save them, so they
8271 can be easily restored. We unlink basic block notes in the EBB to
8272 provide backward compatibility with the previous code, as target backends
8273 assume that there will be only instructions between
8274 current_sched_info->{head and tail}. We restore these notes as soon
8275 as we can.
8276 FIRST (LAST) is the first (last) basic block in the ebb.
8277 NB: In the usual case (FIRST == LAST) nothing is really done. */
8278 void
8279 unlink_bb_notes (basic_block first, basic_block last)
8280 {
8281 /* We DON'T unlink basic block notes of the first block in the ebb. */
8282 if (first == last)
8283 return;
8284
8285 bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
8286
8287 /* Make a sentinel. */
8288 if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
8289 bb_header[last->next_bb->index] = 0;
8290
8291 first = first->next_bb;
8292 do
8293 {
8294 rtx_insn *prev, *label, *note, *next;
8295
8296 label = BB_HEAD (last);
8297 if (LABEL_P (label))
8298 note = NEXT_INSN (label);
8299 else
8300 note = label;
8301 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8302
8303 prev = PREV_INSN (label);
8304 next = NEXT_INSN (note);
8305 gcc_assert (prev && next);
8306
8307 SET_NEXT_INSN (prev) = next;
8308 SET_PREV_INSN (next) = prev;
8309
8310 bb_header[last->index] = label;
8311
8312 if (last == first)
8313 break;
8314
8315 last = last->prev_bb;
8316 }
8317 while (1);
8318 }
8319
8320 /* Restore basic block notes.
8321 FIRST is the first basic block in the ebb. */
8322 static void
8323 restore_bb_notes (basic_block first)
8324 {
8325 if (!bb_header)
8326 return;
8327
8328 /* We DON'T unlink basic block notes of the first block in the ebb. */
8329 first = first->next_bb;
8330 /* Remember: FIRST is actually the second basic block in the ebb. */
8331
8332 while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
8333 && bb_header[first->index])
8334 {
8335 rtx_insn *prev, *label, *note, *next;
8336
8337 label = bb_header[first->index];
8338 prev = PREV_INSN (label);
8339 next = NEXT_INSN (prev);
8340
8341 if (LABEL_P (label))
8342 note = NEXT_INSN (label);
8343 else
8344 note = label;
8345 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
8346
8347 bb_header[first->index] = 0;
8348
8349 SET_NEXT_INSN (prev) = label;
8350 SET_NEXT_INSN (note) = next;
8351 SET_PREV_INSN (next) = note;
8352
8353 first = first->next_bb;
8354 }
8355
8356 free (bb_header);
8357 bb_header = 0;
8358 }
8359
8360 /* Helper function.
8361 Fix CFG after both in- and inter-block movement of
8362 control_flow_insn_p JUMP. */
8363 static void
8364 fix_jump_move (rtx_insn *jump)
8365 {
8366 basic_block bb, jump_bb, jump_bb_next;
8367
8368 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8369 jump_bb = BLOCK_FOR_INSN (jump);
8370 jump_bb_next = jump_bb->next_bb;
8371
8372 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
8373 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
8374
8375 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
8376 /* if jump_bb_next is not empty. */
8377 BB_END (jump_bb) = BB_END (jump_bb_next);
8378
8379 if (BB_END (bb) != PREV_INSN (jump))
8380 /* Then there are instructions after jump that should be moved
8381 to jump_bb_next. */
8382 BB_END (jump_bb_next) = BB_END (bb);
8383 else
8384 /* Otherwise jump_bb_next is empty. */
8385 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
8386
8387 /* To make assertion in move_insn happy. */
8388 BB_END (bb) = PREV_INSN (jump);
8389
8390 update_bb_for_insn (jump_bb_next);
8391 }
8392
8393 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
8394 static void
8395 move_block_after_check (rtx_insn *jump)
8396 {
8397 basic_block bb, jump_bb, jump_bb_next;
8398 vec<edge, va_gc> *t;
8399
8400 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
8401 jump_bb = BLOCK_FOR_INSN (jump);
8402 jump_bb_next = jump_bb->next_bb;
8403
8404 update_bb_for_insn (jump_bb);
8405
8406 gcc_assert (IS_SPECULATION_CHECK_P (jump)
8407 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
8408
8409 unlink_block (jump_bb_next);
8410 link_block (jump_bb_next, bb);
8411
8412 t = bb->succs;
8413 bb->succs = 0;
8414 move_succs (&(jump_bb->succs), bb);
8415 move_succs (&(jump_bb_next->succs), jump_bb);
8416 move_succs (&t, jump_bb_next);
8417
8418 df_mark_solutions_dirty ();
8419
8420 common_sched_info->fix_recovery_cfg
8421 (bb->index, jump_bb->index, jump_bb_next->index);
8422 }
8423
8424 /* Helper function for move_block_after_check.
8425 This function attaches the edge vector pointed to by SUCCSP to
8426 block TO. */
8427 static void
8428 move_succs (vec<edge, va_gc> **succsp, basic_block to)
8429 {
8430 edge e;
8431 edge_iterator ei;
8432
8433 gcc_assert (to->succs == 0);
8434
8435 to->succs = *succsp;
8436
8437 FOR_EACH_EDGE (e, ei, to->succs)
8438 e->src = to;
8439
8440 *succsp = 0;
8441 }
8442
8443 /* Remove INSN from the instruction stream.
8444 INSN should not have any dependencies. */
8445 static void
8446 sched_remove_insn (rtx_insn *insn)
8447 {
8448 sd_finish_insn (insn);
8449
8450 change_queue_index (insn, QUEUE_NOWHERE);
8451 current_sched_info->add_remove_insn (insn, 1);
8452 delete_insn (insn);
8453 }
8454
8455 /* Clear the priorities of all instructions that are forward dependent on INSN.
8456 Store in vector pointed to by ROOTS_PTR insns on which priority () should
8457 be invoked to initialize all cleared priorities. */
8458 static void
8459 clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
8460 {
8461 sd_iterator_def sd_it;
8462 dep_t dep;
8463 bool insn_is_root_p = true;
8464
8465 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
8466
8467 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
8468 {
8469 rtx_insn *pro = DEP_PRO (dep);
8470
8471 if (INSN_PRIORITY_STATUS (pro) >= 0
8472 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
8473 {
8474 /* If DEP doesn't contribute to priority then INSN itself should
8475 be added to priority roots. */
8476 if (contributes_to_priority_p (dep))
8477 insn_is_root_p = false;
8478
8479 INSN_PRIORITY_STATUS (pro) = -1;
8480 clear_priorities (pro, roots_ptr);
8481 }
8482 }
8483
8484 if (insn_is_root_p)
8485 roots_ptr->safe_push (insn);
8486 }
8487
8488 /* Recompute the priorities of instructions whose priorities might have been
8489 changed. ROOTS is a vector of instructions whose priority computation will
8490 trigger initialization of all cleared priorities. */
8491 static void
8492 calc_priorities (rtx_vec_t roots)
8493 {
8494 int i;
8495 rtx_insn *insn;
8496
8497 FOR_EACH_VEC_ELT (roots, i, insn)
8498 priority (insn);
8499 }
8500
8501
8502 /* Add dependences between JUMP and other instructions in the recovery
8503 block. INSN is the first insn in the recovery block. */
8504 static void
8505 add_jump_dependencies (rtx_insn *insn, rtx_insn *jump)
8506 {
8507 do
8508 {
8509 insn = NEXT_INSN (insn);
8510 if (insn == jump)
8511 break;
8512
8513 if (dep_list_size (insn, SD_LIST_FORW) == 0)
8514 {
8515 dep_def _new_dep, *new_dep = &_new_dep;
8516
8517 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
8518 sd_add_dep (new_dep, false);
8519 }
8520 }
8521 while (1);
8522
8523 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
8524 }
8525
8526 /* Extend data structures for logical insn UID. */
8527 void
8528 sched_extend_luids (void)
8529 {
8530 int new_luids_max_uid = get_max_uid () + 1;
8531
8532 sched_luids.safe_grow_cleared (new_luids_max_uid);
8533 }
8534
8535 /* Initialize LUID for INSN. */
8536 void
8537 sched_init_insn_luid (rtx_insn *insn)
8538 {
8539 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
8540 int luid;
8541
8542 if (i >= 0)
8543 {
8544 luid = sched_max_luid;
8545 sched_max_luid += i;
8546 }
8547 else
8548 luid = -1;
8549
8550 SET_INSN_LUID (insn, luid);
8551 }
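
/* Example (haifa case): with haifa_luid_for_non_insn () above, a NOTE or
   LABEL yields I == 0, so it receives the current sched_max_luid without
   advancing it and thus shares its luid with the following real insn,
   while every INSN_P insn advances sched_max_luid by one.  */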
8552
8553 /* Initialize luids for BBS.
8554 The hook common_sched_info->luid_for_non_insn () is used to determine
8555 if notes, labels, etc. need luids. */
8556 void
8557 sched_init_luids (bb_vec_t bbs)
8558 {
8559 int i;
8560 basic_block bb;
8561
8562 sched_extend_luids ();
8563 FOR_EACH_VEC_ELT (bbs, i, bb)
8564 {
8565 rtx_insn *insn;
8566
8567 FOR_BB_INSNS (bb, insn)
8568 sched_init_insn_luid (insn);
8569 }
8570 }
8571
8572 /* Free LUIDs. */
8573 void
8574 sched_finish_luids (void)
8575 {
8576 sched_luids.release ();
8577 sched_max_luid = 1;
8578 }
8579
8580 /* Return logical uid of INSN. Helpful while debugging. */
8581 int
8582 insn_luid (rtx_insn *insn)
8583 {
8584 return INSN_LUID (insn);
8585 }
8586
8587 /* Extend per insn data in the target. */
8588 void
8589 sched_extend_target (void)
8590 {
8591 if (targetm.sched.h_i_d_extended)
8592 targetm.sched.h_i_d_extended ();
8593 }
8594
8595 /* Extend global scheduler structures (those that live across calls to
8596 schedule_block) to include information about just emitted INSN. */
8597 static void
8598 extend_h_i_d (void)
8599 {
8600 int reserve = (get_max_uid () + 1 - h_i_d.length ());
8601 if (reserve > 0
8602 && ! h_i_d.space (reserve))
8603 {
8604 h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
8605 sched_extend_target ();
8606 }
8607 }
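
/* Growth example (illustrative only): with h_i_d currently describing 100
   insns and get_max_uid () == 120, the 21 missing entries may not fit in
   the spare space, in which case the vector is grown (zero-filled) to
   3 * 120 / 2 == 180 entries and the target is given a chance to extend
   its own per-insn data via sched_extend_target ().  */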
8608
8609 /* Initialize h_i_d entry of the INSN with default values.
8610 Values that are not explicitly initialized here hold zero. */
8611 static void
8612 init_h_i_d (rtx_insn *insn)
8613 {
8614 if (INSN_LUID (insn) > 0)
8615 {
8616 INSN_COST (insn) = -1;
8617 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
8618 INSN_TICK (insn) = INVALID_TICK;
8619 INSN_EXACT_TICK (insn) = INVALID_TICK;
8620 INTER_TICK (insn) = INVALID_TICK;
8621 TODO_SPEC (insn) = HARD_DEP;
8622 }
8623 }
8624
8625 /* Initialize haifa_insn_data for BBS. */
8626 void
8627 haifa_init_h_i_d (bb_vec_t bbs)
8628 {
8629 int i;
8630 basic_block bb;
8631
8632 extend_h_i_d ();
8633 FOR_EACH_VEC_ELT (bbs, i, bb)
8634 {
8635 rtx_insn *insn;
8636
8637 FOR_BB_INSNS (bb, insn)
8638 init_h_i_d (insn);
8639 }
8640 }
8641
8642 /* Finalize haifa_insn_data. */
8643 void
8644 haifa_finish_h_i_d (void)
8645 {
8646 int i;
8647 haifa_insn_data_t data;
8648 struct reg_use_data *use, *next;
8649
8650 FOR_EACH_VEC_ELT (h_i_d, i, data)
8651 {
8652 free (data->max_reg_pressure);
8653 free (data->reg_pressure);
8654 for (use = data->reg_use_list; use != NULL; use = next)
8655 {
8656 next = use->next_insn_use;
8657 free (use);
8658 }
8659 }
8660 h_i_d.release ();
8661 }
8662
8663 /* Init data for the new insn INSN. */
8664 static void
8665 haifa_init_insn (rtx_insn *insn)
8666 {
8667 gcc_assert (insn != NULL);
8668
8669 sched_extend_luids ();
8670 sched_init_insn_luid (insn);
8671 sched_extend_target ();
8672 sched_deps_init (false);
8673 extend_h_i_d ();
8674 init_h_i_d (insn);
8675
8676 if (adding_bb_to_current_region_p)
8677 {
8678 sd_init_insn (insn);
8679
8680 /* Extend dependency caches by one element. */
8681 extend_dependency_caches (1, false);
8682 }
8683 if (sched_pressure != SCHED_PRESSURE_NONE)
8684 init_insn_reg_pressure_info (insn);
8685 }
8686
8687 /* Init data for the new basic block BB which comes after AFTER. */
8688 static void
8689 haifa_init_only_bb (basic_block bb, basic_block after)
8690 {
8691 gcc_assert (bb != NULL);
8692
8693 sched_init_bbs ();
8694
8695 if (common_sched_info->add_block)
8696 /* This changes only data structures of the front-end. */
8697 common_sched_info->add_block (bb, after);
8698 }
8699
8700 /* A generic version of sched_split_block (). */
8701 basic_block
8702 sched_split_block_1 (basic_block first_bb, rtx after)
8703 {
8704 edge e;
8705
8706 e = split_block (first_bb, after);
8707 gcc_assert (e->src == first_bb);
8708
8709 /* sched_split_block emits note if *check == BB_END. Probably it
8710 is better to rip that note off. */
8711
8712 return e->dest;
8713 }
8714
8715 /* A generic version of sched_create_empty_bb (). */
8716 basic_block
8717 sched_create_empty_bb_1 (basic_block after)
8718 {
8719 return create_empty_bb (after);
8720 }
8721
8722 /* Insert PAT as an INSN into the schedule and update the necessary data
8723 structures to account for it. */
8724 rtx_insn *
8725 sched_emit_insn (rtx pat)
8726 {
8727 rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
8728 haifa_init_insn (insn);
8729
8730 if (current_sched_info->add_remove_insn)
8731 current_sched_info->add_remove_insn (insn, 0);
8732
8733 (*current_sched_info->begin_schedule_ready) (insn);
8734 scheduled_insns.safe_push (insn);
8735
8736 last_scheduled_insn = insn;
8737 return insn;
8738 }
8739
8740 /* This function returns a candidate satisfying dispatch constraints from
8741 the ready list. */
8742
8743 static rtx_insn *
8744 ready_remove_first_dispatch (struct ready_list *ready)
8745 {
8746 int i;
8747 rtx_insn *insn = ready_element (ready, 0);
8748
8749 if (ready->n_ready == 1
8750 || !INSN_P (insn)
8751 || INSN_CODE (insn) < 0
8752 || !active_insn_p (insn)
8753 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
8754 return ready_remove_first (ready);
8755
8756 for (i = 1; i < ready->n_ready; i++)
8757 {
8758 insn = ready_element (ready, i);
8759
8760 if (!INSN_P (insn)
8761 || INSN_CODE (insn) < 0
8762 || !active_insn_p (insn))
8763 continue;
8764
8765 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
8766 {
8767 /* Return ith element of ready. */
8768 insn = ready_remove (ready, i);
8769 return insn;
8770 }
8771 }
8772
8773 if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
8774 return ready_remove_first (ready);
8775
8776 for (i = 1; i < ready->n_ready; i++)
8777 {
8778 insn = ready_element (ready, i);
8779
8780 if (!INSN_P (insn)
8781 || INSN_CODE (insn) < 0
8782 || !active_insn_p (insn))
8783 continue;
8784
8785 /* Return i-th element of ready. */
8786 if (targetm.sched.dispatch (insn, IS_CMP))
8787 return ready_remove (ready, i);
8788 }
8789
8790 return ready_remove_first (ready);
8791 }
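
/* Selection order used above (summary): take the head of the ready list
   if it already fits the dispatch window; otherwise the first ready insn
   that does; if nothing fits and the target reports a DISPATCH_VIOLATION,
   fall back to the head; otherwise prefer a comparison (IS_CMP); failing
   that, take the head of the ready list.  */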
8792
8793 /* Get the number of ready insns in the ready list. */
8794
8795 int
8796 number_in_ready (void)
8797 {
8798 return ready.n_ready;
8799 }
8800
8801 /* Get the I-th element of the ready list. */
8802
8803 rtx_insn *
8804 get_ready_element (int i)
8805 {
8806 return ready_element (&ready, i);
8807 }
8808
8809 #endif /* INSN_SCHEDULING */