1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
6 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25    contains the generic parts.  The entry point for the normal
26    instruction scheduling pass is found in sched-rgn.c.
27
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
36
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
41
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
56
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
59
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64    3. prefer in-block over interblock motion, ties broken by
65    4. prefer useful over speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68    6. choose insn with the least dependences upon the previously
69    scheduled insn, ties broken by
70    7. choose the insn which has the most insns dependent on it, or finally
71    8. choose the insn with the lowest UID.
72
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
79
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
83
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using insn backward dependences
86 INSN_BACK_DEPS. INSN_BACK_DEPS are translated into forward dependences
87    INSN_FORW_DEPS for the purpose of forward list scheduling.
88
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
97
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
102
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
107
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
111
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114    exist that inhibit the motion (live register constraints are not
115 represented by data dependence edges).
116
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
120
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
126 \f
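/* A minimal standalone sketch of the list-scheduling loop described
   above, with toy types instead of GCC's internals and the cycle
   queue (Q) omitted for brevity: the ready insn with the highest
   priority is scheduled, which in turn releases its dependents.
   All names here (toy_insn, deps, ...) are invented for illustration;
   compile the fragment separately to experiment.  */
#if 0
#include <stdio.h>

#define N_INSNS 4

struct toy_insn
{
  int priority;		/* Longest path to the end of the block.  */
  int unsched_preds;	/* Unscheduled predecessors; 0 means ready.  */
};

static struct toy_insn insns[N_INSNS] = {
  { 3, 0 }, { 2, 1 }, { 1, 1 }, { 0, 2 }
};

/* deps[i][j] nonzero means insn J depends on insn I.  */
static const int deps[N_INSNS][N_INSNS] = {
  { 0, 1, 1, 1 }, { 0, 0, 0, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }
};

int
main (void)
{
  int n_scheduled = 0;

  while (n_scheduled < N_INSNS)
    {
      int best = -1, i;

      /* Pick the ready insn with the highest priority.  */
      for (i = 0; i < N_INSNS; i++)
	if (insns[i].unsched_preds == 0
	    && (best < 0 || insns[i].priority > insns[best].priority))
	  best = i;
      printf ("step %d: schedule insn %d (priority %d)\n",
	      n_scheduled, best, insns[best].priority);
      insns[best].unsched_preds = -1;	/* Mark as scheduled.  */
      n_scheduled++;

      /* Release the dependents of the chosen insn.  */
      for (i = 0; i < N_INSNS; i++)
	if (deps[best][i])
	  insns[i].unsched_preds--;
    }
  return 0;
}
#endif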
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "diagnostic-core.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "recog.h"
142 #include "sched-int.h"
143 #include "target.h"
144 #include "common/common-target.h"
145 #include "output.h"
146 #include "params.h"
147 #include "vecprim.h"
148 #include "dbgcnt.h"
149 #include "cfgloop.h"
150 #include "ira.h"
151 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
152 #include "hashtab.h"
153
154 #ifdef INSN_SCHEDULING
155
156 /* issue_rate is the number of insns that can be scheduled in the same
157 machine cycle. It can be defined in the config/mach/mach.h file,
158 otherwise we set it to 1. */
159
160 int issue_rate;
161
162 /* This can be set to true by a backend if the scheduler should not
163 enable a DCE pass. */
164 bool sched_no_dce;
165
166 /* The current initiation interval used when modulo scheduling. */
167 static int modulo_ii;
168
169 /* The maximum number of stages we are prepared to handle. */
170 static int modulo_max_stages;
171
172 /* The number of insns that exist in each iteration of the loop. We use this
173 to detect when we've scheduled all insns from the first iteration. */
174 static int modulo_n_insns;
175
176 /* The current count of insns in the first iteration of the loop that have
177 already been scheduled. */
178 static int modulo_insns_scheduled;
179
180 /* The maximum uid of insns from the first iteration of the loop. */
181 static int modulo_iter0_max_uid;
182
183 /* The number of times we should attempt to backtrack when modulo scheduling.
184 Decreased each time we have to backtrack. */
185 static int modulo_backtracks_left;
186
187 /* The stage in which the last insn from the original loop was
188 scheduled. */
189 static int modulo_last_stage;
190
191 /* sched-verbose controls the amount of debugging output the
192 scheduler prints. It is controlled by -fsched-verbose=N:
193    N>0 and no -dSR : the output is directed to stderr.
194 N>=10 will direct the printouts to stderr (regardless of -dSR).
195 N=1: same as -dSR.
196 N=2: bb's probabilities, detailed ready list info, unit/insn info.
197 N=3: rtl at abort point, control-flow, regions info.
198 N=5: dependences info. */
199
200 int sched_verbose = 0;
201
202 /* Debugging file.  All printouts are sent to sched_dump, which is always
203    set, either to stderr, or to the dump listing file (-dRS).  */
204 FILE *sched_dump = 0;
205
206 /* This is a placeholder for the scheduler parameters common
207 to all schedulers. */
208 struct common_sched_info_def *common_sched_info;
209
210 #define INSN_TICK(INSN) (HID (INSN)->tick)
211 #define INSN_EXACT_TICK(INSN) (HID (INSN)->exact_tick)
212 #define INSN_TICK_ESTIMATE(INSN) (HID (INSN)->tick_estimate)
213 #define INTER_TICK(INSN) (HID (INSN)->inter_tick)
214 #define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
215 #define SHADOW_P(INSN) (HID (INSN)->shadow_p)
216
217 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
218 then it should be recalculated from scratch. */
219 #define INVALID_TICK (-(max_insn_queue_index + 1))
220 /* The minimal value of the INSN_TICK of an instruction. */
221 #define MIN_TICK (-max_insn_queue_index)
222
223 /* List of important notes we must keep around. This is a pointer to the
224 last element in the list. */
225 rtx note_list;
226
227 static struct spec_info_def spec_info_var;
228 /* Description of the speculative part of the scheduling.
229 If NULL - no speculation. */
230 spec_info_t spec_info = NULL;
231
232 /* True if a recovery block was added during scheduling of the current
233    block.  Used to determine if we need to fix INSN_TICKs.  */
234 static bool haifa_recovery_bb_recently_added_p;
235
236 /* True if a recovery block was added during this scheduling pass.
237    Used to determine if we should have empty memory pools of dependencies
238    after finishing the current region.  */
239 bool haifa_recovery_bb_ever_added_p;
240
241 /* Counters of different types of speculative instructions. */
242 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
243
244 /* Array used in {unlink, restore}_bb_notes. */
245 static rtx *bb_header = 0;
246
247 /* Basic block after which recovery blocks will be created. */
248 static basic_block before_recovery;
249
250 /* Basic block just before the EXIT_BLOCK and after recovery, if we have
251 created it. */
252 basic_block after_recovery;
253
254 /* FALSE if we add bb to another region, so we don't need to initialize it. */
255 bool adding_bb_to_current_region_p = true;
256
257 /* Queues, etc. */
258
259 /* An instruction is ready to be scheduled when all insns preceding it
260 have already been scheduled. It is important to ensure that all
261 insns which use its result will not be executed until its result
262 has been computed. An insn is maintained in one of four structures:
263
264 (P) the "Pending" set of insns which cannot be scheduled until
265 their dependencies have been satisfied.
266 (Q) the "Queued" set of insns that can be scheduled when sufficient
267 time has passed.
268 (R) the "Ready" list of unscheduled, uncommitted insns.
269 (S) the "Scheduled" list of insns.
270
271 Initially, all insns are either "Pending" or "Ready" depending on
272 whether their dependencies are satisfied.
273
274 Insns move from the "Ready" list to the "Scheduled" list as they
275 are committed to the schedule. As this occurs, the insns in the
276 "Pending" list have their dependencies satisfied and move to either
277 the "Ready" list or the "Queued" set depending on whether
278 sufficient time has passed to make them ready. As time passes,
279 insns move from the "Queued" set to the "Ready" list.
280
281    The "Pending" list (P) consists of the insns in the INSN_FORW_DEPS of
282    the unscheduled insns, i.e., those that are ready, queued, and pending.
283 The "Queued" set (Q) is implemented by the variable `insn_queue'.
284 The "Ready" list (R) is implemented by the variables `ready' and
285 `n_ready'.
286 The "Scheduled" list (S) is the new insn chain built by this pass.
287
288 The transition (R->S) is implemented in the scheduling loop in
289 `schedule_block' when the best insn to schedule is chosen.
290 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
291 insns move from the ready list to the scheduled list.
292    The transition (Q->R) is implemented in 'queue_to_ready' as time
293    passes or stalls are introduced.  */
294
295 /* Implement a circular buffer to delay instructions until sufficient
296    time has passed.  For the new pipeline description interface,
297    MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
298    than the maximal time of instruction execution computed by genattr.c
299    on the basis of the maximal time of functional unit reservations and
300    of getting a result.  This is the longest time an insn may be queued.  */
301
302 static rtx *insn_queue;
303 static int q_ptr = 0;
304 static int q_size = 0;
305 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
306 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
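/* A standalone illustration of the wrap-around arithmetic above,
   assuming max_insn_queue_index is a power of two minus one (7 in
   this toy): masking keeps the queue index cycling through 0..7.
   Not part of the scheduler.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const int max_queue_index = 7;	/* 2^3 - 1.  */
  int q_ptr = 5;

  /* Advancing 4 cycles from slot 5 wraps to slot (5 + 4) & 7 == 1.  */
  printf ("%d\n", (q_ptr + 4) & max_queue_index);
  return 0;
}
#endif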
307
308 #define QUEUE_SCHEDULED (-3)
309 #define QUEUE_NOWHERE (-2)
310 #define QUEUE_READY (-1)
311 /* QUEUE_SCHEDULED - INSN is scheduled.
312    QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in the
313    queue nor in the ready list.
314 QUEUE_READY - INSN is in ready list.
315 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
316
317 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
318
319 /* The following variable value refers to all current and future
320    reservations of the processor units.  */
321 state_t curr_state;
322
323 /* The following variable value is the size of memory representing all
324    current and future reservations of the processor units.  */
325 size_t dfa_state_size;
326
327 /* The following array is used to find the best insn from ready when
328 the automaton pipeline interface is used. */
329 char *ready_try = NULL;
330
331 /* The ready list. */
332 struct ready_list ready = {NULL, 0, 0, 0, 0};
333
334 /* The pointer to the ready list (to be removed). */
335 static struct ready_list *readyp = &ready;
336
337 /* Scheduling clock. */
338 static int clock_var;
339
340 /* Clock at which the previous instruction was issued. */
341 static int last_clock_var;
342
343 /* Set to true if, when queuing a shadow insn, we discover that it would be
344 scheduled too late. */
345 static bool must_backtrack;
346
347 /* The following variable value is the number of essential insns issued
348    on the current cycle.  An insn is essential if it changes the
349    processor's state.  */
350 int cycle_issued_insns;
351
352 /* This records the actual schedule. It is built up during the main phase
353 of schedule_block, and afterwards used to reorder the insns in the RTL. */
354 static VEC(rtx, heap) *scheduled_insns;
355
356 static int may_trap_exp (const_rtx, int);
357
358 /* Nonzero iff the address is composed of at most 1 register.  */
359 #define CONST_BASED_ADDRESS_P(x) \
360 (REG_P (x) \
361 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
362 || (GET_CODE (x) == LO_SUM)) \
363 && (CONSTANT_P (XEXP (x, 0)) \
364 || CONSTANT_P (XEXP (x, 1)))))
365
366 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
367 as found by analyzing insn's expression. */
368
369 \f
370 static int haifa_luid_for_non_insn (rtx x);
371
372 /* Haifa version of sched_info hooks common to all headers. */
373 const struct common_sched_info_def haifa_common_sched_info =
374 {
375 NULL, /* fix_recovery_cfg */
376 NULL, /* add_block */
377 NULL, /* estimate_number_of_insns */
378 haifa_luid_for_non_insn, /* luid_for_non_insn */
379 SCHED_PASS_UNKNOWN /* sched_pass_id */
380 };
381
382 /* Mapping from instruction UID to its Logical UID. */
383 VEC (int, heap) *sched_luids = NULL;
384
385 /* Next LUID to assign to an instruction. */
386 int sched_max_luid = 1;
387
388 /* Haifa Instruction Data. */
389 VEC (haifa_insn_data_def, heap) *h_i_d = NULL;
390
391 void (* sched_init_only_bb) (basic_block, basic_block);
392
393 /* Split block function.  Different schedulers might use different functions
394    to keep their internal data consistent.  */
395 basic_block (* sched_split_block) (basic_block, rtx);
396
397 /* Create empty basic block after the specified block. */
398 basic_block (* sched_create_empty_bb) (basic_block);
399
400 static int
401 may_trap_exp (const_rtx x, int is_store)
402 {
403 enum rtx_code code;
404
405 if (x == 0)
406 return TRAP_FREE;
407 code = GET_CODE (x);
408 if (is_store)
409 {
410 if (code == MEM && may_trap_p (x))
411 return TRAP_RISKY;
412 else
413 return TRAP_FREE;
414 }
415 if (code == MEM)
416 {
417 /* The insn uses memory: a volatile load. */
418 if (MEM_VOLATILE_P (x))
419 return IRISKY;
420 /* An exception-free load. */
421 if (!may_trap_p (x))
422 return IFREE;
423 /* A load with 1 base register, to be further checked. */
424 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
425 return PFREE_CANDIDATE;
426 /* No info on the load, to be further checked. */
427 return PRISKY_CANDIDATE;
428 }
429 else
430 {
431 const char *fmt;
432 int i, insn_class = TRAP_FREE;
433
434 /* Neither store nor load, check if it may cause a trap. */
435 if (may_trap_p (x))
436 return TRAP_RISKY;
437 /* Recursive step: walk the insn... */
438 fmt = GET_RTX_FORMAT (code);
439 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
440 {
441 if (fmt[i] == 'e')
442 {
443 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
444 insn_class = WORST_CLASS (insn_class, tmp_class);
445 }
446 else if (fmt[i] == 'E')
447 {
448 int j;
449 for (j = 0; j < XVECLEN (x, i); j++)
450 {
451 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
452 insn_class = WORST_CLASS (insn_class, tmp_class);
453 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
454 break;
455 }
456 }
457 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
458 break;
459 }
460 return insn_class;
461 }
462 }
463
464 /* Classifies rtx X of an insn for the purpose of verifying that X can be
465 executed speculatively (and consequently the insn can be moved
466 speculatively), by examining X, returning:
467 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
468 TRAP_FREE: non-load insn.
469 IFREE: load from a globally safe location.
470 IRISKY: volatile load.
471    PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
472    being either PFREE or PRISKY.  */
473
474 static int
475 haifa_classify_rtx (const_rtx x)
476 {
477 int tmp_class = TRAP_FREE;
478 int insn_class = TRAP_FREE;
479 enum rtx_code code;
480
481 if (GET_CODE (x) == PARALLEL)
482 {
483 int i, len = XVECLEN (x, 0);
484
485 for (i = len - 1; i >= 0; i--)
486 {
487 tmp_class = haifa_classify_rtx (XVECEXP (x, 0, i));
488 insn_class = WORST_CLASS (insn_class, tmp_class);
489 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
490 break;
491 }
492 }
493 else
494 {
495 code = GET_CODE (x);
496 switch (code)
497 {
498 case CLOBBER:
499 /* Test if it is a 'store'. */
500 tmp_class = may_trap_exp (XEXP (x, 0), 1);
501 break;
502 case SET:
503 /* Test if it is a store. */
504 tmp_class = may_trap_exp (SET_DEST (x), 1);
505 if (tmp_class == TRAP_RISKY)
506 break;
507 /* Test if it is a load. */
508 tmp_class =
509 WORST_CLASS (tmp_class,
510 may_trap_exp (SET_SRC (x), 0));
511 break;
512 case COND_EXEC:
513 tmp_class = haifa_classify_rtx (COND_EXEC_CODE (x));
514 if (tmp_class == TRAP_RISKY)
515 break;
516 tmp_class = WORST_CLASS (tmp_class,
517 may_trap_exp (COND_EXEC_TEST (x), 0));
518 break;
519 case TRAP_IF:
520 tmp_class = TRAP_RISKY;
521 break;
522 default:;
523 }
524 insn_class = tmp_class;
525 }
526
527 return insn_class;
528 }
529
530 int
531 haifa_classify_insn (const_rtx insn)
532 {
533 return haifa_classify_rtx (PATTERN (insn));
534 }
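/* A sketch of how a caller might act on the classification above when
   deciding whether an insn is a candidate for speculative motion.
   This fragment assumes scheduler context (an INSN in hand) and is
   illustrative only, not code from this file.  */
#if 0
  switch (haifa_classify_insn (insn))
    {
    case TRAP_FREE:
    case IFREE:
      /* Exception-free; safe to move speculatively.  */
      break;
    case PFREE_CANDIDATE:
    case PRISKY_CANDIDATE:
      /* A load that needs a further check against the branch
	 being bypassed.  */
      break;
    default:
      /* IRISKY or TRAP_RISKY: do not move.  */
      break;
    }
#endif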
535 \f
536 /* After the scheduler initialization function has been called, this function
537    can be called to enable modulo scheduling.  II is the initiation interval
538    we should use; it affects the delays for delay_pairs that were recorded
539    as separated by a given number of stages.
540
541    MAX_STAGES provides us with a limit after which we give up scheduling;
542    the caller must have unrolled at least as many copies of the loop body
543    and recorded delay_pairs for them.
544
545    INSNS is the number of real (non-debug) insns in one iteration of
546    the loop.  MAX_UID can be used to test whether an insn belongs to
547    the first iteration of the loop; all of them have a uid lower than
548    MAX_UID.  */
549 void
550 set_modulo_params (int ii, int max_stages, int insns, int max_uid)
551 {
552 modulo_ii = ii;
553 modulo_max_stages = max_stages;
554 modulo_n_insns = insns;
555 modulo_iter0_max_uid = max_uid;
556 modulo_backtracks_left = PARAM_VALUE (PARAM_MAX_MODULO_BACKTRACK_ATTEMPTS);
557 }
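/* A hypothetical example of a port's modulo-scheduling driver enabling
   this machinery.  The numbers and FIRST_ITER_MAX_UID are invented for
   illustration: here one loop iteration holds 12 real insns, up to 4
   stages are allowed, and the caller has unrolled at least 4 copies of
   the body and recorded delay_pairs for them.  */
#if 0
  set_modulo_params (/* ii = */ 8, /* max_stages = */ 4,
		     /* insns = */ 12, first_iter_max_uid);
#endif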
558
559 /* A structure to record a pair of insns where the first one is a real
560 insn that has delay slots, and the second is its delayed shadow.
561 I1 is scheduled normally and will emit an assembly instruction,
562 while I2 describes the side effect that takes place at the
563 transition between cycles CYCLES and (CYCLES + 1) after I1. */
564 struct delay_pair
565 {
566 struct delay_pair *next_same_i1;
567 rtx i1, i2;
568 int cycles;
569   /* When doing modulo scheduling, a delay_pair can also be used to
570      show that I1 and I2 are the same insn in a different stage.  If that
571      is the case, STAGES will be nonzero.  */
572 int stages;
573 };
574
575 /* Two hash tables to record delay_pairs, one indexed by I1 and the other
576 indexed by I2. */
577 static htab_t delay_htab;
578 static htab_t delay_htab_i2;
579
580 /* Called through htab_traverse.  Walk the hashtable using I2 as
581    index, and delete all elements involving a UID not lower than
582    that pointed to by *DATA.  */
583 static int
584 htab_i2_traverse (void **slot, void *data)
585 {
586 int maxuid = *(int *)data;
587 struct delay_pair *p = *(struct delay_pair **)slot;
588 if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
589 {
590 htab_clear_slot (delay_htab_i2, slot);
591 }
592 return 1;
593 }
594
595 /* Called through htab_traverse.  Walk the hashtable using I1 as
596    index, and delete all elements involving a UID not lower than
597    that pointed to by *DATA.  */
598 static int
599 htab_i1_traverse (void **slot, void *data)
600 {
601 int maxuid = *(int *)data;
602 struct delay_pair **pslot = (struct delay_pair **)slot;
603 struct delay_pair *p, *first, **pprev;
604
605 if (INSN_UID ((*pslot)->i1) >= maxuid)
606 {
607 htab_clear_slot (delay_htab, slot);
608 return 1;
609 }
610 pprev = &first;
611 for (p = *pslot; p; p = p->next_same_i1)
612 {
613 if (INSN_UID (p->i2) < maxuid)
614 {
615 *pprev = p;
616 pprev = &p->next_same_i1;
617 }
618 }
619 *pprev = NULL;
620 if (first == NULL)
621 htab_clear_slot (delay_htab, slot);
622 else
623 *pslot = first;
624 return 1;
625 }
626
627 /* Discard all delay pairs which involve an insn with a UID not
628    lower than MAX_UID.  */
629 void
630 discard_delay_pairs_above (int max_uid)
631 {
632 htab_traverse (delay_htab, htab_i1_traverse, &max_uid);
633 htab_traverse (delay_htab_i2, htab_i2_traverse, &max_uid);
634 }
635
636 /* Returns a hash value for X (which really is a delay_pair), based on
637 hashing just I1. */
638 static hashval_t
639 delay_hash_i1 (const void *x)
640 {
641 return htab_hash_pointer (((const struct delay_pair *) x)->i1);
642 }
643
644 /* Returns a hash value for X (which really is a delay_pair), based on
645 hashing just I2. */
646 static hashval_t
647 delay_hash_i2 (const void *x)
648 {
649 return htab_hash_pointer (((const struct delay_pair *) x)->i2);
650 }
651
652 /* Return nonzero if I1 of pair X is the same as the insn Y.  */
653 static int
654 delay_i1_eq (const void *x, const void *y)
655 {
656 return ((const struct delay_pair *) x)->i1 == y;
657 }
658
659 /* Return nonzero if I2 of pair X is the same as the insn Y.  */
660 static int
661 delay_i2_eq (const void *x, const void *y)
662 {
663 return ((const struct delay_pair *) x)->i2 == y;
664 }
665
666 /* This function can be called by a port just before it starts the final
667 scheduling pass. It records the fact that an instruction with delay
668 slots has been split into two insns, I1 and I2. The first one will be
669 scheduled normally and initiates the operation. The second one is a
670 shadow which must follow a specific number of cycles after I1; its only
671 purpose is to show the side effect that occurs at that cycle in the RTL.
672 If a JUMP_INSN or a CALL_INSN has been split, I1 should be a normal INSN,
673 while I2 retains the original insn type.
674
675 There are two ways in which the number of cycles can be specified,
676 involving the CYCLES and STAGES arguments to this function. If STAGES
677 is zero, we just use the value of CYCLES. Otherwise, STAGES is a factor
678 which is multiplied by MODULO_II to give the number of cycles. This is
679 only useful if the caller also calls set_modulo_params to enable modulo
680 scheduling. */
681
682 void
683 record_delay_slot_pair (rtx i1, rtx i2, int cycles, int stages)
684 {
685 struct delay_pair *p = XNEW (struct delay_pair);
686 struct delay_pair **slot;
687
688 p->i1 = i1;
689 p->i2 = i2;
690 p->cycles = cycles;
691 p->stages = stages;
692
693 if (!delay_htab)
694 {
695 delay_htab = htab_create (10, delay_hash_i1, delay_i1_eq, NULL);
696 delay_htab_i2 = htab_create (10, delay_hash_i2, delay_i2_eq, free);
697 }
698 slot = ((struct delay_pair **)
699 htab_find_slot_with_hash (delay_htab, i1, htab_hash_pointer (i1),
700 INSERT));
701 p->next_same_i1 = *slot;
702 *slot = p;
703 slot = ((struct delay_pair **)
704 htab_find_slot_with_hash (delay_htab_i2, i2, htab_hash_pointer (i2),
705 INSERT));
706 *slot = p;
707 }
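/* A hypothetical use from a port's splitter for a delayed-effect
   instruction: I1 issues the operation and SHADOW describes the side
   effect that takes place three cycles later.  STAGES is zero, so
   CYCLES is used directly.  I1 and SHADOW are assumed to come from
   the port; this call is illustrative only.  */
#if 0
  record_delay_slot_pair (i1, shadow, /* cycles = */ 3, /* stages = */ 0);
#endif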
708
709 /* For a pair P of insns, return the fixed number of cycles after the
710    first insn at which the second must be scheduled.  */
711 static int
712 pair_delay (struct delay_pair *p)
713 {
714 if (p->stages == 0)
715 return p->cycles;
716 else
717 return p->stages * modulo_ii;
718 }
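/* A standalone worked example of the computation above: with an
   initiation interval of 8, a pair recorded as 2 stages apart must be
   separated by 16 cycles, while a zero stage count falls back to the
   recorded cycle count.  Toy values, not from a real port.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int ii = 8, stages = 2, cycles = 3;

  printf ("%d\n", stages ? stages * ii : cycles);	/* Prints 16.  */
  return 0;
}
#endif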
719
720 /* Given an insn INSN, add a dependence on its delayed shadow if it
721 has one. Also try to find situations where shadows depend on each other
722 and add dependencies to the real insns to limit the amount of backtracking
723 needed. */
724 void
725 add_delay_dependencies (rtx insn)
726 {
727 struct delay_pair *pair;
728 sd_iterator_def sd_it;
729 dep_t dep;
730
731 if (!delay_htab)
732 return;
733
734 pair
735 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
736 htab_hash_pointer (insn));
737 if (!pair)
738 return;
739 add_dependence (insn, pair->i1, REG_DEP_ANTI);
740 if (pair->stages)
741 return;
742
743 FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
744 {
745 rtx pro = DEP_PRO (dep);
746 struct delay_pair *other_pair
747 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, pro,
748 htab_hash_pointer (pro));
749 if (!other_pair || other_pair->stages)
750 continue;
751 if (pair_delay (other_pair) >= pair_delay (pair))
752 {
753 if (sched_verbose >= 4)
754 {
755 fprintf (sched_dump, ";;\tadding dependence %d <- %d\n",
756 INSN_UID (other_pair->i1),
757 INSN_UID (pair->i1));
758 fprintf (sched_dump, ";;\tpair1 %d <- %d, cost %d\n",
759 INSN_UID (pair->i1),
760 INSN_UID (pair->i2),
761 pair_delay (pair));
762 fprintf (sched_dump, ";;\tpair2 %d <- %d, cost %d\n",
763 INSN_UID (other_pair->i1),
764 INSN_UID (other_pair->i2),
765 pair_delay (other_pair));
766 }
767 add_dependence (pair->i1, other_pair->i1, REG_DEP_ANTI);
768 }
769 }
770 }
771 \f
772 /* Forward declarations. */
773
774 static int priority (rtx);
775 static int rank_for_schedule (const void *, const void *);
776 static void swap_sort (rtx *, int);
777 static void queue_insn (rtx, int, const char *);
778 static int schedule_insn (rtx);
779 static void adjust_priority (rtx);
780 static void advance_one_cycle (void);
781 static void extend_h_i_d (void);
782
783
784 /* Notes handling mechanism:
785 =========================
786 Generally, NOTES are saved before scheduling and restored after scheduling.
787 The scheduler distinguishes between two types of notes:
788
789 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
790 Before scheduling a region, a pointer to the note is added to the insn
791 that follows or precedes it. (This happens as part of the data dependence
792 computation). After scheduling an insn, the pointer contained in it is
793 used for regenerating the corresponding note (in reemit_notes).
794
795 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
796 these notes are put in a list (in rm_other_notes() and
797 unlink_other_notes ()). After scheduling the block, these notes are
798 inserted at the beginning of the block (in schedule_block()). */
799
800 static void ready_add (struct ready_list *, rtx, bool);
801 static rtx ready_remove_first (struct ready_list *);
802 static rtx ready_remove_first_dispatch (struct ready_list *ready);
803
804 static void queue_to_ready (struct ready_list *);
805 static int early_queue_to_ready (state_t, struct ready_list *);
806
807 static void debug_ready_list (struct ready_list *);
808
809 /* The following functions are used to implement multi-pass scheduling
810 on the first cycle. */
811 static rtx ready_remove (struct ready_list *, int);
812 static void ready_remove_insn (rtx);
813
814 static void fix_inter_tick (rtx, rtx);
815 static int fix_tick_ready (rtx);
816 static void change_queue_index (rtx, int);
817
818 /* The following functions are used to implement scheduling of data/control
819 speculative instructions. */
820
821 static void extend_h_i_d (void);
822 static void init_h_i_d (rtx);
823 static void generate_recovery_code (rtx);
824 static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
825 static void begin_speculative_block (rtx);
826 static void add_to_speculative_block (rtx);
827 static void init_before_recovery (basic_block *);
828 static void create_check_block_twin (rtx, bool);
829 static void fix_recovery_deps (basic_block);
830 static void haifa_change_pattern (rtx, rtx);
831 static void dump_new_block_header (int, basic_block, rtx, rtx);
832 static void restore_bb_notes (basic_block);
833 static void fix_jump_move (rtx);
834 static void move_block_after_check (rtx);
835 static void move_succs (VEC(edge,gc) **, basic_block);
836 static void sched_remove_insn (rtx);
837 static void clear_priorities (rtx, rtx_vec_t *);
838 static void calc_priorities (rtx_vec_t);
839 static void add_jump_dependencies (rtx, rtx);
840
841 #endif /* INSN_SCHEDULING */
842 \f
843 /* Point to state used for the current scheduling pass. */
844 struct haifa_sched_info *current_sched_info;
845 \f
846 #ifndef INSN_SCHEDULING
847 void
848 schedule_insns (void)
849 {
850 }
851 #else
852
853 /* Do register-pressure-sensitive insn scheduling if the flag is
854    set.  */
855 bool sched_pressure_p;
856
857 /* Map regno -> its pressure class.  The map is defined only when
858    SCHED_PRESSURE_P is true.  */
859 enum reg_class *sched_regno_pressure_class;
860
861 /* The current register pressure.  Only elements corresponding to
862    pressure classes are defined.  */
863 static int curr_reg_pressure[N_REG_CLASSES];
864
865 /* Saved value of the previous array. */
866 static int saved_reg_pressure[N_REG_CLASSES];
867
868 /* Registers living at the given scheduling point.  */
869 static bitmap curr_reg_live;
870
871 /* Saved value of the previous array. */
872 static bitmap saved_reg_live;
873
874 /* Registers mentioned in the current region. */
875 static bitmap region_ref_regs;
876
877 /* Initiate register-pressure-related info for scheduling the current
878    region.  Currently it only clears the registers mentioned in the
879    current region.  */
880 void
881 sched_init_region_reg_pressure_info (void)
882 {
883 bitmap_clear (region_ref_regs);
884 }
885
886 /* Update current register pressure related info after birth (if
887 BIRTH_P) or death of register REGNO. */
888 static void
889 mark_regno_birth_or_death (int regno, bool birth_p)
890 {
891 enum reg_class pressure_class;
892
893 pressure_class = sched_regno_pressure_class[regno];
894 if (regno >= FIRST_PSEUDO_REGISTER)
895 {
896 if (pressure_class != NO_REGS)
897 {
898 if (birth_p)
899 {
900 bitmap_set_bit (curr_reg_live, regno);
901 curr_reg_pressure[pressure_class]
902 += (ira_reg_class_max_nregs
903 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
904 }
905 else
906 {
907 bitmap_clear_bit (curr_reg_live, regno);
908 curr_reg_pressure[pressure_class]
909 -= (ira_reg_class_max_nregs
910 [pressure_class][PSEUDO_REGNO_MODE (regno)]);
911 }
912 }
913 }
914 else if (pressure_class != NO_REGS
915 && ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
916 {
917 if (birth_p)
918 {
919 bitmap_set_bit (curr_reg_live, regno);
920 curr_reg_pressure[pressure_class]++;
921 }
922 else
923 {
924 bitmap_clear_bit (curr_reg_live, regno);
925 curr_reg_pressure[pressure_class]--;
926 }
927 }
928 }
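/* A standalone sketch of the bookkeeping above: a hard register
   changes its class's pressure by one, while a pseudo is charged the
   number of hard registers its mode needs in its class (2 in this
   toy).  Invented numbers, not IRA's tables.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int pressure = 3;
  int pseudo_nregs = 2;		/* E.g. a double-word pseudo.  */

  pressure += pseudo_nregs;	/* Birth of the pseudo.  */
  printf ("%d\n", pressure);	/* Prints 5.  */
  pressure -= pseudo_nregs;	/* Death of the pseudo.  */
  printf ("%d\n", pressure);	/* Prints 3.  */
  return 0;
}
#endif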
929
930 /* Initiate current register pressure related info from living
931 registers given by LIVE. */
932 static void
933 initiate_reg_pressure_info (bitmap live)
934 {
935 int i;
936 unsigned int j;
937 bitmap_iterator bi;
938
939 for (i = 0; i < ira_pressure_classes_num; i++)
940 curr_reg_pressure[ira_pressure_classes[i]] = 0;
941 bitmap_clear (curr_reg_live);
942 EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
943 if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
944 mark_regno_birth_or_death (j, true);
945 }
946
947 /* Mark registers in X as mentioned in the current region. */
948 static void
949 setup_ref_regs (rtx x)
950 {
951 int i, j, regno;
952 const RTX_CODE code = GET_CODE (x);
953 const char *fmt;
954
955 if (REG_P (x))
956 {
957 regno = REGNO (x);
958 if (HARD_REGISTER_NUM_P (regno))
959 bitmap_set_range (region_ref_regs, regno,
960 hard_regno_nregs[regno][GET_MODE (x)]);
961 else
962 bitmap_set_bit (region_ref_regs, REGNO (x));
963 return;
964 }
965 fmt = GET_RTX_FORMAT (code);
966 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
967 if (fmt[i] == 'e')
968 setup_ref_regs (XEXP (x, i));
969 else if (fmt[i] == 'E')
970 {
971 for (j = 0; j < XVECLEN (x, i); j++)
972 setup_ref_regs (XVECEXP (x, i, j));
973 }
974 }
975
976 /* Initiate current register pressure related info at the start of
977 basic block BB. */
978 static void
979 initiate_bb_reg_pressure_info (basic_block bb)
980 {
981 unsigned int i ATTRIBUTE_UNUSED;
982 rtx insn;
983
984 if (current_nr_blocks > 1)
985 FOR_BB_INSNS (bb, insn)
986 if (NONDEBUG_INSN_P (insn))
987 setup_ref_regs (PATTERN (insn));
988 initiate_reg_pressure_info (df_get_live_in (bb));
989 #ifdef EH_RETURN_DATA_REGNO
990 if (bb_has_eh_pred (bb))
991 for (i = 0; ; ++i)
992 {
993 unsigned int regno = EH_RETURN_DATA_REGNO (i);
994
995 if (regno == INVALID_REGNUM)
996 break;
997 if (! bitmap_bit_p (df_get_live_in (bb), regno))
998 mark_regno_birth_or_death (regno, true);
999 }
1000 #endif
1001 }
1002
1003 /* Save current register pressure related info. */
1004 static void
1005 save_reg_pressure (void)
1006 {
1007 int i;
1008
1009 for (i = 0; i < ira_pressure_classes_num; i++)
1010 saved_reg_pressure[ira_pressure_classes[i]]
1011 = curr_reg_pressure[ira_pressure_classes[i]];
1012 bitmap_copy (saved_reg_live, curr_reg_live);
1013 }
1014
1015 /* Restore saved register pressure related info. */
1016 static void
1017 restore_reg_pressure (void)
1018 {
1019 int i;
1020
1021 for (i = 0; i < ira_pressure_classes_num; i++)
1022 curr_reg_pressure[ira_pressure_classes[i]]
1023 = saved_reg_pressure[ira_pressure_classes[i]];
1024 bitmap_copy (curr_reg_live, saved_reg_live);
1025 }
1026
1027 /* Return TRUE if the register is dying after its USE. */
1028 static bool
1029 dying_use_p (struct reg_use_data *use)
1030 {
1031 struct reg_use_data *next;
1032
1033 for (next = use->next_regno_use; next != use; next = next->next_regno_use)
1034 if (NONDEBUG_INSN_P (next->insn)
1035 && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
1036 return false;
1037 return true;
1038 }
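/* A standalone sketch of the circular per-regno use list walked
   above: each use points to the next use of the same register, and
   the last one points back to the first, so "no other unscheduled
   use" can be tested in one loop.  Toy types, not GCC's.  */
#if 0
#include <stdio.h>

struct toy_use
{
  int insn_scheduled;		/* Nonzero once the using insn issued.  */
  struct toy_use *next_regno_use;
};

static int
toy_dying_use_p (struct toy_use *use)
{
  struct toy_use *next;

  for (next = use->next_regno_use; next != use; next = next->next_regno_use)
    if (!next->insn_scheduled)
      return 0;
  return 1;
}

int
main (void)
{
  struct toy_use a, b, c;

  a.next_regno_use = &b;
  b.next_regno_use = &c;
  c.next_regno_use = &a;
  a.insn_scheduled = 0;		/* The use we are about to schedule.  */
  b.insn_scheduled = 1;
  c.insn_scheduled = 0;		/* Another use still pending.  */
  printf ("%d\n", toy_dying_use_p (&a));	/* Prints 0.  */
  c.insn_scheduled = 1;
  printf ("%d\n", toy_dying_use_p (&a));	/* Prints 1.  */
  return 0;
}
#endif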
1039
1040 /* Print info about the current register pressure and its excess for
1041 each pressure class. */
1042 static void
1043 print_curr_reg_pressure (void)
1044 {
1045 int i;
1046 enum reg_class cl;
1047
1048 fprintf (sched_dump, ";;\t");
1049 for (i = 0; i < ira_pressure_classes_num; i++)
1050 {
1051 cl = ira_pressure_classes[i];
1052 gcc_assert (curr_reg_pressure[cl] >= 0);
1053 fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
1054 curr_reg_pressure[cl],
1055 curr_reg_pressure[cl] - ira_available_class_regs[cl]);
1056 }
1057 fprintf (sched_dump, "\n");
1058 }
1059
1060 /* Pointer to the last instruction scheduled. */
1061 static rtx last_scheduled_insn;
1062
1063 /* Pointer to the last nondebug instruction scheduled within the
1064 block, or the prev_head of the scheduling block. Used by
1065 rank_for_schedule, so that insns independent of the last scheduled
1066 insn will be preferred over dependent instructions. */
1067 static rtx last_nondebug_scheduled_insn;
1068
1069 /* Pointer that iterates through the list of unscheduled insns if we
1070 have a dbg_cnt enabled. It always points at an insn prior to the
1071 first unscheduled one. */
1072 static rtx nonscheduled_insns_begin;
1073
1074 /* Cached cost of the instruction.  Use the function below to get the cost
1075    of the insn.  -1 here means that the field is not initialized.  */
1076 #define INSN_COST(INSN) (HID (INSN)->cost)
1077
1078 /* Compute cost of executing INSN.
1079 This is the number of cycles between instruction issue and
1080 instruction results. */
1081 int
1082 insn_cost (rtx insn)
1083 {
1084 int cost;
1085
1086 if (sel_sched_p ())
1087 {
1088 if (recog_memoized (insn) < 0)
1089 return 0;
1090
1091 cost = insn_default_latency (insn);
1092 if (cost < 0)
1093 cost = 0;
1094
1095 return cost;
1096 }
1097
1098 cost = INSN_COST (insn);
1099
1100 if (cost < 0)
1101 {
1102       /* A USE insn, or something else we don't need to
1103 	 understand.  We can't pass these directly to
1104 	 result_ready_cost or insn_default_latency because they will
1105 	 trigger a fatal error for unrecognizable insns.  */
1106 if (recog_memoized (insn) < 0)
1107 {
1108 INSN_COST (insn) = 0;
1109 return 0;
1110 }
1111 else
1112 {
1113 cost = insn_default_latency (insn);
1114 if (cost < 0)
1115 cost = 0;
1116
1117 INSN_COST (insn) = cost;
1118 }
1119 }
1120
1121 return cost;
1122 }
1123
1124 /* Compute cost of dependence LINK.
1125 This is the number of cycles between instruction issue and
1126 instruction results.
1127 ??? We also use this function to call recog_memoized on all insns. */
1128 int
1129 dep_cost_1 (dep_t link, dw_t dw)
1130 {
1131 rtx insn = DEP_PRO (link);
1132 rtx used = DEP_CON (link);
1133 int cost;
1134
1135 if (DEP_COST (link) != UNKNOWN_DEP_COST)
1136 return DEP_COST (link);
1137
1138 if (delay_htab)
1139 {
1140 struct delay_pair *delay_entry;
1141 delay_entry
1142 = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, used,
1143 htab_hash_pointer (used));
1144 if (delay_entry)
1145 {
1146 if (delay_entry->i1 == insn)
1147 {
1148 DEP_COST (link) = pair_delay (delay_entry);
1149 return DEP_COST (link);
1150 }
1151 }
1152 }
1153
1154 /* A USE insn should never require the value used to be computed.
1155 This allows the computation of a function's result and parameter
1156 values to overlap the return and call. We don't care about the
1157 dependence cost when only decreasing register pressure. */
1158 if (recog_memoized (used) < 0)
1159 {
1160 cost = 0;
1161 recog_memoized (insn);
1162 }
1163 else
1164 {
1165 enum reg_note dep_type = DEP_TYPE (link);
1166
1167 cost = insn_cost (insn);
1168
1169 if (INSN_CODE (insn) >= 0)
1170 {
1171 if (dep_type == REG_DEP_ANTI)
1172 cost = 0;
1173 else if (dep_type == REG_DEP_OUTPUT)
1174 {
1175 cost = (insn_default_latency (insn)
1176 - insn_default_latency (used));
1177 if (cost <= 0)
1178 cost = 1;
1179 }
1180 else if (bypass_p (insn))
1181 cost = insn_latency (insn, used);
1182 }
1183
1184
1185 if (targetm.sched.adjust_cost_2)
1186 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
1187 dw);
1188 else if (targetm.sched.adjust_cost != NULL)
1189 {
1190 /* This variable is used for backward compatibility with the
1191 targets. */
1192 rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
1193
1194       /* Make it self-cycled, so that if someone tries to walk over this
1195 	 incomplete list they will be caught in an endless loop.  */
1196 XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
1197
1198 /* Targets use only REG_NOTE_KIND of the link. */
1199 PUT_REG_NOTE_KIND (dep_cost_rtx_link, DEP_TYPE (link));
1200
1201 cost = targetm.sched.adjust_cost (used, dep_cost_rtx_link,
1202 insn, cost);
1203
1204 free_INSN_LIST_node (dep_cost_rtx_link);
1205 }
1206
1207 if (cost < 0)
1208 cost = 0;
1209 }
1210
1211 DEP_COST (link) = cost;
1212 return cost;
1213 }
1214
1215 /* Compute cost of dependence LINK.
1216 This is the number of cycles between instruction issue and
1217 instruction results. */
1218 int
1219 dep_cost (dep_t link)
1220 {
1221 return dep_cost_1 (link, 0);
1222 }
1223
1224 /* Use this sel-sched.c friendly function in reorder2 instead of increasing
1225 INSN_PRIORITY explicitly. */
1226 void
1227 increase_insn_priority (rtx insn, int amount)
1228 {
1229 if (!sel_sched_p ())
1230 {
1231 /* We're dealing with haifa-sched.c INSN_PRIORITY. */
1232 if (INSN_PRIORITY_KNOWN (insn))
1233 INSN_PRIORITY (insn) += amount;
1234 }
1235 else
1236 {
1237 /* In sel-sched.c INSN_PRIORITY is not kept up to date.
1238 Use EXPR_PRIORITY instead. */
1239 sel_add_to_insn_priority (insn, amount);
1240 }
1241 }
1242
1243 /* Return 'true' if DEP should be included in priority calculations. */
1244 static bool
1245 contributes_to_priority_p (dep_t dep)
1246 {
1247 if (DEBUG_INSN_P (DEP_CON (dep))
1248 || DEBUG_INSN_P (DEP_PRO (dep)))
1249 return false;
1250
1251 /* Critical path is meaningful in block boundaries only. */
1252 if (!current_sched_info->contributes_to_priority (DEP_CON (dep),
1253 DEP_PRO (dep)))
1254 return false;
1255
1256   /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set, then speculative
1257      instructions will be less likely to be scheduled.  That is
1258      because the priority of their producers will increase, and,
1259      thus, the producers will be more likely to be scheduled, thus
1260      resolving the dependence.  */
1262 if (sched_deps_info->generate_spec_deps
1263 && !(spec_info->flags & COUNT_SPEC_IN_CRITICAL_PATH)
1264 && (DEP_STATUS (dep) & SPECULATIVE))
1265 return false;
1266
1267 return true;
1268 }
1269
1270 /* Compute the number of nondebug forward deps of an insn. */
1271
1272 static int
1273 dep_list_size (rtx insn)
1274 {
1275 sd_iterator_def sd_it;
1276 dep_t dep;
1277 int dbgcount = 0, nodbgcount = 0;
1278
1279 if (!MAY_HAVE_DEBUG_INSNS)
1280 return sd_lists_size (insn, SD_LIST_FORW);
1281
1282 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
1283 {
1284 if (DEBUG_INSN_P (DEP_CON (dep)))
1285 dbgcount++;
1286 else if (!DEBUG_INSN_P (DEP_PRO (dep)))
1287 nodbgcount++;
1288 }
1289
1290 gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
1291
1292 return nodbgcount;
1293 }
1294
1295 /* Compute the priority number for INSN. */
1296 static int
1297 priority (rtx insn)
1298 {
1299 if (! INSN_P (insn))
1300 return 0;
1301
1302 /* We should not be interested in priority of an already scheduled insn. */
1303 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
1304
1305 if (!INSN_PRIORITY_KNOWN (insn))
1306 {
1307 int this_priority = -1;
1308
1309 if (dep_list_size (insn) == 0)
1310       /* ??? We should set INSN_PRIORITY to insn_cost when an insn has
1311 	 some forward deps but all of them are ignored by the
1312 	 contributes_to_priority hook.  At the moment we set the priority
1313 	 of such an insn to 0.  */
1314 this_priority = insn_cost (insn);
1315 else
1316 {
1317 rtx prev_first, twin;
1318 basic_block rec;
1319
1320 	  /* For recovery check instructions we calculate priority slightly
1321 	     differently than for normal instructions.  Instead of walking
1322 	     through the INSN_FORW_DEPS (check) list, we walk through the
1323 	     INSN_FORW_DEPS list of each instruction in the corresponding
1324 	     recovery block.  */
1325
1326 /* Selective scheduling does not define RECOVERY_BLOCK macro. */
1327 rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
1328 if (!rec || rec == EXIT_BLOCK_PTR)
1329 {
1330 prev_first = PREV_INSN (insn);
1331 twin = insn;
1332 }
1333 else
1334 {
1335 prev_first = NEXT_INSN (BB_HEAD (rec));
1336 twin = PREV_INSN (BB_END (rec));
1337 }
1338
1339 do
1340 {
1341 sd_iterator_def sd_it;
1342 dep_t dep;
1343
1344 FOR_EACH_DEP (twin, SD_LIST_FORW, sd_it, dep)
1345 {
1346 rtx next;
1347 int next_priority;
1348
1349 next = DEP_CON (dep);
1350
1351 if (BLOCK_FOR_INSN (next) != rec)
1352 {
1353 int cost;
1354
1355 if (!contributes_to_priority_p (dep))
1356 continue;
1357
1358 if (twin == insn)
1359 cost = dep_cost (dep);
1360 else
1361 {
1362 struct _dep _dep1, *dep1 = &_dep1;
1363
1364 init_dep (dep1, insn, next, REG_DEP_ANTI);
1365
1366 cost = dep_cost (dep1);
1367 }
1368
1369 next_priority = cost + priority (next);
1370
1371 if (next_priority > this_priority)
1372 this_priority = next_priority;
1373 }
1374 }
1375
1376 twin = PREV_INSN (twin);
1377 }
1378 while (twin != prev_first);
1379 }
1380
1381 if (this_priority < 0)
1382 {
1383 gcc_assert (this_priority == -1);
1384
1385 this_priority = insn_cost (insn);
1386 }
1387
1388 INSN_PRIORITY (insn) = this_priority;
1389 INSN_PRIORITY_STATUS (insn) = 1;
1390 }
1391
1392 return INSN_PRIORITY (insn);
1393 }
1394 \f
1395 /* Macros and functions for keeping the priority queue sorted, and
1396 dealing with queuing and dequeuing of instructions. */
1397
1398 #define SCHED_SORT(READY, N_READY) \
1399 do { if ((N_READY) == 2) \
1400 swap_sort (READY, N_READY); \
1401 else if ((N_READY) > 2) \
1402 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
1403 while (0)
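/* A standalone sketch of why the comparator must never return 0:
   qsort is not stable, so ties are broken by an original-order key
   (LUID in the real code) to keep the sort deterministic.  The real
   ready list keeps the best insn at its tail; this toy simply sorts
   best-first.  Toy data, not GCC's.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy { int priority, luid; };

static int
toy_rank (const void *x, const void *y)
{
  const struct toy *a = (const struct toy *) x;
  const struct toy *b = (const struct toy *) y;

  if (b->priority != a->priority)
    return b->priority - a->priority;	/* Higher priority first.  */
  return a->luid - b->luid;		/* Tie-break: original order.  */
}

int
main (void)
{
  struct toy v[3] = { { 1, 2 }, { 2, 1 }, { 1, 0 } };
  int i;

  qsort (v, 3, sizeof (v[0]), toy_rank);
  for (i = 0; i < 3; i++)
    printf ("priority %d, luid %d\n", v[i].priority, v[i].luid);
  return 0;
}
#endif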
1404
1405 /* Setup info about the current register pressure impact of scheduling
1406 INSN at the current scheduling point. */
1407 static void
1408 setup_insn_reg_pressure_info (rtx insn)
1409 {
1410 int i, change, before, after, hard_regno;
1411 int excess_cost_change;
1412 enum machine_mode mode;
1413 enum reg_class cl;
1414 struct reg_pressure_data *pressure_info;
1415 int *max_reg_pressure;
1416 struct reg_use_data *use;
1417 static int death[N_REG_CLASSES];
1418
1419 gcc_checking_assert (!DEBUG_INSN_P (insn));
1420
1421 excess_cost_change = 0;
1422 for (i = 0; i < ira_pressure_classes_num; i++)
1423 death[ira_pressure_classes[i]] = 0;
1424 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1425 if (dying_use_p (use))
1426 {
1427 cl = sched_regno_pressure_class[use->regno];
1428 if (use->regno < FIRST_PSEUDO_REGISTER)
1429 death[cl]++;
1430 else
1431 death[cl]
1432 += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
1433 }
1434 pressure_info = INSN_REG_PRESSURE (insn);
1435 max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
1436 gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
1437 for (i = 0; i < ira_pressure_classes_num; i++)
1438 {
1439 cl = ira_pressure_classes[i];
1440 gcc_assert (curr_reg_pressure[cl] >= 0);
1441 change = (int) pressure_info[i].set_increase - death[cl];
1442 before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
1443 after = MAX (0, max_reg_pressure[i] + change
1444 - ira_available_class_regs[cl]);
1445 hard_regno = ira_class_hard_regs[cl][0];
1446 gcc_assert (hard_regno >= 0);
1447 mode = reg_raw_mode[hard_regno];
1448 excess_cost_change += ((after - before)
1449 * (ira_memory_move_cost[mode][cl][0]
1450 + ira_memory_move_cost[mode][cl][1]));
1451 }
1452 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
1453 }
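/* A worked standalone example of the excess-cost arithmetic above:
   with 4 available registers in a class, current max pressure 5, an
   insn that sets one more register than it kills, and a memory move
   costing 2 each way, the pressure excess grows from 1 to 2 and the
   insn is charged (2 - 1) * (2 + 2) == 4.  Toy numbers, not a real
   target's costs.  */
#if 0
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int
main (void)
{
  int available = 4, max_pressure = 5;
  int set_increase = 1, deaths = 0;
  int mem_cost_in = 2, mem_cost_out = 2;

  int change = set_increase - deaths;
  int before = MAX (0, max_pressure - available);
  int after = MAX (0, max_pressure + change - available);

  printf ("%d\n", (after - before) * (mem_cost_in + mem_cost_out));
  return 0;
}
#endif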
1454
1455 /* Returns a positive value if x is preferred; returns a negative value if
1456 y is preferred. Should never return 0, since that will make the sort
1457 unstable. */
1458
1459 static int
1460 rank_for_schedule (const void *x, const void *y)
1461 {
1462 rtx tmp = *(const rtx *) y;
1463 rtx tmp2 = *(const rtx *) x;
1464 int tmp_class, tmp2_class;
1465 int val, priority_val, info_val;
1466
1467 if (MAY_HAVE_DEBUG_INSNS)
1468 {
1469 /* Schedule debug insns as early as possible. */
1470 if (DEBUG_INSN_P (tmp) && !DEBUG_INSN_P (tmp2))
1471 return -1;
1472 else if (DEBUG_INSN_P (tmp2))
1473 return 1;
1474 }
1475
1476   /* The insn in a schedule group should be issued first.  */
1477 if (flag_sched_group_heuristic &&
1478 SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
1479 return SCHED_GROUP_P (tmp2) ? 1 : -1;
1480
1481 /* Make sure that priority of TMP and TMP2 are initialized. */
1482 gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
1483
1484 if (sched_pressure_p)
1485 {
1486 int diff;
1487
1488 /* Prefer insn whose scheduling results in the smallest register
1489 pressure excess. */
1490 if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
1491 + (INSN_TICK (tmp) > clock_var
1492 ? INSN_TICK (tmp) - clock_var : 0)
1493 - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
1494 - (INSN_TICK (tmp2) > clock_var
1495 ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
1496 return diff;
1497 }
1498
1499
1500 if (sched_pressure_p
1501 && (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
1502 {
1503 if (INSN_TICK (tmp) <= clock_var)
1504 return -1;
1505 else if (INSN_TICK (tmp2) <= clock_var)
1506 return 1;
1507 else
1508 return INSN_TICK (tmp) - INSN_TICK (tmp2);
1509 }
1510
1511 /* If we are doing backtracking in this schedule, prefer insns that
1512 have forward dependencies with negative cost against an insn that
1513 was already scheduled. */
1514 if (current_sched_info->flags & DO_BACKTRACKING)
1515 {
1516 priority_val = FEEDS_BACKTRACK_INSN (tmp2) - FEEDS_BACKTRACK_INSN (tmp);
1517 if (priority_val)
1518 return priority_val;
1519 }
1520
1521 /* Prefer insn with higher priority. */
1522 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
1523
1524 if (flag_sched_critical_path_heuristic && priority_val)
1525 return priority_val;
1526
1527   /* Prefer the speculative insn with the greater dependency weakness.  */
1528 if (flag_sched_spec_insn_heuristic && spec_info)
1529 {
1530 ds_t ds1, ds2;
1531 dw_t dw1, dw2;
1532 int dw;
1533
1534 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
1535 if (ds1)
1536 dw1 = ds_weak (ds1);
1537 else
1538 dw1 = NO_DEP_WEAK;
1539
1540 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
1541 if (ds2)
1542 dw2 = ds_weak (ds2);
1543 else
1544 dw2 = NO_DEP_WEAK;
1545
1546 dw = dw2 - dw1;
1547 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
1548 return dw;
1549 }
1550
1551 info_val = (*current_sched_info->rank) (tmp, tmp2);
1552   if (flag_sched_rank_heuristic && info_val)
1553 return info_val;
1554
1555 /* Compare insns based on their relation to the last scheduled
1556 non-debug insn. */
1557 if (flag_sched_last_insn_heuristic && last_nondebug_scheduled_insn)
1558 {
1559 dep_t dep1;
1560 dep_t dep2;
1561 rtx last = last_nondebug_scheduled_insn;
1562
1563 /* Classify the instructions into three classes:
1564          1) Data dependent on last scheduled insn.
1565 2) Anti/Output dependent on last scheduled insn.
1566 3) Independent of last scheduled insn, or has latency of one.
1567 Choose the insn from the highest numbered class if different. */
1568 dep1 = sd_find_dep_between (last, tmp, true);
1569
1570 if (dep1 == NULL || dep_cost (dep1) == 1)
1571 tmp_class = 3;
1572 else if (/* Data dependence. */
1573 DEP_TYPE (dep1) == REG_DEP_TRUE)
1574 tmp_class = 1;
1575 else
1576 tmp_class = 2;
1577
1578 dep2 = sd_find_dep_between (last, tmp2, true);
1579
1580 if (dep2 == NULL || dep_cost (dep2) == 1)
1581 tmp2_class = 3;
1582 else if (/* Data dependence. */
1583 DEP_TYPE (dep2) == REG_DEP_TRUE)
1584 tmp2_class = 1;
1585 else
1586 tmp2_class = 2;
1587
1588 if ((val = tmp2_class - tmp_class))
1589 return val;
1590 }
1591
1592 /* Prefer the insn which has more later insns that depend on it.
1593 This gives the scheduler more freedom when scheduling later
1594 instructions at the expense of added register pressure. */
1595
1596 val = (dep_list_size (tmp2) - dep_list_size (tmp));
1597
1598 if (flag_sched_dep_count_heuristic && val != 0)
1599 return val;
1600
1601 /* If insns are equally good, sort by INSN_LUID (original insn order),
1602 so that we make the sort stable. This minimizes instruction movement,
1603 thus minimizing sched's effect on debugging and cross-jumping. */
1604 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1605 }
1606
1607 /* Resort the array A in which only the element at index N - 1 may be out of order.  */
1608
1609 HAIFA_INLINE static void
1610 swap_sort (rtx *a, int n)
1611 {
1612 rtx insn = a[n - 1];
1613 int i = n - 2;
1614
1615 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1616 {
1617 a[i + 1] = a[i];
1618 i -= 1;
1619 }
1620 a[i + 1] = insn;
1621 }
1622
1623 /* Add INSN to the insn queue so that it can be executed at least
1624    N_CYCLES after the currently executing insn.  Preserve the insn
1625    chain for debugging purposes.  REASON will be printed in the
1626    debugging output.  */
1627
1628 HAIFA_INLINE static void
1629 queue_insn (rtx insn, int n_cycles, const char *reason)
1630 {
1631 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1632 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1633 int new_tick;
1634
1635 gcc_assert (n_cycles <= max_insn_queue_index);
1636 gcc_assert (!DEBUG_INSN_P (insn));
1637
1638 insn_queue[next_q] = link;
1639 q_size += 1;
1640
1641 if (sched_verbose >= 2)
1642 {
1643 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1644 (*current_sched_info->print_insn) (insn, 0));
1645
1646 fprintf (sched_dump, "queued for %d cycles (%s).\n", n_cycles, reason);
1647 }
1648
1649 QUEUE_INDEX (insn) = next_q;
1650
1651 if (current_sched_info->flags & DO_BACKTRACKING)
1652 {
1653 new_tick = clock_var + n_cycles;
1654 if (INSN_TICK (insn) == INVALID_TICK || INSN_TICK (insn) < new_tick)
1655 INSN_TICK (insn) = new_tick;
1656
1657 if (INSN_EXACT_TICK (insn) != INVALID_TICK
1658 && INSN_EXACT_TICK (insn) < clock_var + n_cycles)
1659 {
1660 must_backtrack = true;
1661 if (sched_verbose >= 2)
1662 fprintf (sched_dump, ";;\t\tcausing a backtrack.\n");
1663 }
1664 }
1665 }
1666
1667 /* Remove INSN from queue. */
1668 static void
1669 queue_remove (rtx insn)
1670 {
1671 gcc_assert (QUEUE_INDEX (insn) >= 0);
1672 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
1673 q_size--;
1674 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
1675 }
1676
1677 /* Return a pointer to the bottom of the ready list, i.e. the insn
1678 with the lowest priority. */
1679
1680 rtx *
1681 ready_lastpos (struct ready_list *ready)
1682 {
1683 gcc_assert (ready->n_ready >= 1);
1684 return ready->vec + ready->first - ready->n_ready + 1;
1685 }
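/* A standalone sketch of the ready-list layout used above: the
   N_READY elements live in VEC[FIRST - N_READY + 1 .. FIRST], with
   the highest-priority insn at VEC[FIRST] and ready_lastpos pointing
   at the other end.  Toy integers stand in for insns.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int vec[8] = { 0 };
  int first = 7, n_ready = 3;

  vec[7] = 30;	/* Highest priority.  */
  vec[6] = 20;
  vec[5] = 10;	/* Lowest priority: *ready_lastpos ().  */
  printf ("lastpos -> %d, element 0 -> %d\n",
	  vec[first - n_ready + 1], vec[first - 0]);
  return 0;
}
#endif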
1686
1687 /* Add an element INSN to the ready list so that it ends up with the
1688 lowest/highest priority depending on FIRST_P. */
1689
1690 HAIFA_INLINE static void
1691 ready_add (struct ready_list *ready, rtx insn, bool first_p)
1692 {
1693 if (!first_p)
1694 {
1695 if (ready->first == ready->n_ready)
1696 {
1697 memmove (ready->vec + ready->veclen - ready->n_ready,
1698 ready_lastpos (ready),
1699 ready->n_ready * sizeof (rtx));
1700 ready->first = ready->veclen - 1;
1701 }
1702 ready->vec[ready->first - ready->n_ready] = insn;
1703 }
1704 else
1705 {
1706 if (ready->first == ready->veclen - 1)
1707 {
1708 if (ready->n_ready)
1709 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1710 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1711 ready_lastpos (ready),
1712 ready->n_ready * sizeof (rtx));
1713 ready->first = ready->veclen - 2;
1714 }
1715 ready->vec[++(ready->first)] = insn;
1716 }
1717
1718 ready->n_ready++;
1719 if (DEBUG_INSN_P (insn))
1720 ready->n_debug++;
1721
1722 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1723 QUEUE_INDEX (insn) = QUEUE_READY;
1724
1725 if (INSN_EXACT_TICK (insn) != INVALID_TICK
1726 && INSN_EXACT_TICK (insn) < clock_var)
1727 {
1728 must_backtrack = true;
1729 }
1730 }
1731
1732 /* Remove the element with the highest priority from the ready list and
1733 return it. */
1734
1735 HAIFA_INLINE static rtx
1736 ready_remove_first (struct ready_list *ready)
1737 {
1738 rtx t;
1739
1740 gcc_assert (ready->n_ready);
1741 t = ready->vec[ready->first--];
1742 ready->n_ready--;
1743 if (DEBUG_INSN_P (t))
1744 ready->n_debug--;
1745 /* If the ready list becomes empty, reset it. */
1746 if (ready->n_ready == 0)
1747 ready->first = ready->veclen - 1;
1748
1749 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1750 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1751
1752 return t;
1753 }
1754
1755 /* The following code implements multi-pass scheduling for the first
1756 cycle. In other words, we try to choose the ready insn that
1757 permits starting the maximum number of insns on the same cycle. */
1758
1759 /* Return a pointer to the element INDEX of the ready list. INDEX 0
1760 is the insn with the highest priority; the insn with the lowest
1761 priority has INDEX N_READY - 1. */
1762
1763 rtx
1764 ready_element (struct ready_list *ready, int index)
1765 {
1766 gcc_assert (ready->n_ready && index < ready->n_ready);
1767
1768 return ready->vec[ready->first - index];
1769 }
1770
1771 /* Remove the element INDEX from the ready list and return it. INDEX
1772 0 is the insn with the highest priority; the insn with the lowest
1773 priority has INDEX N_READY - 1. */
1774
1775 HAIFA_INLINE static rtx
1776 ready_remove (struct ready_list *ready, int index)
1777 {
1778 rtx t;
1779 int i;
1780
1781 if (index == 0)
1782 return ready_remove_first (ready);
1783 gcc_assert (ready->n_ready && index < ready->n_ready);
1784 t = ready->vec[ready->first - index];
1785 ready->n_ready--;
1786 if (DEBUG_INSN_P (t))
1787 ready->n_debug--;
1788 for (i = index; i < ready->n_ready; i++)
1789 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1790 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1791 return t;
1792 }
1793
1794 /* Remove INSN from the ready list. */
1795 static void
1796 ready_remove_insn (rtx insn)
1797 {
1798 int i;
1799
1800 for (i = 0; i < readyp->n_ready; i++)
1801 if (ready_element (readyp, i) == insn)
1802 {
1803 ready_remove (readyp, i);
1804 return;
1805 }
1806 gcc_unreachable ();
1807 }
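/* For illustration, the usual scan pattern over the ready list with the
   indexed accessors, highest priority first (a sketch mirroring the
   loop in ready_remove_insn above; interesting_p is a placeholder):

     for (i = 0; i < readyp->n_ready; i++)
       if (interesting_p (ready_element (readyp, i)))
         ...

   Note that removing an element during such a scan shifts the remaining
   ones, so callers either return immediately (as above) or restart the
   scan. */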
1808
1809 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1810 macro. */
1811
1812 void
1813 ready_sort (struct ready_list *ready)
1814 {
1815 int i;
1816 rtx *first = ready_lastpos (ready);
1817
1818 if (sched_pressure_p)
1819 {
1820 for (i = 0; i < ready->n_ready; i++)
1821 if (!DEBUG_INSN_P (first[i]))
1822 setup_insn_reg_pressure_info (first[i]);
1823 }
1824 SCHED_SORT (first, ready->n_ready);
1825 }
1826
1827 /* PREV is an insn that is ready to execute. Adjust its priority if that
1828 will help shorten or lengthen register lifetimes as appropriate. Also
1829 provide a hook for the target to adjust the priority itself. */
1830
1831 HAIFA_INLINE static void
1832 adjust_priority (rtx prev)
1833 {
1834 /* ??? There used to be code here to try and estimate how an insn
1835 affected register lifetimes, but it did it by looking at REG_DEAD
1836 notes, which we removed in schedule_region. Nor did it try to
1837 take into account register pressure or anything useful like that.
1838
1839 Revisit when we have a machine model to work with and not before. */
1840
1841 if (targetm.sched.adjust_priority)
1842 INSN_PRIORITY (prev) =
1843 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1844 }
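/* For illustration, a minimal sketch of what a target's
   targetm.sched.adjust_priority hook might look like (hypothetical
   target; long_latency_p is an assumed predicate, not a real
   interface):

     static int
     example_adjust_priority (rtx insn, int priority)
     {
       if (long_latency_p (insn))
         return priority + 1;
       return priority;
     }

   Returning a larger value makes INSN tend to be scheduled earlier. */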
1845
1846 /* Advance DFA state STATE on one cycle. */
1847 void
1848 advance_state (state_t state)
1849 {
1850 if (targetm.sched.dfa_pre_advance_cycle)
1851 targetm.sched.dfa_pre_advance_cycle ();
1852
1853 if (targetm.sched.dfa_pre_cycle_insn)
1854 state_transition (state,
1855 targetm.sched.dfa_pre_cycle_insn ());
1856
1857 state_transition (state, NULL);
1858
1859 if (targetm.sched.dfa_post_cycle_insn)
1860 state_transition (state,
1861 targetm.sched.dfa_post_cycle_insn ());
1862
1863 if (targetm.sched.dfa_post_advance_cycle)
1864 targetm.sched.dfa_post_advance_cycle ();
1865 }
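/* For illustration, a sketch of how advance_state is typically used:
   advance a DFA state until INSN can issue without a reservation
   conflict. A negative state_transition result means the insn issues
   cleanly. This helper is hypothetical, and a real caller would bound
   the loop, since an insn without a reservation may never fit:

     static int
     cycles_until_issue (state_t state, rtx insn)
     {
       int cycles = 0;
       state_t tmp = alloca (dfa_state_size);
       memcpy (tmp, state, dfa_state_size);
       while (state_transition (tmp, insn) >= 0)
         {
           advance_state (state);
           memcpy (tmp, state, dfa_state_size);
           cycles++;
         }
       return cycles;
     }
*/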
1866
1867 /* Advance time on one cycle. */
1868 HAIFA_INLINE static void
1869 advance_one_cycle (void)
1870 {
1871 advance_state (curr_state);
1872 if (sched_verbose >= 6)
1873 fprintf (sched_dump, ";;\tAdvanced a state.\n");
1874 }
1875
1876 /* Update register pressure after scheduling INSN. */
1877 static void
1878 update_register_pressure (rtx insn)
1879 {
1880 struct reg_use_data *use;
1881 struct reg_set_data *set;
1882
1883 gcc_checking_assert (!DEBUG_INSN_P (insn));
1884
1885 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1886 if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
1887 mark_regno_birth_or_death (use->regno, false);
1888 for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
1889 mark_regno_birth_or_death (set->regno, true);
1890 }
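/* A worked example of the update above: if INSN contains the last use
   of pseudo R (a dying use) and sets a new pseudo S, then R's pressure
   class loses one unit and S's gains one; when R and S share a class,
   the net pressure of that class is unchanged. */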
1891
1892 /* Set up or update (if UPDATE_P) max register pressure (see its
1893 meaning in sched-int.h::_haifa_insn_data) for all current BB insns
1894 after insn AFTER. */
1895 static void
1896 setup_insn_max_reg_pressure (rtx after, bool update_p)
1897 {
1898 int i, p;
1899 bool eq_p;
1900 rtx insn;
1901 static int max_reg_pressure[N_REG_CLASSES];
1902
1903 save_reg_pressure ();
1904 for (i = 0; i < ira_pressure_classes_num; i++)
1905 max_reg_pressure[ira_pressure_classes[i]]
1906 = curr_reg_pressure[ira_pressure_classes[i]];
1907 for (insn = NEXT_INSN (after);
1908 insn != NULL_RTX && ! BARRIER_P (insn)
1909 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
1910 insn = NEXT_INSN (insn))
1911 if (NONDEBUG_INSN_P (insn))
1912 {
1913 eq_p = true;
1914 for (i = 0; i < ira_pressure_classes_num; i++)
1915 {
1916 p = max_reg_pressure[ira_pressure_classes[i]];
1917 if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
1918 {
1919 eq_p = false;
1920 INSN_MAX_REG_PRESSURE (insn)[i]
1921 = max_reg_pressure[ira_pressure_classes[i]];
1922 }
1923 }
1924 if (update_p && eq_p)
1925 break;
1926 update_register_pressure (insn);
1927 for (i = 0; i < ira_pressure_classes_num; i++)
1928 if (max_reg_pressure[ira_pressure_classes[i]]
1929 < curr_reg_pressure[ira_pressure_classes[i]])
1930 max_reg_pressure[ira_pressure_classes[i]]
1931 = curr_reg_pressure[ira_pressure_classes[i]];
1932 }
1933 restore_reg_pressure ();
1934 }
1935
1936 /* Update the current register pressure after scheduling INSN. Update
1937 also max register pressure for unscheduled insns of the current
1938 BB. */
1939 static void
1940 update_reg_and_insn_max_reg_pressure (rtx insn)
1941 {
1942 int i;
1943 int before[N_REG_CLASSES];
1944
1945 for (i = 0; i < ira_pressure_classes_num; i++)
1946 before[i] = curr_reg_pressure[ira_pressure_classes[i]];
1947 update_register_pressure (insn);
1948 for (i = 0; i < ira_pressure_classes_num; i++)
1949 if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
1950 break;
1951 if (i < ira_pressure_classes_num)
1952 setup_insn_max_reg_pressure (insn, true);
1953 }
1954
1955 /* Set up register pressure at the beginning of basic block BB, whose
1956 insns start after insn AFTER. Also set up the max register pressure
1957 for all insns of the basic block. */
1958 void
1959 sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
1960 {
1961 gcc_assert (sched_pressure_p);
1962 initiate_bb_reg_pressure_info (bb);
1963 setup_insn_max_reg_pressure (after, false);
1964 }
1965 \f
1966 /* A structure that holds local state for the loop in schedule_block. */
1967 struct sched_block_state
1968 {
1969 /* True if no real insns have been scheduled in the current cycle. */
1970 bool first_cycle_insn_p;
1971 /* True if a shadow insn has been scheduled in the current cycle, which
1972 means that no more normal insns can be issued. */
1973 bool shadows_only_p;
1974 /* True if we're winding down a modulo schedule, which means that we only
1975 issue insns with INSN_EXACT_TICK set. */
1976 bool modulo_epilogue;
1977 /* Initialized with the machine's issue rate every cycle, and updated
1978 by calls to the variable_issue hook. */
1979 int can_issue_more;
1980 };
1981
1982 /* INSN is the "currently executing insn". Launch each insn which was
1983 waiting on INSN. The function returns the cycle advance needed
1984 after issuing the insn; it is nonzero only when a successor in a
1985 schedule group requires extra cycles before it can issue, and
1986 zero otherwise. */
1987
1988 static int
1989 schedule_insn (rtx insn)
1990 {
1991 sd_iterator_def sd_it;
1992 dep_t dep;
1993 int i;
1994 int advance = 0;
1995
1996 if (sched_verbose >= 1)
1997 {
1998 struct reg_pressure_data *pressure_info;
1999 char buf[2048];
2000
2001 print_insn (buf, insn, 0);
2002 buf[40] = 0;
2003 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
2004
2005 if (recog_memoized (insn) < 0)
2006 fprintf (sched_dump, "nothing");
2007 else
2008 print_reservation (sched_dump, insn);
2009 pressure_info = INSN_REG_PRESSURE (insn);
2010 if (pressure_info != NULL)
2011 {
2012 fputc (':', sched_dump);
2013 for (i = 0; i < ira_pressure_classes_num; i++)
2014 fprintf (sched_dump, "%s%+d(%d)",
2015 reg_class_names[ira_pressure_classes[i]],
2016 pressure_info[i].set_increase, pressure_info[i].change);
2017 }
2018 fputc ('\n', sched_dump);
2019 }
2020
2021 if (sched_pressure_p && !DEBUG_INSN_P (insn))
2022 update_reg_and_insn_max_reg_pressure (insn);
2023
2024 /* An instruction being scheduled should have all its dependencies
2025 resolved and should have been removed from the ready list. */
2026 gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
2027
2028 /* Reset debug insns invalidated by moving this insn. */
2029 if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
2030 for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
2031 sd_iterator_cond (&sd_it, &dep);)
2032 {
2033 rtx dbg = DEP_PRO (dep);
2034 struct reg_use_data *use, *next;
2035
2036 gcc_assert (DEBUG_INSN_P (dbg));
2037
2038 if (sched_verbose >= 6)
2039 fprintf (sched_dump, ";;\t\tresetting: debug insn %d\n",
2040 INSN_UID (dbg));
2041
2042 /* ??? Rather than resetting the debug insn, we might be able
2043 to emit a debug temp before the just-scheduled insn, but
2044 this would involve checking that the expression at the
2045 point of the debug insn is equivalent to the expression
2046 before the just-scheduled insn. They might not be: the
2047 expression in the debug insn may depend on other insns not
2048 yet scheduled that set MEMs, REGs or even other debug
2049 insns. It's not clear that attempting to preserve debug
2050 information in these cases is worth the effort, given how
2051 uncommon these resets are and the likelihood that the debug
2052 temps introduced won't survive the schedule change. */
2053 INSN_VAR_LOCATION_LOC (dbg) = gen_rtx_UNKNOWN_VAR_LOC ();
2054 df_insn_rescan (dbg);
2055
2056 /* Unknown location doesn't use any registers. */
2057 for (use = INSN_REG_USE_LIST (dbg); use != NULL; use = next)
2058 {
2059 struct reg_use_data *prev = use;
2060
2061 /* Remove use from the cyclic next_regno_use chain first. */
2062 while (prev->next_regno_use != use)
2063 prev = prev->next_regno_use;
2064 prev->next_regno_use = use->next_regno_use;
2065 next = use->next_insn_use;
2066 free (use);
2067 }
2068 INSN_REG_USE_LIST (dbg) = NULL;
2069
2070 /* We delete rather than resolve these deps, otherwise we
2071 crash in sched_free_deps(), because forward deps are
2072 expected to be released before backward deps. */
2073 sd_delete_dep (sd_it);
2074 }
2075
2076 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
2077 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
2078
2079 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
2080 if (INSN_TICK (insn) > clock_var)
2081 /* INSN has been prematurely moved from the queue to the ready list.
2082 This is possible only if the following flag is set. */
2083 gcc_assert (flag_sched_stalled_insns);
2084
2085 /* ??? Probably, if INSN is scheduled prematurely, we should leave
2086 INSN_TICK untouched. This is a machine-dependent issue, actually. */
2087 INSN_TICK (insn) = clock_var;
2088
2089 /* Update dependent instructions. */
2090 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
2091 sd_iterator_cond (&sd_it, &dep);)
2092 {
2093 rtx next = DEP_CON (dep);
2094
2095 /* Resolve the dependence between INSN and NEXT.
2096 sd_resolve_dep () moves current dep to another list thus
2097 advancing the iterator. */
2098 sd_resolve_dep (sd_it);
2099
2100 /* Don't bother trying to mark next as ready if insn is a debug
2101 insn. If insn is the last hard dependency, it will have
2102 already been discounted. */
2103 if (DEBUG_INSN_P (insn) && !DEBUG_INSN_P (next))
2104 continue;
2105
2106 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
2107 {
2108 int effective_cost;
2109
2110 effective_cost = try_ready (next);
2111
2112 if (effective_cost >= 0
2113 && SCHED_GROUP_P (next)
2114 && advance < effective_cost)
2115 advance = effective_cost;
2116 }
2117 else
2118 /* A check insn always has exactly one forward dependence (to the first
2119 insn in the recovery block), so this will be executed only once. */
2120 {
2121 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
2122 fix_recovery_deps (RECOVERY_BLOCK (insn));
2123 }
2124 }
2125
2126 /* Annotate the instruction with issue information -- TImode
2127 indicates that the instruction is expected not to be able
2128 to issue on the same cycle as the previous insn. A machine
2129 may use this information to decide how the instruction should
2130 be aligned. */
2131 if (issue_rate > 1
2132 && GET_CODE (PATTERN (insn)) != USE
2133 && GET_CODE (PATTERN (insn)) != CLOBBER
2134 && !DEBUG_INSN_P (insn))
2135 {
2136 if (reload_completed)
2137 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
2138 last_clock_var = clock_var;
2139 }
2140
2141 return advance;
2142 }
2143
2144 /* Functions for handling of notes. */
2145
2146 /* Add note list that ends on FROM_END to the end of TO_ENDP. */
2147 void
2148 concat_note_lists (rtx from_end, rtx *to_endp)
2149 {
2150 rtx from_start;
2151
2152 /* It's easy when we have nothing to concat. */
2153 if (from_end == NULL)
2154 return;
2155
2156 /* It's also easy when destination is empty. */
2157 if (*to_endp == NULL)
2158 {
2159 *to_endp = from_end;
2160 return;
2161 }
2162
2163 from_start = from_end;
2164 while (PREV_INSN (from_start) != NULL)
2165 from_start = PREV_INSN (from_start);
2166
2167 PREV_INSN (from_start) = *to_endp;
2168 NEXT_INSN (*to_endp) = from_start;
2169 *to_endp = from_end;
2170 }
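/* Example: if *TO_ENDP is the last note of the chain T1->T2 and
   FROM_END the last note of F1->F2, the result is the single chain
   T1->T2->F1->F2, with *TO_ENDP updated to point at F2. Both
   arguments name the end of their chains; the loop above walks
   backward to find FROM's start. */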
2171
2172 /* Delete notes between HEAD and TAIL and put them in the chain
2173 of notes ended by NOTE_LIST. */
2174 void
2175 remove_notes (rtx head, rtx tail)
2176 {
2177 rtx next_tail, insn, next;
2178
2179 note_list = 0;
2180 if (head == tail && !INSN_P (head))
2181 return;
2182
2183 next_tail = NEXT_INSN (tail);
2184 for (insn = head; insn != next_tail; insn = next)
2185 {
2186 next = NEXT_INSN (insn);
2187 if (!NOTE_P (insn))
2188 continue;
2189
2190 switch (NOTE_KIND (insn))
2191 {
2192 case NOTE_INSN_BASIC_BLOCK:
2193 continue;
2194
2195 case NOTE_INSN_EPILOGUE_BEG:
2196 if (insn != tail)
2197 {
2198 remove_insn (insn);
2199 add_reg_note (next, REG_SAVE_NOTE,
2200 GEN_INT (NOTE_INSN_EPILOGUE_BEG));
2201 break;
2202 }
2203 /* FALLTHRU */
2204
2205 default:
2206 remove_insn (insn);
2207
2208 /* Add the note to list that ends at NOTE_LIST. */
2209 PREV_INSN (insn) = note_list;
2210 NEXT_INSN (insn) = NULL_RTX;
2211 if (note_list)
2212 NEXT_INSN (note_list) = insn;
2213 note_list = insn;
2214 break;
2215 }
2216
2217 gcc_assert ((sel_sched_p () || insn != tail) && insn != head);
2218 }
2219 }
2220
2221 /* A structure to record enough data to allow us to backtrack the scheduler to
2222 a previous state. */
2223 struct haifa_saved_data
2224 {
2225 /* Next entry on the list. */
2226 struct haifa_saved_data *next;
2227
2228 /* Backtracking is associated with scheduling insns that have delay slots.
2229 DELAY_PAIR points to the structure that contains the insns involved, and
2230 the number of cycles between them. */
2231 struct delay_pair *delay_pair;
2232
2233 /* Data used by the frontend (e.g. sched-ebb or sched-rgn). */
2234 void *fe_saved_data;
2235 /* Data used by the backend. */
2236 void *be_saved_data;
2237
2238 /* Copies of global state. */
2239 int clock_var, last_clock_var;
2240 struct ready_list ready;
2241 state_t curr_state;
2242
2243 rtx last_scheduled_insn;
2244 rtx last_nondebug_scheduled_insn;
2245 int cycle_issued_insns;
2246
2247 /* Copies of state used in the inner loop of schedule_block. */
2248 struct sched_block_state sched_block;
2249
2250 /* We don't need to save q_ptr, as its value is arbitrary and we can set it
2251 to 0 when restoring. */
2252 int q_size;
2253 rtx *insn_queue;
2254 };
2255
2256 /* A record, in reverse order, of all scheduled insns which have delay slots
2257 and may require backtracking. */
2258 static struct haifa_saved_data *backtrack_queue;
2259
2260 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
2261 to SET_P. */
2262 static void
2263 mark_backtrack_feeds (rtx insn, int set_p)
2264 {
2265 sd_iterator_def sd_it;
2266 dep_t dep;
2267 FOR_EACH_DEP (insn, SD_LIST_HARD_BACK, sd_it, dep)
2268 {
2269 FEEDS_BACKTRACK_INSN (DEP_PRO (dep)) = set_p;
2270 }
2271 }
2272
2273 /* Make a copy of the INSN_LIST list LINK and return it. */
2274 static rtx
2275 copy_insn_list (rtx link)
2276 {
2277 rtx new_queue;
2278 rtx *pqueue = &new_queue;
2279
2280 for (; link; link = XEXP (link, 1))
2281 {
2282 rtx x = XEXP (link, 0);
2283 rtx newlink = alloc_INSN_LIST (x, NULL);
2284 *pqueue = newlink;
2285 pqueue = &XEXP (newlink, 1);
2286 }
2287 *pqueue = NULL_RTX;
2288 return new_queue;
2289 }
2290
2291 /* Save the current scheduler state so that we can backtrack to it
2292 later if necessary. PAIR gives the insns that make it necessary to
2293 save this point. SCHED_BLOCK is the local state of schedule_block
2294 that needs to be saved. */
2295 static void
2296 save_backtrack_point (struct delay_pair *pair,
2297 struct sched_block_state sched_block)
2298 {
2299 int i;
2300 struct haifa_saved_data *save = XNEW (struct haifa_saved_data);
2301
2302 save->curr_state = xmalloc (dfa_state_size);
2303 memcpy (save->curr_state, curr_state, dfa_state_size);
2304
2305 save->ready.first = ready.first;
2306 save->ready.n_ready = ready.n_ready;
2307 save->ready.n_debug = ready.n_debug;
2308 save->ready.veclen = ready.veclen;
2309 save->ready.vec = XNEWVEC (rtx, ready.veclen);
2310 memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
2311
2312 save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
2313 save->q_size = q_size;
2314 for (i = 0; i <= max_insn_queue_index; i++)
2315 {
2316 int q = NEXT_Q_AFTER (q_ptr, i);
2317 save->insn_queue[i] = copy_insn_list (insn_queue[q]);
2318 }
2319
2320 save->clock_var = clock_var;
2321 save->last_clock_var = last_clock_var;
2322 save->cycle_issued_insns = cycle_issued_insns;
2323 save->last_scheduled_insn = last_scheduled_insn;
2324 save->last_nondebug_scheduled_insn = last_nondebug_scheduled_insn;
2325
2326 save->sched_block = sched_block;
2327
2328 if (current_sched_info->save_state)
2329 save->fe_saved_data = (*current_sched_info->save_state) ();
2330
2331 if (targetm.sched.alloc_sched_context)
2332 {
2333 save->be_saved_data = targetm.sched.alloc_sched_context ();
2334 targetm.sched.init_sched_context (save->be_saved_data, false);
2335 }
2336 else
2337 save->be_saved_data = NULL;
2338
2339 save->delay_pair = pair;
2340
2341 save->next = backtrack_queue;
2342 backtrack_queue = save;
2343
2344 while (pair)
2345 {
2346 mark_backtrack_feeds (pair->i2, 1);
2347 INSN_TICK (pair->i2) = INVALID_TICK;
2348 INSN_EXACT_TICK (pair->i2) = clock_var + pair_delay (pair);
2349 SHADOW_P (pair->i2) = pair->stages == 0;
2350 pair = pair->next_same_i1;
2351 }
2352 }
2353
2354 /* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
2355 Restore their dependencies to an unresolved state, and mark them as
2356 queued nowhere. */
2357
2358 static void
2359 unschedule_insns_until (rtx insn)
2360 {
2361 for (;;)
2362 {
2363 rtx last;
2364 sd_iterator_def sd_it;
2365 dep_t dep;
2366
2367 last = VEC_pop (rtx, scheduled_insns);
2368
2369 /* This will be changed by restore_backtrack_point if the insn is in
2370 any queue. */
2371 QUEUE_INDEX (last) = QUEUE_NOWHERE;
2372 if (last != insn)
2373 INSN_TICK (last) = INVALID_TICK;
2374
2375 if (modulo_ii > 0 && INSN_UID (last) < modulo_iter0_max_uid)
2376 modulo_insns_scheduled--;
2377
2378 for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
2379 sd_iterator_cond (&sd_it, &dep);)
2380 {
2381 rtx con = DEP_CON (dep);
2382 TODO_SPEC (con) = HARD_DEP;
2383 INSN_TICK (con) = INVALID_TICK;
2384 sd_unresolve_dep (sd_it);
2385 }
2386
2387 if (last == insn)
2388 break;
2389 }
2390 }
2391
2392 /* Restore scheduler state from the topmost entry on the backtracking queue.
2393 PSCHED_BLOCK_P points to the local data of schedule_block that we must
2394 overwrite with the saved data.
2395 The caller must already have called unschedule_insns_until. */
2396
2397 static void
2398 restore_last_backtrack_point (struct sched_block_state *psched_block)
2399
2400 {
2401 rtx link;
2402 int i;
2403 struct haifa_saved_data *save = backtrack_queue;
2404
2405 backtrack_queue = save->next;
2406
2407 if (current_sched_info->restore_state)
2408 (*current_sched_info->restore_state) (save->fe_saved_data);
2409
2410 if (targetm.sched.alloc_sched_context)
2411 {
2412 targetm.sched.set_sched_context (save->be_saved_data);
2413 targetm.sched.free_sched_context (save->be_saved_data);
2414 }
2415
2416 /* Clear the QUEUE_INDEX of everything in the ready list or one
2417 of the queues. */
2418 if (ready.n_ready > 0)
2419 {
2420 rtx *first = ready_lastpos (&ready);
2421 for (i = 0; i < ready.n_ready; i++)
2422 {
2423 QUEUE_INDEX (first[i]) = QUEUE_NOWHERE;
2424 INSN_TICK (first[i]) = INVALID_TICK;
2425 }
2426 }
2427 for (i = 0; i <= max_insn_queue_index; i++)
2428 {
2429 int q = NEXT_Q_AFTER (q_ptr, i);
2430
2431 for (link = insn_queue[q]; link; link = XEXP (link, 1))
2432 {
2433 rtx x = XEXP (link, 0);
2434 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2435 INSN_TICK (x) = INVALID_TICK;
2436 }
2437 free_INSN_LIST_list (&insn_queue[q]);
2438 }
2439
2440 free (ready.vec);
2441 ready = save->ready;
2442
2443 if (ready.n_ready > 0)
2444 {
2445 rtx *first = ready_lastpos (&ready);
2446 for (i = 0; i < ready.n_ready; i++)
2447 {
2448 QUEUE_INDEX (first[i]) = QUEUE_READY;
2449 INSN_TICK (first[i]) = save->clock_var;
2450 }
2451 }
2452
2453 q_ptr = 0;
2454 q_size = save->q_size;
2455 for (i = 0; i <= max_insn_queue_index; i++)
2456 {
2457 int q = NEXT_Q_AFTER (q_ptr, i);
2458
2459 insn_queue[q] = save->insn_queue[q];
2460
2461 for (link = insn_queue[q]; link; link = XEXP (link, 1))
2462 {
2463 rtx x = XEXP (link, 0);
2464 QUEUE_INDEX (x) = i;
2465 INSN_TICK (x) = save->clock_var + i;
2466 }
2467 }
2468 free (save->insn_queue);
2469
2470 clock_var = save->clock_var;
2471 last_clock_var = save->last_clock_var;
2472 cycle_issued_insns = save->cycle_issued_insns;
2473 last_scheduled_insn = save->last_scheduled_insn;
2474 last_nondebug_scheduled_insn = save->last_nondebug_scheduled_insn;
2475
2476 *psched_block = save->sched_block;
2477
2478 memcpy (curr_state, save->curr_state, dfa_state_size);
2479 free (save->curr_state);
2480
2481 mark_backtrack_feeds (save->delay_pair->i2, 0);
2482
2483 free (save);
2484
2485 for (save = backtrack_queue; save; save = save->next)
2486 {
2487 mark_backtrack_feeds (save->delay_pair->i2, 1);
2488 }
2489 }
2490
2491 /* Discard all data associated with the topmost entry in the backtrack
2492 queue. If RESET_TICK is false, we just want to free the data. If true,
2493 we are doing this because we discovered a reason to backtrack. In the
2494 latter case, also reset the INSN_TICK for the shadow insn. */
2495 static void
2496 free_topmost_backtrack_point (bool reset_tick)
2497 {
2498 struct haifa_saved_data *save = backtrack_queue;
2499 int i;
2500
2501 backtrack_queue = save->next;
2502
2503 if (reset_tick)
2504 {
2505 struct delay_pair *pair = save->delay_pair;
2506 while (pair)
2507 {
2508 INSN_TICK (pair->i2) = INVALID_TICK;
2509 INSN_EXACT_TICK (pair->i2) = INVALID_TICK;
2510 pair = pair->next_same_i1;
2511 }
2512 }
2513 if (targetm.sched.free_sched_context)
2514 targetm.sched.free_sched_context (save->be_saved_data);
2515 if (current_sched_info->restore_state)
2516 free (save->fe_saved_data);
2517 for (i = 0; i <= max_insn_queue_index; i++)
2518 free_INSN_LIST_list (&save->insn_queue[i]);
2519 free (save->insn_queue);
2520 free (save->curr_state);
2521 free (save->ready.vec);
2522 free (save);
2523 }
2524
2525 /* Free the entire backtrack queue. */
2526 static void
2527 free_backtrack_queue (void)
2528 {
2529 while (backtrack_queue)
2530 free_topmost_backtrack_point (false);
2531 }
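/* A sketch of how the backtracking machinery above fits together, as
   it is used from schedule_block:

     save_backtrack_point (pair, ls);     when issuing the first insn
                                          of a delay pair
     ...
     if (must_backtrack)
       {
         unschedule_insns_until (insn);
         restore_last_backtrack_point (&ls);
       }
     ...
     free_backtrack_queue ();             once the block is done

   Save points are popped strictly in LIFO order, which is why the
   single list headed by backtrack_queue suffices. */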
2532
2533 /* Compute INSN_TICK_ESTIMATE for INSN. PROCESSED is a bitmap of
2534 instructions we've previously encountered; a set bit prevents
2535 recursion. BUDGET is a limit on how far ahead we look; it is
2536 reduced on recursive calls. Return true if we produced a good
2537 estimate, or false if we exceeded the budget. */
2538 static bool
2539 estimate_insn_tick (bitmap processed, rtx insn, int budget)
2540 {
2541 sd_iterator_def sd_it;
2542 dep_t dep;
2543 int earliest = INSN_TICK (insn);
2544
2545 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
2546 {
2547 rtx pro = DEP_PRO (dep);
2548 int t;
2549
2550 if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
2551 gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
2552 else
2553 {
2554 int cost = dep_cost (dep);
2555 if (cost >= budget)
2556 return false;
2557 if (!bitmap_bit_p (processed, INSN_LUID (pro)))
2558 {
2559 if (!estimate_insn_tick (processed, pro, budget - cost))
2560 return false;
2561 }
2562 gcc_assert (INSN_TICK_ESTIMATE (pro) != INVALID_TICK);
2563 t = INSN_TICK_ESTIMATE (pro) + cost;
2564 if (earliest == INVALID_TICK || t > earliest)
2565 earliest = t;
2566 }
2567 }
2568 bitmap_set_bit (processed, INSN_LUID (insn));
2569 INSN_TICK_ESTIMATE (insn) = earliest;
2570 return true;
2571 }
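/* A worked example of the estimate above: suppose insn C has
   INSN_TICK == INVALID_TICK and two producers, A (unscheduled, with
   INSN_TICK_ESTIMATE == 4 and dep cost 2) and B (already scheduled).
   B only triggers the assertion; A contributes 4 + 2 = 6, so
   INSN_TICK_ESTIMATE (C) becomes 6. Had INSN_TICK (C) already been 7,
   the estimate would stay 7, since EARLIEST starts from INSN_TICK. */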
2572
2573 /* Examine the pair of insns in P, and estimate (optimistically, assuming
2574 infinite resources) the cycle in which the delayed shadow can be issued.
2575 Return the number of cycles that must pass before the real insn can be
2576 issued in order to meet this constraint. */
2577 static int
2578 estimate_shadow_tick (struct delay_pair *p)
2579 {
2580 bitmap_head processed;
2581 int t;
2582 bool cutoff;
2583 bitmap_initialize (&processed, 0);
2584
2585 cutoff = !estimate_insn_tick (&processed, p->i2,
2586 max_insn_queue_index + pair_delay (p));
2587 bitmap_clear (&processed);
2588 if (cutoff)
2589 return max_insn_queue_index;
2590 t = INSN_TICK_ESTIMATE (p->i2) - (clock_var + pair_delay (p) + 1);
2591 if (t > 0)
2592 return t;
2593 return 0;
2594 }
2595
2596 /* If INSN has no unresolved backwards dependencies, add it to the schedule and
2597 recursively resolve all its forward dependencies. */
2598 static void
2599 resolve_dependencies (rtx insn)
2600 {
2601 sd_iterator_def sd_it;
2602 dep_t dep;
2603
2604 /* Don't use sd_lists_empty_p; it ignores debug insns. */
2605 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (insn)) != NULL
2606 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (insn)) != NULL)
2607 return;
2608
2609 if (sched_verbose >= 4)
2610 fprintf (sched_dump, ";;\tquickly resolving %d\n", INSN_UID (insn));
2611
2612 if (QUEUE_INDEX (insn) >= 0)
2613 queue_remove (insn);
2614
2615 VEC_safe_push (rtx, heap, scheduled_insns, insn);
2616
2617 /* Update dependent instructions. */
2618 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
2619 sd_iterator_cond (&sd_it, &dep);)
2620 {
2621 rtx next = DEP_CON (dep);
2622
2623 if (sched_verbose >= 4)
2624 fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
2625 INSN_UID (next));
2626
2627 /* Resolve the dependence between INSN and NEXT.
2628 sd_resolve_dep () moves current dep to another list thus
2629 advancing the iterator. */
2630 sd_resolve_dep (sd_it);
2631
2632 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
2633 {
2634 resolve_dependencies (next);
2635 }
2636 else
2637 /* A check insn always has exactly one forward dependence (to the first
2638 insn in the recovery block), so this will be executed only once. */
2639 {
2640 gcc_assert (sd_lists_empty_p (insn, SD_LIST_FORW));
2641 }
2642 }
2643 }
2644
2645
2646 /* Return the head and tail pointers of the ebb starting at BEG and
2647 ending at END. */
2648 void
2649 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
2650 {
2651 rtx beg_head = BB_HEAD (beg);
2652 rtx beg_tail = BB_END (beg);
2653 rtx end_head = BB_HEAD (end);
2654 rtx end_tail = BB_END (end);
2655
2656 /* Don't include any notes or labels at the beginning of the BEG
2657 basic block, or notes at the end of the END basic block. */
2658
2659 if (LABEL_P (beg_head))
2660 beg_head = NEXT_INSN (beg_head);
2661
2662 while (beg_head != beg_tail)
2663 if (NOTE_P (beg_head))
2664 beg_head = NEXT_INSN (beg_head);
2665 else if (DEBUG_INSN_P (beg_head))
2666 {
2667 rtx note, next;
2668
2669 for (note = NEXT_INSN (beg_head);
2670 note != beg_tail;
2671 note = next)
2672 {
2673 next = NEXT_INSN (note);
2674 if (NOTE_P (note))
2675 {
2676 if (sched_verbose >= 9)
2677 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
2678
2679 reorder_insns_nobb (note, note, PREV_INSN (beg_head));
2680
2681 if (BLOCK_FOR_INSN (note) != beg)
2682 df_insn_change_bb (note, beg);
2683 }
2684 else if (!DEBUG_INSN_P (note))
2685 break;
2686 }
2687
2688 break;
2689 }
2690 else
2691 break;
2692
2693 *headp = beg_head;
2694
2695 if (beg == end)
2696 end_head = beg_head;
2697 else if (LABEL_P (end_head))
2698 end_head = NEXT_INSN (end_head);
2699
2700 while (end_head != end_tail)
2701 if (NOTE_P (end_tail))
2702 end_tail = PREV_INSN (end_tail);
2703 else if (DEBUG_INSN_P (end_tail))
2704 {
2705 rtx note, prev;
2706
2707 for (note = PREV_INSN (end_tail);
2708 note != end_head;
2709 note = prev)
2710 {
2711 prev = PREV_INSN (note);
2712 if (NOTE_P (note))
2713 {
2714 if (sched_verbose >= 9)
2715 fprintf (sched_dump, "reorder %i\n", INSN_UID (note));
2716
2717 reorder_insns_nobb (note, note, end_tail);
2718
2719 if (end_tail == BB_END (end))
2720 BB_END (end) = note;
2721
2722 if (BLOCK_FOR_INSN (note) != end)
2723 df_insn_change_bb (note, end);
2724 }
2725 else if (!DEBUG_INSN_P (note))
2726 break;
2727 }
2728
2729 break;
2730 }
2731 else
2732 break;
2733
2734 *tailp = end_tail;
2735 }
2736
2737 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
2738
2739 int
2740 no_real_insns_p (const_rtx head, const_rtx tail)
2741 {
2742 while (head != NEXT_INSN (tail))
2743 {
2744 if (!NOTE_P (head) && !LABEL_P (head))
2745 return 0;
2746 head = NEXT_INSN (head);
2747 }
2748 return 1;
2749 }
2750
2751 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2752 previously found among the insns. Insert them just before HEAD. */
2753 rtx
2754 restore_other_notes (rtx head, basic_block head_bb)
2755 {
2756 if (note_list != 0)
2757 {
2758 rtx note_head = note_list;
2759
2760 if (head)
2761 head_bb = BLOCK_FOR_INSN (head);
2762 else
2763 head = NEXT_INSN (bb_note (head_bb));
2764
2765 while (PREV_INSN (note_head))
2766 {
2767 set_block_for_insn (note_head, head_bb);
2768 note_head = PREV_INSN (note_head);
2769 }
2770 /* The above loop misses the first note in the chain; set it here. */
2771 set_block_for_insn (note_head, head_bb);
2772
2773 PREV_INSN (note_head) = PREV_INSN (head);
2774 NEXT_INSN (PREV_INSN (head)) = note_head;
2775 PREV_INSN (head) = note_list;
2776 NEXT_INSN (note_list) = head;
2777
2778 if (BLOCK_FOR_INSN (head) != head_bb)
2779 BB_END (head_bb) = note_list;
2780
2781 head = note_head;
2782 }
2783
2784 return head;
2785 }
2786
2787 /* Move insns that became ready to fire from queue to ready list. */
2788
2789 static void
2790 queue_to_ready (struct ready_list *ready)
2791 {
2792 rtx insn;
2793 rtx link;
2794 rtx skip_insn;
2795
2796 q_ptr = NEXT_Q (q_ptr);
2797
2798 if (dbg_cnt (sched_insn) == false)
2799 {
2800 /* If the debug counter is activated, do not requeue the first
2801 nonscheduled insn. */
2802 skip_insn = nonscheduled_insns_begin;
2803 do
2804 {
2805 skip_insn = next_nonnote_nondebug_insn (skip_insn);
2806 }
2807 while (QUEUE_INDEX (skip_insn) == QUEUE_SCHEDULED);
2808 }
2809 else
2810 skip_insn = NULL_RTX;
2811
2812 /* Add all pending insns that can be scheduled without stalls to the
2813 ready list. */
2814 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
2815 {
2816 insn = XEXP (link, 0);
2817 q_size -= 1;
2818
2819 if (sched_verbose >= 2)
2820 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2821 (*current_sched_info->print_insn) (insn, 0));
2822
2823 /* If the ready list is full, delay the insn for 1 cycle.
2824 See the comment in schedule_block for the rationale. */
2825 if (!reload_completed
2826 && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
2827 && !SCHED_GROUP_P (insn)
2828 && insn != skip_insn)
2829 queue_insn (insn, 1, "ready full");
2830 else
2831 {
2832 ready_add (ready, insn, false);
2833 if (sched_verbose >= 2)
2834 fprintf (sched_dump, "moving to ready without stalls\n");
2835 }
2836 }
2837 free_INSN_LIST_list (&insn_queue[q_ptr]);
2838
2839 /* If there are no ready insns, stall until one is ready and add all
2840 of the pending insns at that point to the ready list. */
2841 if (ready->n_ready == 0)
2842 {
2843 int stalls;
2844
2845 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
2846 {
2847 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2848 {
2849 for (; link; link = XEXP (link, 1))
2850 {
2851 insn = XEXP (link, 0);
2852 q_size -= 1;
2853
2854 if (sched_verbose >= 2)
2855 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
2856 (*current_sched_info->print_insn) (insn, 0));
2857
2858 ready_add (ready, insn, false);
2859 if (sched_verbose >= 2)
2860 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
2861 }
2862 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
2863
2864 advance_one_cycle ();
2865
2866 break;
2867 }
2868
2869 advance_one_cycle ();
2870 }
2871
2872 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
2873 clock_var += stalls;
2874 }
2875 }
2876
2877 /* Used by early_queue_to_ready. Determines whether it is "ok" to
2878 prematurely move INSN from the queue to the ready list. Currently,
2879 if a target defines the hook 'is_costly_dependence', this function
2880 uses the hook to check whether there exist any dependences which are
2881 considered costly by the target, between INSN and other insns that
2882 have already been scheduled. Dependences are checked up to Y cycles
2883 back, with a default of Y=1; the flag -fsched-stalled-insns-dep=Y
2884 controls this value.
2885 (Other considerations could be taken into account, instead or in
2886 addition, depending on user flags and target hooks.) */
2887
2888 static bool
2889 ok_for_early_queue_removal (rtx insn)
2890 {
2891 if (targetm.sched.is_costly_dependence)
2892 {
2893 rtx prev_insn;
2894 int n_cycles;
2895 int i = VEC_length (rtx, scheduled_insns);
2896 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
2897 {
2898 while (i-- > 0)
2899 {
2900 int cost;
2901
2902 prev_insn = VEC_index (rtx, scheduled_insns, i);
2903
2904 if (!NOTE_P (prev_insn))
2905 {
2906 dep_t dep;
2907
2908 dep = sd_find_dep_between (prev_insn, insn, true);
2909
2910 if (dep != NULL)
2911 {
2912 cost = dep_cost (dep);
2913
2914 if (targetm.sched.is_costly_dependence (dep, cost,
2915 flag_sched_stalled_insns_dep - n_cycles))
2916 return false;
2917 }
2918 }
2919
2920 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
2921 break;
2922 }
2923
2924 if (i == 0)
2925 break;
2926 }
2927 }
2928
2929 return true;
2930 }
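/* For illustration, a minimal sketch of a target's
   targetm.sched.is_costly_dependence hook (hypothetical; real targets
   apply machine-specific criteria). DISTANCE is the number of cycles
   already separating the insns, as passed by the caller above:

     static bool
     example_is_costly_dependence (dep_t dep, int cost, int distance)
     {
       return DEP_TYPE (dep) == REG_DEP_TRUE && cost > distance + 1;
     }
*/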
2931
2932
2933 /* Remove insns from the queue before they become "ready" with respect
2934 to FU latency considerations. */
2935
2936 static int
2937 early_queue_to_ready (state_t state, struct ready_list *ready)
2938 {
2939 rtx insn;
2940 rtx link;
2941 rtx next_link;
2942 rtx prev_link;
2943 bool move_to_ready;
2944 int cost;
2945 state_t temp_state = alloca (dfa_state_size);
2946 int stalls;
2947 int insns_removed = 0;
2948
2949 /*
2950 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
2951 function:
2952
2953 X == 0: There is no limit on how many queued insns can be removed
2954 prematurely. (flag_sched_stalled_insns = -1).
2955
2956 X >= 1: Only X queued insns can be removed prematurely in each
2957 invocation. (flag_sched_stalled_insns = X).
2958
2959 Otherwise: Early queue removal is disabled.
2960 (flag_sched_stalled_insns = 0)
2961 */
2962
2963 if (! flag_sched_stalled_insns)
2964 return 0;
2965
2966 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
2967 {
2968 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
2969 {
2970 if (sched_verbose > 6)
2971 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
2972
2973 prev_link = 0;
2974 while (link)
2975 {
2976 next_link = XEXP (link, 1);
2977 insn = XEXP (link, 0);
2978 if (insn && sched_verbose > 6)
2979 print_rtl_single (sched_dump, insn);
2980
2981 memcpy (temp_state, state, dfa_state_size);
2982 if (recog_memoized (insn) < 0)
2983 /* Use a non-negative cost to mark the insn as not ready,
2984 avoiding an infinite Q->R->Q->R... cycle. */
2985 cost = 0;
2986 else
2987 cost = state_transition (temp_state, insn);
2988
2989 if (sched_verbose >= 6)
2990 fprintf (sched_dump, "transition cost = %d\n", cost);
2991
2992 move_to_ready = false;
2993 if (cost < 0)
2994 {
2995 move_to_ready = ok_for_early_queue_removal (insn);
2996 if (move_to_ready == true)
2997 {
2998 /* move from Q to R */
2999 q_size -= 1;
3000 ready_add (ready, insn, false);
3001
3002 if (prev_link)
3003 XEXP (prev_link, 1) = next_link;
3004 else
3005 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
3006
3007 free_INSN_LIST_node (link);
3008
3009 if (sched_verbose >= 2)
3010 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
3011 (*current_sched_info->print_insn) (insn, 0));
3012
3013 insns_removed++;
3014 if (insns_removed == flag_sched_stalled_insns)
3015 /* Remove no more than flag_sched_stalled_insns insns
3016 from Q at a time. */
3017 return insns_removed;
3018 }
3019 }
3020
3021 if (move_to_ready == false)
3022 prev_link = link;
3023
3024 link = next_link;
3025 } /* while link */
3026 } /* if link */
3027
3028 } /* for stalls.. */
3029
3030 return insns_removed;
3031 }
3032
3033
3034 /* Print the ready list for debugging purposes. Callable from debugger. */
3035
3036 static void
3037 debug_ready_list (struct ready_list *ready)
3038 {
3039 rtx *p;
3040 int i;
3041
3042 if (ready->n_ready == 0)
3043 {
3044 fprintf (sched_dump, "\n");
3045 return;
3046 }
3047
3048 p = ready_lastpos (ready);
3049 for (i = 0; i < ready->n_ready; i++)
3050 {
3051 fprintf (sched_dump, " %s:%d",
3052 (*current_sched_info->print_insn) (p[i], 0),
3053 INSN_LUID (p[i]));
3054 if (sched_pressure_p)
3055 fprintf (sched_dump, "(cost=%d",
3056 INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
3057 if (INSN_TICK (p[i]) > clock_var)
3058 fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
3059 if (sched_pressure_p)
3060 fprintf (sched_dump, ")");
3061 }
3062 fprintf (sched_dump, "\n");
3063 }
3064
3065 /* Search INSN for REG_SAVE_NOTE notes and convert them back into insn
3066 NOTEs. This is used for NOTE_INSN_EPILOGUE_BEG, so that sched-ebb
3067 replaces the epilogue note in the correct basic block. */
3068 void
3069 reemit_notes (rtx insn)
3070 {
3071 rtx note, last = insn;
3072
3073 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3074 {
3075 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
3076 {
3077 enum insn_note note_type = (enum insn_note) INTVAL (XEXP (note, 0));
3078
3079 last = emit_note_before (note_type, last);
3080 remove_note (insn, note);
3081 }
3082 }
3083 }
3084
3085 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
3086 static void
3087 move_insn (rtx insn, rtx last, rtx nt)
3088 {
3089 if (PREV_INSN (insn) != last)
3090 {
3091 basic_block bb;
3092 rtx note;
3093 int jump_p = 0;
3094
3095 bb = BLOCK_FOR_INSN (insn);
3096
3097 /* BB_HEAD is either LABEL or NOTE. */
3098 gcc_assert (BB_HEAD (bb) != insn);
3099
3100 if (BB_END (bb) == insn)
3101 /* If this is the last instruction in BB, move the end marker one
3102 instruction up. */
3103 {
3104 /* Jumps are always placed at the end of basic block. */
3105 jump_p = control_flow_insn_p (insn);
3106
3107 gcc_assert (!jump_p
3108 || ((common_sched_info->sched_pass_id == SCHED_RGN_PASS)
3109 && IS_SPECULATION_BRANCHY_CHECK_P (insn))
3110 || (common_sched_info->sched_pass_id
3111 == SCHED_EBB_PASS));
3112
3113 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
3114
3115 BB_END (bb) = PREV_INSN (insn);
3116 }
3117
3118 gcc_assert (BB_END (bb) != last);
3119
3120 if (jump_p)
3121 /* We move the block note along with the jump. */
3122 {
3123 gcc_assert (nt);
3124
3125 note = NEXT_INSN (insn);
3126 while (NOTE_NOT_BB_P (note) && note != nt)
3127 note = NEXT_INSN (note);
3128
3129 if (note != nt
3130 && (LABEL_P (note)
3131 || BARRIER_P (note)))
3132 note = NEXT_INSN (note);
3133
3134 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3135 }
3136 else
3137 note = insn;
3138
3139 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
3140 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
3141
3142 NEXT_INSN (note) = NEXT_INSN (last);
3143 PREV_INSN (NEXT_INSN (last)) = note;
3144
3145 NEXT_INSN (last) = insn;
3146 PREV_INSN (insn) = last;
3147
3148 bb = BLOCK_FOR_INSN (last);
3149
3150 if (jump_p)
3151 {
3152 fix_jump_move (insn);
3153
3154 if (BLOCK_FOR_INSN (insn) != bb)
3155 move_block_after_check (insn);
3156
3157 gcc_assert (BB_END (bb) == last);
3158 }
3159
3160 df_insn_change_bb (insn, bb);
3161
3162 /* Update BB_END, if needed. */
3163 if (BB_END (bb) == last)
3164 BB_END (bb) = insn;
3165 }
3166
3167 SCHED_GROUP_P (insn) = 0;
3168 }
3169
3170 /* Return true if scheduling INSN will finish current clock cycle. */
3171 static bool
3172 insn_finishes_cycle_p (rtx insn)
3173 {
3174 if (SCHED_GROUP_P (insn))
3175 /* After issuing INSN, rest of the sched_group will be forced to issue
3176 in order. Don't make any plans for the rest of cycle. */
3177 return true;
3178
3179 /* Finishing the block will, apparently, finish the cycle. */
3180 if (current_sched_info->insn_finishes_block_p
3181 && current_sched_info->insn_finishes_block_p (insn))
3182 return true;
3183
3184 return false;
3185 }
3186
3187 /* Define type for target data used in multipass scheduling. */
3188 #ifndef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T
3189 # define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T int
3190 #endif
3191 typedef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DATA_T first_cycle_multipass_data_t;
3192
3193 /* The following structure describes an entry of the stack of choices. */
3194 struct choice_entry
3195 {
3196 /* Ordinal number of the issued insn in the ready queue. */
3197 int index;
3198 /* The number of remaining insns whose issue we should still try. */
3199 int rest;
3200 /* The number of issued essential insns. */
3201 int n;
3202 /* State after issuing the insn. */
3203 state_t state;
3204 /* Target-specific data. */
3205 first_cycle_multipass_data_t target_data;
3206 };
3207
3208 /* The following array is used to implement a stack of choices used in
3209 function max_issue. */
3210 static struct choice_entry *choice_stack;
3211
3212 /* This holds the value of the target dfa_lookahead hook. */
3213 int dfa_lookahead;
3214
3215 /* The following variable holds the maximal number of tries of issuing
3216 insns for the first cycle multipass insn scheduling. We define
3217 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
3218 need this constraint if all real insns (with non-negative codes)
3219 had reservations, because in that case the algorithm complexity is
3220 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the DFA descriptions
3221 might be incomplete and such insns might occur. For such
3222 descriptions, the complexity of the algorithm (without the constraint)
3223 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
3224 static int max_lookahead_tries;
3225
3226 /* The following is the value of the hook
3227 `first_cycle_multipass_dfa_lookahead' at the last call of
3228 `max_issue'. */
3229 static int cached_first_cycle_multipass_dfa_lookahead = 0;
3230
3231 /* The following is the value of `issue_rate' at the last call of
3232 `sched_init'. */
3233 static int cached_issue_rate = 0;
3234
3235 /* The following function returns the maximal (or close to maximal)
3236 number of insns which can be issued on the same cycle, one of which
3237 is the insn with the best rank (the first insn in READY). To do
3238 this, the function tries different samples of ready insns. READY
3239 is the current queue `ready'. The global array READY_TRY reflects
3240 which insns are already issued in this try. The function stops
3241 immediately if it finds a solution in which all insns can be issued.
3242 INDEX will contain the index of the best insn in READY. This
3243 function is used only for first cycle multipass scheduling.
3244
3245 PRIVILEGED_N >= 0
3246
3247 This function expects recognized insns only. All USEs,
3248 CLOBBERs, etc must be filtered elsewhere. */
3249 int
3250 max_issue (struct ready_list *ready, int privileged_n, state_t state,
3251 bool first_cycle_insn_p, int *index)
3252 {
3253 int n, i, all, n_ready, best, delay, tries_num;
3254 int more_issue;
3255 struct choice_entry *top;
3256 rtx insn;
3257
3258 n_ready = ready->n_ready;
3259 gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
3260 && privileged_n <= n_ready);
3261
3262 /* Init MAX_LOOKAHEAD_TRIES. */
3263 if (cached_first_cycle_multipass_dfa_lookahead != dfa_lookahead)
3264 {
3265 cached_first_cycle_multipass_dfa_lookahead = dfa_lookahead;
3266 max_lookahead_tries = 100;
3267 for (i = 0; i < issue_rate; i++)
3268 max_lookahead_tries *= dfa_lookahead;
3269 }
3270
3271 /* Init MORE_ISSUE. */
3272 more_issue = issue_rate - cycle_issued_insns;
3273 gcc_assert (more_issue >= 0);
3274
3275 /* The number of the issued insns in the best solution. */
3276 best = 0;
3277
3278 top = choice_stack;
3279
3280 /* Set initial state of the search. */
3281 memcpy (top->state, state, dfa_state_size);
3282 top->rest = dfa_lookahead;
3283 top->n = 0;
3284 if (targetm.sched.first_cycle_multipass_begin)
3285 targetm.sched.first_cycle_multipass_begin (&top->target_data,
3286 ready_try, n_ready,
3287 first_cycle_insn_p);
3288
3289 /* Count the number of the insns to search among. */
3290 for (all = i = 0; i < n_ready; i++)
3291 if (!ready_try [i])
3292 all++;
3293
3294 /* I is the index of the insn to try next. */
3295 i = 0;
3296 tries_num = 0;
3297 for (;;)
3298 {
3299 if (/* If we've reached a dead end or searched enough of what we have
3300 been asked... */
3301 top->rest == 0
3302 /* or have nothing else to try... */
3303 || i >= n_ready
3304 /* or should not issue more. */
3305 || top->n >= more_issue)
3306 {
3307 /* ??? (... || i == n_ready). */
3308 gcc_assert (i <= n_ready);
3309
3310 /* We should not issue more than issue_rate instructions. */
3311 gcc_assert (top->n <= more_issue);
3312
3313 if (top == choice_stack)
3314 break;
3315
3316 if (best < top - choice_stack)
3317 {
3318 if (privileged_n)
3319 {
3320 n = privileged_n;
3321 /* Try to find issued privileged insn. */
3322 while (n && !ready_try[--n])
3323 ;
3324 }
3325
3326 if (/* If all insns are equally good... */
3327 privileged_n == 0
3328 /* Or a privileged insn will be issued. */
3329 || ready_try[n])
3330 /* Then we have a solution. */
3331 {
3332 best = top - choice_stack;
3333 /* This is the index of the insn issued first in this
3334 solution. */
3335 *index = choice_stack [1].index;
3336 if (top->n == more_issue || best == all)
3337 break;
3338 }
3339 }
3340
3341 /* Set ready-list index to point to the last insn
3342 ('i++' below will advance it to the next insn). */
3343 i = top->index;
3344
3345 /* Backtrack. */
3346 ready_try [i] = 0;
3347
3348 if (targetm.sched.first_cycle_multipass_backtrack)
3349 targetm.sched.first_cycle_multipass_backtrack (&top->target_data,
3350 ready_try, n_ready);
3351
3352 top--;
3353 memcpy (state, top->state, dfa_state_size);
3354 }
3355 else if (!ready_try [i])
3356 {
3357 tries_num++;
3358 if (tries_num > max_lookahead_tries)
3359 break;
3360 insn = ready_element (ready, i);
3361 delay = state_transition (state, insn);
3362 if (delay < 0)
3363 {
3364 if (state_dead_lock_p (state)
3365 || insn_finishes_cycle_p (insn))
3366 /* We won't issue any more instructions in the next
3367 choice_state. */
3368 top->rest = 0;
3369 else
3370 top->rest--;
3371
3372 n = top->n;
3373 if (memcmp (top->state, state, dfa_state_size) != 0)
3374 n++;
3375
3376 /* Advance to the next choice_entry. */
3377 top++;
3378 /* Initialize it. */
3379 top->rest = dfa_lookahead;
3380 top->index = i;
3381 top->n = n;
3382 memcpy (top->state, state, dfa_state_size);
3383 ready_try [i] = 1;
3384
3385 if (targetm.sched.first_cycle_multipass_issue)
3386 targetm.sched.first_cycle_multipass_issue (&top->target_data,
3387 ready_try, n_ready,
3388 insn,
3389 &((top - 1)
3390 ->target_data));
3391
3392 i = -1;
3393 }
3394 }
3395
3396 /* Increase ready-list index. */
3397 i++;
3398 }
3399
3400 if (targetm.sched.first_cycle_multipass_end)
3401 targetm.sched.first_cycle_multipass_end (best != 0
3402 ? &choice_stack[1].target_data
3403 : NULL);
3404
3405 /* Restore the original state of the DFA. */
3406 memcpy (state, choice_stack->state, dfa_state_size);
3407
3408 return best;
3409 }
3410
3411 /* The following function chooses an insn from READY and modifies
3412 READY. It is used only for first cycle multipass
3413 scheduling.
3414 Return:
3415 -1 if the cycle should be advanced,
3416 0 if INSN_PTR is set to point to the desirable insn,
3417 1 if choose_ready () should be restarted without advancing the cycle. */
3418 static int
3419 choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
3420 rtx *insn_ptr)
3421 {
3422 int lookahead;
3423
3424 if (dbg_cnt (sched_insn) == false)
3425 {
3426 rtx insn = nonscheduled_insns_begin;
3427 do
3428 {
3429 insn = next_nonnote_insn (insn);
3430 }
3431 while (QUEUE_INDEX (insn) == QUEUE_SCHEDULED);
3432
3433 if (QUEUE_INDEX (insn) == QUEUE_READY)
3434 /* INSN is in the ready_list. */
3435 {
3436 nonscheduled_insns_begin = insn;
3437 ready_remove_insn (insn);
3438 *insn_ptr = insn;
3439 return 0;
3440 }
3441
3442 /* INSN is in the queue. Advance cycle to move it to the ready list. */
3443 return -1;
3444 }
3445
3446 lookahead = 0;
3447
3448 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
3449 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
3450 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
3451 || DEBUG_INSN_P (ready_element (ready, 0)))
3452 {
3453 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
3454 *insn_ptr = ready_remove_first_dispatch (ready);
3455 else
3456 *insn_ptr = ready_remove_first (ready);
3457
3458 return 0;
3459 }
3460 else
3461 {
3462 /* Try to choose the better insn. */
3463 int index = 0, i, n;
3464 rtx insn;
3465 int try_data = 1, try_control = 1;
3466 ds_t ts;
3467
3468 insn = ready_element (ready, 0);
3469 if (INSN_CODE (insn) < 0)
3470 {
3471 *insn_ptr = ready_remove_first (ready);
3472 return 0;
3473 }
3474
3475 if (spec_info
3476 && spec_info->flags & (PREFER_NON_DATA_SPEC
3477 | PREFER_NON_CONTROL_SPEC))
3478 {
3479 for (i = 0, n = ready->n_ready; i < n; i++)
3480 {
3481 rtx x;
3482 ds_t s;
3483
3484 x = ready_element (ready, i);
3485 s = TODO_SPEC (x);
3486
3487 if (spec_info->flags & PREFER_NON_DATA_SPEC
3488 && !(s & DATA_SPEC))
3489 {
3490 try_data = 0;
3491 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
3492 || !try_control)
3493 break;
3494 }
3495
3496 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
3497 && !(s & CONTROL_SPEC))
3498 {
3499 try_control = 0;
3500 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
3501 break;
3502 }
3503 }
3504 }
3505
3506 ts = TODO_SPEC (insn);
3507 if ((ts & SPECULATIVE)
3508 && (((!try_data && (ts & DATA_SPEC))
3509 || (!try_control && (ts & CONTROL_SPEC)))
3510 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
3511 && !targetm.sched
3512 .first_cycle_multipass_dfa_lookahead_guard_spec (insn))))
3513 /* Discard a speculative instruction that stands first in the ready
3514 list. */
3515 {
3516 change_queue_index (insn, 1);
3517 return 1;
3518 }
3519
3520 ready_try[0] = 0;
3521
3522 for (i = 1; i < ready->n_ready; i++)
3523 {
3524 insn = ready_element (ready, i);
3525
3526 ready_try [i]
3527 = ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
3528 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC)));
3529 }
3530
3531 /* Let the target filter the search space. */
3532 for (i = 1; i < ready->n_ready; i++)
3533 if (!ready_try[i])
3534 {
3535 insn = ready_element (ready, i);
3536
3537 /* If this insn is recognizable we should have already
3538 recognized it earlier.
3539 ??? Not very clear where this is supposed to be done.
3540 See dep_cost_1. */
3541 gcc_checking_assert (INSN_CODE (insn) >= 0
3542 || recog_memoized (insn) < 0);
3543
3544 ready_try [i]
3545 = (/* INSN_CODE check can be omitted here as it is also done later
3546 in max_issue (). */
3547 INSN_CODE (insn) < 0
3548 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
3549 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
3550 (insn)));
3551 }
3552
3553 if (max_issue (ready, 1, curr_state, first_cycle_insn_p, &index) == 0)
3554 {
3555 *insn_ptr = ready_remove_first (ready);
3556 if (sched_verbose >= 4)
3557 fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
3558 (*current_sched_info->print_insn) (*insn_ptr, 0));
3559 return 0;
3560 }
3561 else
3562 {
3563 if (sched_verbose >= 4)
3564 fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
3565 (*current_sched_info->print_insn)
3566 (ready_element (ready, index), 0));
3567
3568 *insn_ptr = ready_remove (ready, index);
3569 return 0;
3570 }
3571 }
3572 }
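/* For illustration, a sketch of a
   first_cycle_multipass_dfa_lookahead_guard hook as consulted above
   (hypothetical; a real target encodes its own issue constraints).
   A nonzero return keeps INSN in the search space; a zero return
   excludes it via ready_try:

     static int
     example_lookahead_guard (rtx insn)
     {
       return GET_CODE (PATTERN (insn)) != ASM_INPUT;
     }
*/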
3573
3574 /* This function is called when we have successfully scheduled a
3575 block. It uses the schedule stored in the scheduled_insns vector
3576 to rearrange the RTL. PREV_HEAD is used as the anchor to which we
3577 append the scheduled insns; TAIL is the insn after the scheduled
3578 block. TARGET_BB is the argument passed to schedule_block. */
3579
3580 static void
3581 commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
3582 {
3583 unsigned int i;
3584 rtx insn;
3585
3586 last_scheduled_insn = prev_head;
3587 for (i = 0;
3588 VEC_iterate (rtx, scheduled_insns, i, insn);
3589 i++)
3590 {
3591 if (control_flow_insn_p (last_scheduled_insn)
3592 || current_sched_info->advance_target_bb (*target_bb, insn))
3593 {
3594 *target_bb = current_sched_info->advance_target_bb (*target_bb, 0);
3595
3596 if (sched_verbose)
3597 {
3598 rtx x;
3599
3600 x = next_real_insn (last_scheduled_insn);
3601 gcc_assert (x);
3602 dump_new_block_header (1, *target_bb, x, tail);
3603 }
3604
3605 last_scheduled_insn = bb_note (*target_bb);
3606 }
3607
3608 if (current_sched_info->begin_move_insn)
3609 (*current_sched_info->begin_move_insn) (insn, last_scheduled_insn);
3610 move_insn (insn, last_scheduled_insn,
3611 current_sched_info->next_tail);
3612 if (!DEBUG_INSN_P (insn))
3613 reemit_notes (insn);
3614 last_scheduled_insn = insn;
3615 }
3616
3617 VEC_truncate (rtx, scheduled_insns, 0);
3618 }
3619
3620 /* Examine all insns on the ready list and queue those which can't be
3621 issued in this cycle. TEMP_STATE is temporary scheduler state we
3622 can use as scratch space. If FIRST_CYCLE_INSN_P is true, no insns
3623 have been issued for the current cycle, which means it is valid to
3624 issue an asm statement.
3625
3626 If SHADOWS_ONLY_P is true, we eliminate all real insns and only
3627 leave those for which SHADOW_P is true. If MODULO_EPILOGUE is true,
3628 we only leave insns which have an INSN_EXACT_TICK. */
3629
3630 static void
3631 prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
3632 bool shadows_only_p, bool modulo_epilogue_p)
3633 {
3634 int i;
3635
3636 restart:
3637 for (i = 0; i < ready.n_ready; i++)
3638 {
3639 rtx insn = ready_element (&ready, i);
3640 int cost = 0;
3641 const char *reason = "resource conflict";
3642
3643 if (modulo_epilogue_p && !DEBUG_INSN_P (insn)
3644 && INSN_EXACT_TICK (insn) == INVALID_TICK)
3645 {
3646 cost = max_insn_queue_index;
3647 reason = "not an epilogue insn";
3648 }
3649 if (shadows_only_p && !DEBUG_INSN_P (insn) && !SHADOW_P (insn))
3650 {
3651 cost = 1;
3652 reason = "not a shadow";
3653 }
3654 else if (recog_memoized (insn) < 0)
3655 {
3656 if (!first_cycle_insn_p
3657 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
3658 || asm_noperands (PATTERN (insn)) >= 0))
3659 cost = 1;
3660 reason = "asm";
3661 }
3662 else if (sched_pressure_p)
3663 cost = 0;
3664 else
3665 {
3666 int delay_cost = 0;
3667
3668 if (delay_htab)
3669 {
3670 struct delay_pair *delay_entry;
3671 delay_entry
3672 = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
3673 htab_hash_pointer (insn));
3674 while (delay_entry && delay_cost == 0)
3675 {
3676 delay_cost = estimate_shadow_tick (delay_entry);
3677 if (delay_cost > max_insn_queue_index)
3678 delay_cost = max_insn_queue_index;
3679 delay_entry = delay_entry->next_same_i1;
3680 }
3681 }
3682
3683 memcpy (temp_state, curr_state, dfa_state_size);
3684 cost = state_transition (temp_state, insn);
3685 if (cost < 0)
3686 cost = 0;
3687 else if (cost == 0)
3688 cost = 1;
3689 if (cost < delay_cost)
3690 {
3691 cost = delay_cost;
3692 reason = "shadow tick";
3693 }
3694 }
3695 if (cost >= 1)
3696 {
3697 ready_remove (&ready, i);
3698 queue_insn (insn, cost, reason);
3699 goto restart;
3700 }
3701 }
3702 }
3703
3704 /* Called when we detect that the schedule is impossible. We examine the
3705 backtrack queue to find the earliest insn that caused this condition. */
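/* Illustratively, a delay pair (I1, I2) with pair_delay 4 whose I1 was
   scheduled at tick 3 requires its shadow I2 to issue at tick 7; once
   clock_var has reached 8 with I2 still unscheduled, the test below
   (INSN_TICK (i1) + pair_delay (pair) < clock_var) fires, and the save
   point recorded for that pair becomes a backtracking candidate. */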
3706
3707 static struct haifa_saved_data *
3708 verify_shadows (void)
3709 {
3710 struct haifa_saved_data *save, *earliest_fail = NULL;
3711 for (save = backtrack_queue; save; save = save->next)
3712 {
3713 int t;
3714 struct delay_pair *pair = save->delay_pair;
3715 rtx i1 = pair->i1;
3716
3717 for (; pair; pair = pair->next_same_i1)
3718 {
3719 rtx i2 = pair->i2;
3720
3721 if (QUEUE_INDEX (i2) == QUEUE_SCHEDULED)
3722 continue;
3723
3724 t = INSN_TICK (i1) + pair_delay (pair);
3725 if (t < clock_var)
3726 {
3727 if (sched_verbose >= 2)
3728 fprintf (sched_dump,
3729 ";;\t\tfailed delay requirements for %d/%d (%d->%d)"
3730 ", not ready\n",
3731 INSN_UID (pair->i1), INSN_UID (pair->i2),
3732 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
3733 earliest_fail = save;
3734 break;
3735 }
3736 if (QUEUE_INDEX (i2) >= 0)
3737 {
3738 int queued_for = INSN_TICK (i2);
3739
3740 if (t < queued_for)
3741 {
3742 if (sched_verbose >= 2)
3743 fprintf (sched_dump,
3744 ";;\t\tfailed delay requirements for %d/%d"
3745 " (%d->%d), queued too late\n",
3746 INSN_UID (pair->i1), INSN_UID (pair->i2),
3747 INSN_TICK (pair->i1), INSN_EXACT_TICK (pair->i2));
3748 earliest_fail = save;
3749 break;
3750 }
3751 }
3752 }
3753 }
3754
3755 return earliest_fail;
3756 }
3757
3758 /* Use forward list scheduling to rearrange insns of block pointed to by
3759 TARGET_BB, possibly bringing insns from subsequent blocks in the same
3760 region. */
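/* In outline, the main loop below behaves as follows (an illustrative
   sketch of this function, not a separate implementation):

     while (schedule_more_p ())
       {
         advance the clock and move queued insns to the ready list;
         prune_ready_list ();
         while (can_issue_more and the ready list is not empty)
           choose_ready (), then schedule_insn ();
         if a delay-pair deadline was missed, backtrack;
       }
     if (success)
       commit_schedule ();  */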
3761
3762 bool
3763 schedule_block (basic_block *target_bb)
3764 {
3765 int i;
3766 bool success = modulo_ii == 0;
3767 struct sched_block_state ls;
3768 state_t temp_state = NULL; /* It is used for multipass scheduling. */
3769 int sort_p, advance, start_clock_var;
3770
3771 /* Head/tail info for this block. */
3772 rtx prev_head = current_sched_info->prev_head;
3773 rtx next_tail = current_sched_info->next_tail;
3774 rtx head = NEXT_INSN (prev_head);
3775 rtx tail = PREV_INSN (next_tail);
3776
3777 /* We used to have code to avoid getting parameters moved from hard
3778 argument registers into pseudos.
3779
3780 However, it was removed when it proved to be of marginal benefit
3781 and caused problems because schedule_block and compute_forward_dependences
3782 had different notions of what the "head" insn was. */
3783
3784 gcc_assert (head != tail || INSN_P (head));
3785
3786 haifa_recovery_bb_recently_added_p = false;
3787
3788 backtrack_queue = NULL;
3789
3790 /* Debug info. */
3791 if (sched_verbose)
3792 dump_new_block_header (0, *target_bb, head, tail);
3793
3794 state_reset (curr_state);
3795
3796 /* Clear the ready list. */
3797 ready.first = ready.veclen - 1;
3798 ready.n_ready = 0;
3799 ready.n_debug = 0;
3800
3801 /* It is used for first cycle multipass scheduling. */
3802 temp_state = alloca (dfa_state_size);
3803
3804 if (targetm.sched.init)
3805 targetm.sched.init (sched_dump, sched_verbose, ready.veclen);
3806
3807 /* We start inserting insns after PREV_HEAD. */
3808 last_scheduled_insn = nonscheduled_insns_begin = prev_head;
3809 last_nondebug_scheduled_insn = NULL_RTX;
3810
3811 gcc_assert ((NOTE_P (last_scheduled_insn)
3812 || DEBUG_INSN_P (last_scheduled_insn))
3813 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
3814
3815 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
3816 queue. */
3817 q_ptr = 0;
3818 q_size = 0;
3819
3820 insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
3821 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
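/* The queue is a circular buffer of INSN_LISTs indexed modulo
   max_insn_queue_index + 1. A minimal sketch of how queue_insn files
   an insn N_CYCLES into the future, assuming the NEXT_Q_AFTER macro
   defined earlier in this file:

     int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
     insn_queue[next_q] = alloc_INSN_LIST (insn, insn_queue[next_q]);

   Advancing a cycle simply steps q_ptr one slot forward. */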
3822
3823 /* Start just before the beginning of time. */
3824 clock_var = -1;
3825
3826 /* We need the queue and ready lists and clock_var to be initialized
3827 in try_ready () (which is called through init_ready_list ()). */
3828 (*current_sched_info->init_ready_list) ();
3829
3830 /* The algorithm is O(n^2) in the number of ready insns at any given
3831 time in the worst case. Before reload we are more likely to have
3832 big lists, so truncate them to a reasonable size. */
3833 if (!reload_completed
3834 && ready.n_ready - ready.n_debug > MAX_SCHED_READY_INSNS)
3835 {
3836 ready_sort (&ready);
3837
3838 /* Find first free-standing insn past MAX_SCHED_READY_INSNS.
3839 If there are debug insns, we know they're first. */
3840 for (i = MAX_SCHED_READY_INSNS + ready.n_debug; i < ready.n_ready; i++)
3841 if (!SCHED_GROUP_P (ready_element (&ready, i)))
3842 break;
3843
3844 if (sched_verbose >= 2)
3845 {
3846 fprintf (sched_dump,
3847 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
3848 fprintf (sched_dump,
3849 ";;\t\t before reload => truncated to %d insns\n", i);
3850 }
3851
3852 /* Delay all insns past it for 1 cycle. If the debug counter is
3853 activated, make an exception for the insn right after
3854 nonscheduled_insns_begin. */
3855 {
3856 rtx skip_insn;
3857
3858 if (dbg_cnt (sched_insn) == false)
3859 skip_insn = next_nonnote_insn (nonscheduled_insns_begin);
3860 else
3861 skip_insn = NULL_RTX;
3862
3863 while (i < ready.n_ready)
3864 {
3865 rtx insn;
3866
3867 insn = ready_remove (&ready, i);
3868
3869 if (insn != skip_insn)
3870 queue_insn (insn, 1, "list truncated");
3871 }
3872 if (skip_insn)
3873 ready_add (&ready, skip_insn, true);
3874 }
3875 }
3876
3877 /* Now we can restore basic block notes and maintain precise cfg. */
3878 restore_bb_notes (*target_bb);
3879
3880 last_clock_var = -1;
3881
3882 advance = 0;
3883
3884 gcc_assert (VEC_length (rtx, scheduled_insns) == 0);
3885 sort_p = TRUE;
3886 must_backtrack = false;
3887 modulo_insns_scheduled = 0;
3888
3889 ls.modulo_epilogue = false;
3890
3891 /* Loop until all the insns in BB are scheduled. */
3892 while ((*current_sched_info->schedule_more_p) ())
3893 {
3894 do
3895 {
3896 start_clock_var = clock_var;
3897
3898 clock_var++;
3899
3900 advance_one_cycle ();
3901
3902 /* Add to the ready list all pending insns that can be issued now.
3903 If there are no ready insns, increment clock until one
3904 is ready and add all pending insns at that point to the ready
3905 list. */
3906 queue_to_ready (&ready);
3907
3908 gcc_assert (ready.n_ready);
3909
3910 if (sched_verbose >= 2)
3911 {
3912 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
3913 debug_ready_list (&ready);
3914 }
3915 advance -= clock_var - start_clock_var;
3916 }
3917 while (advance > 0);
3918
3919 if (ls.modulo_epilogue)
3920 {
3921 int stage = clock_var / modulo_ii;
3922 if (stage > modulo_last_stage * 2 + 2)
3923 {
3924 if (sched_verbose >= 2)
3925 fprintf (sched_dump,
3926 ";;\t\tmodulo scheduled succeeded at II %d\n",
3927 modulo_ii);
3928 success = true;
3929 goto end_schedule;
3930 }
3931 }
3932 else if (modulo_ii > 0)
3933 {
3934 int stage = clock_var / modulo_ii;
3935 if (stage > modulo_max_stages)
3936 {
3937 if (sched_verbose >= 2)
3938 fprintf (sched_dump,
3939 ";;\t\tfailing schedule due to excessive stages\n");
3940 goto end_schedule;
3941 }
3942 if (modulo_n_insns == modulo_insns_scheduled
3943 && stage > modulo_last_stage)
3944 {
3945 if (sched_verbose >= 2)
3946 fprintf (sched_dump,
3947 ";;\t\tfound kernel after %d stages, II %d\n",
3948 stage, modulo_ii);
3949 ls.modulo_epilogue = true;
3950 }
3951 }
3952
3953 prune_ready_list (temp_state, true, false, ls.modulo_epilogue);
3954 if (ready.n_ready == 0)
3955 continue;
3956 if (must_backtrack)
3957 goto do_backtrack;
3958
3959 ls.first_cycle_insn_p = true;
3960 ls.shadows_only_p = false;
3961 cycle_issued_insns = 0;
3962 ls.can_issue_more = issue_rate;
3963 for (;;)
3964 {
3965 rtx insn;
3966 int cost;
3967 bool asm_p;
3968
3969 if (sort_p && ready.n_ready > 0)
3970 {
3971 /* Sort the ready list based on priority. This must be
3972 done every iteration through the loop, as schedule_insn
3973 may have readied additional insns that will not be
3974 sorted correctly. */
3975 ready_sort (&ready);
3976
3977 if (sched_verbose >= 2)
3978 {
3979 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
3980 debug_ready_list (&ready);
3981 }
3982 }
3983
3984 /* We don't want md sched reorder to even see debug insns, so put
3985 them out right away. */
3986 if (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0))
3987 && (*current_sched_info->schedule_more_p) ())
3988 {
3989 while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
3990 {
3991 rtx insn = ready_remove_first (&ready);
3992 gcc_assert (DEBUG_INSN_P (insn));
3993 (*current_sched_info->begin_schedule_ready) (insn);
3994 VEC_safe_push (rtx, heap, scheduled_insns, insn);
3995 last_scheduled_insn = insn;
3996 advance = schedule_insn (insn);
3997 gcc_assert (advance == 0);
3998 if (ready.n_ready > 0)
3999 ready_sort (&ready);
4000 }
4001 }
4002
4003 if (ls.first_cycle_insn_p && !ready.n_ready)
4004 break;
4005
4006 resume_after_backtrack:
4007 /* Allow the target to reorder the list, typically for
4008 better instruction bundling. */
4009 if (sort_p
4010 && (ready.n_ready == 0
4011 || !SCHED_GROUP_P (ready_element (&ready, 0))))
4012 {
4013 if (ls.first_cycle_insn_p && targetm.sched.reorder)
4014 ls.can_issue_more
4015 = targetm.sched.reorder (sched_dump, sched_verbose,
4016 ready_lastpos (&ready),
4017 &ready.n_ready, clock_var);
4018 else if (!ls.first_cycle_insn_p && targetm.sched.reorder2)
4019 ls.can_issue_more
4020 = targetm.sched.reorder2 (sched_dump, sched_verbose,
4021 ready.n_ready
4022 ? ready_lastpos (&ready) : NULL,
4023 &ready.n_ready, clock_var);
4024 }
4025
4026 restart_choose_ready:
4027 if (sched_verbose >= 2)
4028 {
4029 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
4030 clock_var);
4031 debug_ready_list (&ready);
4032 if (sched_pressure_p)
4033 print_curr_reg_pressure ();
4034 }
4035
4036 if (ready.n_ready == 0
4037 && ls.can_issue_more
4038 && reload_completed)
4039 {
4040 /* Allow scheduling insns directly from the queue in case
4041 there's nothing better to do (ready list is empty) but
4042 there are still vacant dispatch slots in the current cycle. */
4043 if (sched_verbose >= 6)
4044 fprintf (sched_dump, ";;\t\tSecond chance\n");
4045 memcpy (temp_state, curr_state, dfa_state_size);
4046 if (early_queue_to_ready (temp_state, &ready))
4047 ready_sort (&ready);
4048 }
4049
4050 if (ready.n_ready == 0
4051 || !ls.can_issue_more
4052 || state_dead_lock_p (curr_state)
4053 || !(*current_sched_info->schedule_more_p) ())
4054 break;
4055
4056 /* Select and remove the insn from the ready list. */
4057 if (sort_p)
4058 {
4059 int res;
4060
4061 insn = NULL_RTX;
4062 res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
4063
4064 if (res < 0)
4065 /* Finish cycle. */
4066 break;
4067 if (res > 0)
4068 goto restart_choose_ready;
4069
4070 gcc_assert (insn != NULL_RTX);
4071 }
4072 else
4073 insn = ready_remove_first (&ready);
4074
4075 if (sched_pressure_p && INSN_TICK (insn) > clock_var)
4076 {
4077 ready_add (&ready, insn, true);
4078 advance = 1;
4079 break;
4080 }
4081
4082 if (targetm.sched.dfa_new_cycle
4083 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
4084 insn, last_clock_var,
4085 clock_var, &sort_p))
4086 /* SORT_P is used by the target to override sorting
4087 of the ready list. This is needed when the target
4088 has modified its internal structures expecting that
4089 the insn will be issued next. As we need the insn
4090 to have the highest priority (so it will be returned by
4091 the ready_remove_first call above), we invoke
4092 ready_add (&ready, insn, true).
4093 But, still, there is one issue: INSN can later be
4094 discarded by the scheduler's front end through
4095 current_sched_info->can_schedule_ready_p, and hence won't
4096 be issued next. */
4097 {
4098 ready_add (&ready, insn, true);
4099 break;
4100 }
4101
4102 sort_p = TRUE;
4103
4104 if (current_sched_info->can_schedule_ready_p
4105 && ! (*current_sched_info->can_schedule_ready_p) (insn))
4106 /* We normally get here only if we don't want to move
4107 insn from the split block. */
4108 {
4109 TODO_SPEC (insn) = HARD_DEP;
4110 goto restart_choose_ready;
4111 }
4112
4113 if (delay_htab)
4114 {
4115 /* If this insn is the first part of a delay-slot pair, record a
4116 backtrack point. */
4117 struct delay_pair *delay_entry;
4118 delay_entry
4119 = (struct delay_pair *)htab_find_with_hash (delay_htab, insn,
4120 htab_hash_pointer (insn));
4121 if (delay_entry)
4122 {
4123 save_backtrack_point (delay_entry, ls);
4124 if (sched_verbose >= 2)
4125 fprintf (sched_dump, ";;\t\tsaving backtrack point\n");
4126 }
4127 }
4128
4129 /* DECISION is made. */
4130
4131 if (modulo_ii > 0 && INSN_UID (insn) < modulo_iter0_max_uid)
4132 {
4133 modulo_insns_scheduled++;
4134 modulo_last_stage = clock_var / modulo_ii;
4135 }
4136 if (TODO_SPEC (insn) & SPECULATIVE)
4137 generate_recovery_code (insn);
4138
4139 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
4140 targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
4141
4142 /* Update counters, etc in the scheduler's front end. */
4143 (*current_sched_info->begin_schedule_ready) (insn);
4144 VEC_safe_push (rtx, heap, scheduled_insns, insn);
4145 gcc_assert (NONDEBUG_INSN_P (insn));
4146 last_nondebug_scheduled_insn = last_scheduled_insn = insn;
4147
4148 if (recog_memoized (insn) >= 0)
4149 {
4150 memcpy (temp_state, curr_state, dfa_state_size);
4151 cost = state_transition (curr_state, insn);
4152 if (!sched_pressure_p)
4153 gcc_assert (cost < 0);
4154 if (memcmp (temp_state, curr_state, dfa_state_size) != 0)
4155 cycle_issued_insns++;
4156 asm_p = false;
4157 }
4158 else
4159 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
4160 || asm_noperands (PATTERN (insn)) >= 0);
4161
4162 if (targetm.sched.variable_issue)
4163 ls.can_issue_more =
4164 targetm.sched.variable_issue (sched_dump, sched_verbose,
4165 insn, ls.can_issue_more);
4166 /* A naked CLOBBER or USE generates no instruction, so do
4167 not count them against the issue rate. */
4168 else if (GET_CODE (PATTERN (insn)) != USE
4169 && GET_CODE (PATTERN (insn)) != CLOBBER)
4170 ls.can_issue_more--;
4171 advance = schedule_insn (insn);
4172
4173 if (SHADOW_P (insn))
4174 ls.shadows_only_p = true;
4175
4176 /* After issuing an asm insn we should start a new cycle. */
4177 if (advance == 0 && asm_p)
4178 advance = 1;
4179
4180 if (must_backtrack)
4181 break;
4182
4183 if (advance != 0)
4184 break;
4185
4186 ls.first_cycle_insn_p = false;
4187 if (ready.n_ready > 0)
4188 prune_ready_list (temp_state, false, ls.shadows_only_p,
4189 ls.modulo_epilogue);
4190 }
4191
4192 do_backtrack:
4193 if (!must_backtrack)
4194 for (i = 0; i < ready.n_ready; i++)
4195 {
4196 rtx insn = ready_element (&ready, i);
4197 if (INSN_EXACT_TICK (insn) == clock_var)
4198 {
4199 must_backtrack = true;
4200 clock_var++;
4201 break;
4202 }
4203 }
4204 if (must_backtrack && modulo_ii > 0)
4205 {
4206 if (modulo_backtracks_left == 0)
4207 goto end_schedule;
4208 modulo_backtracks_left--;
4209 }
4210 while (must_backtrack)
4211 {
4212 struct haifa_saved_data *failed;
4213 rtx failed_insn;
4214
4215 must_backtrack = false;
4216 failed = verify_shadows ();
4217 gcc_assert (failed);
4218
4219 failed_insn = failed->delay_pair->i1;
4220 unschedule_insns_until (failed_insn);
4221 while (failed != backtrack_queue)
4222 free_topmost_backtrack_point (true);
4223 restore_last_backtrack_point (&ls);
4224 if (sched_verbose >= 2)
4225 fprintf (sched_dump, ";;\t\trewind to cycle %d\n", clock_var);
4226 /* Delay by at least a cycle. This could cause additional
4227 backtracking. */
4228 queue_insn (failed_insn, 1, "backtracked");
4229 advance = 0;
4230 if (must_backtrack)
4231 continue;
4232 if (ready.n_ready > 0)
4233 goto resume_after_backtrack;
4234 else
4235 {
4236 if (clock_var == 0 && ls.first_cycle_insn_p)
4237 goto end_schedule;
4238 advance = 1;
4239 break;
4240 }
4241 }
4242 }
4243 if (ls.modulo_epilogue)
4244 success = true;
4245 end_schedule:
4246 if (modulo_ii > 0)
4247 {
4248 /* Once again, debug insn suckiness: they can be on the ready list
4249 even if they have unresolved dependencies. To make our view
4250 of the world consistent, remove such "ready" insns. */
4251 restart_debug_insn_loop:
4252 for (i = ready.n_ready - 1; i >= 0; i--)
4253 {
4254 rtx x;
4255
4256 x = ready_element (&ready, i);
4257 if (DEPS_LIST_FIRST (INSN_HARD_BACK_DEPS (x)) != NULL
4258 || DEPS_LIST_FIRST (INSN_SPEC_BACK_DEPS (x)) != NULL)
4259 {
4260 ready_remove (&ready, i);
4261 goto restart_debug_insn_loop;
4262 }
4263 }
4264 for (i = ready.n_ready - 1; i >= 0; i--)
4265 {
4266 rtx x;
4267
4268 x = ready_element (&ready, i);
4269 resolve_dependencies (x);
4270 }
4271 for (i = 0; i <= max_insn_queue_index; i++)
4272 {
4273 rtx link;
4274 while ((link = insn_queue[i]) != NULL)
4275 {
4276 rtx x = XEXP (link, 0);
4277 insn_queue[i] = XEXP (link, 1);
4278 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4279 free_INSN_LIST_node (link);
4280 resolve_dependencies (x);
4281 }
4282 }
4283 }
4284
4285 /* Debug info. */
4286 if (sched_verbose)
4287 {
4288 fprintf (sched_dump, ";;\tReady list (final): ");
4289 debug_ready_list (&ready);
4290 }
4291
4292 if (modulo_ii == 0 && current_sched_info->queue_must_finish_empty)
4293 /* Sanity check -- queue must be empty now. Meaningless if region has
4294 multiple bbs. */
4295 gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
4296 else if (modulo_ii == 0)
4297 {
4298 /* We must maintain QUEUE_INDEX between blocks in region. */
4299 for (i = ready.n_ready - 1; i >= 0; i--)
4300 {
4301 rtx x;
4302
4303 x = ready_element (&ready, i);
4304 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4305 TODO_SPEC (x) = HARD_DEP;
4306 }
4307
4308 if (q_size)
4309 for (i = 0; i <= max_insn_queue_index; i++)
4310 {
4311 rtx link;
4312 for (link = insn_queue[i]; link; link = XEXP (link, 1))
4313 {
4314 rtx x;
4315
4316 x = XEXP (link, 0);
4317 QUEUE_INDEX (x) = QUEUE_NOWHERE;
4318 TODO_SPEC (x) = HARD_DEP;
4319 }
4320 free_INSN_LIST_list (&insn_queue[i]);
4321 }
4322 }
4323
4324 if (success)
4325 {
4326 commit_schedule (prev_head, tail, target_bb);
4327 if (sched_verbose)
4328 fprintf (sched_dump, ";; total time = %d\n", clock_var);
4329 }
4330 else
4331 last_scheduled_insn = tail;
4332
4333 VEC_truncate (rtx, scheduled_insns, 0);
4334
4335 if (!current_sched_info->queue_must_finish_empty
4336 || haifa_recovery_bb_recently_added_p)
4337 {
4338 /* INSN_TICK (minimum clock tick at which the insn becomes
4339 ready) may not be correct for insns in the subsequent
4340 blocks of the region. We should use a correct value of
4341 `clock_var' or modify INSN_TICK. It is better to keep
4342 the clock_var value equal to 0 at the start of a basic block.
4343 Therefore we modify INSN_TICK here. */
4344 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
4345 }
4346
4347 if (targetm.sched.finish)
4348 {
4349 targetm.sched.finish (sched_dump, sched_verbose);
4350 /* The target might have added some instructions to the scheduled block
4351 in its md_finish () hook. These new insns don't have any data
4352 initialized, so to identify them we extend h_i_d so that they'll
4353 get zero luids. */
4354 sched_extend_luids ();
4355 }
4356
4357 if (sched_verbose)
4358 fprintf (sched_dump, ";; new head = %d\n;; new tail = %d\n\n",
4359 INSN_UID (head), INSN_UID (tail));
4360
4361 /* Update head/tail boundaries. */
4362 head = NEXT_INSN (prev_head);
4363 tail = last_scheduled_insn;
4364
4365 head = restore_other_notes (head, NULL);
4366
4367 current_sched_info->head = head;
4368 current_sched_info->tail = tail;
4369
4370 free_backtrack_queue ();
4371
4372 return success;
4373 }
4374 \f
4375 /* Set_priorities: compute priority of each insn in the block. */
4376
4377 int
4378 set_priorities (rtx head, rtx tail)
4379 {
4380 rtx insn;
4381 int n_insn;
4382 int sched_max_insns_priority =
4383 current_sched_info->sched_max_insns_priority;
4384 rtx prev_head;
4385
4386 if (head == tail && ! INSN_P (head))
4387 gcc_unreachable ();
4388
4389 n_insn = 0;
4390
4391 prev_head = PREV_INSN (head);
4392 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
4393 {
4394 if (!INSN_P (insn))
4395 continue;
4396
4397 n_insn++;
4398 (void) priority (insn);
4399
4400 gcc_assert (INSN_PRIORITY_KNOWN (insn));
4401
4402 sched_max_insns_priority = MAX (sched_max_insns_priority,
4403 INSN_PRIORITY (insn));
4404 }
4405
4406 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
4407
4408 return n_insn;
4409 }
4410
4411 /* Set dump and sched_verbose for the desired debugging output. If no
4412 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
4413 For -fsched-verbose=N, N>=10, print everything to stderr. */
4414 void
4415 setup_sched_dump (void)
4416 {
4417 sched_verbose = sched_verbose_param;
4418 if (sched_verbose_param == 0 && dump_file)
4419 sched_verbose = 1;
4420 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
4421 ? stderr : dump_file);
4422 }
4423
4424 /* Initialize some global state for the scheduler. This function works
4425 with the common data shared between all the schedulers. It is called
4426 from the scheduler specific initialization routine. */
4427
4428 void
4429 sched_init (void)
4430 {
4431 /* Disable speculative loads in the presence of cc0. */
4432 #ifdef HAVE_cc0
4433 flag_schedule_speculative_load = 0;
4434 #endif
4435
4436 if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
4437 targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
4438
4439 sched_pressure_p = (flag_sched_pressure && ! reload_completed
4440 && common_sched_info->sched_pass_id == SCHED_RGN_PASS);
4441
4442 if (sched_pressure_p)
4443 ira_setup_eliminable_regset ();
4444
4445 /* Initialize SPEC_INFO. */
4446 if (targetm.sched.set_sched_flags)
4447 {
4448 spec_info = &spec_info_var;
4449 targetm.sched.set_sched_flags (spec_info);
4450
4451 if (spec_info->mask != 0)
4452 {
4453 spec_info->data_weakness_cutoff =
4454 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
4455 spec_info->control_weakness_cutoff =
4456 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF)
4457 * REG_BR_PROB_BASE) / 100;
4458 }
4459 else
4460 /* So we won't read anything accidentally. */
4461 spec_info = NULL;
4462
4463 }
4464 else
4465 /* So we won't read anything accidentally. */
4466 spec_info = 0;
4467
4468 /* Initialize issue_rate. */
4469 if (targetm.sched.issue_rate)
4470 issue_rate = targetm.sched.issue_rate ();
4471 else
4472 issue_rate = 1;
4473
4474 if (cached_issue_rate != issue_rate)
4475 {
4476 cached_issue_rate = issue_rate;
4477 /* To invalidate max_lookahead_tries: */
4478 cached_first_cycle_multipass_dfa_lookahead = 0;
4479 }
4480
4481 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
4482 dfa_lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
4483 else
4484 dfa_lookahead = 0;
4485
4486 if (targetm.sched.init_dfa_pre_cycle_insn)
4487 targetm.sched.init_dfa_pre_cycle_insn ();
4488
4489 if (targetm.sched.init_dfa_post_cycle_insn)
4490 targetm.sched.init_dfa_post_cycle_insn ();
4491
4492 dfa_start ();
4493 dfa_state_size = state_size ();
4494
4495 init_alias_analysis ();
4496
4497 if (!sched_no_dce)
4498 df_set_flags (DF_LR_RUN_DCE);
4499 df_note_add_problem ();
4500
4501 /* More df problems are needed for interloop dep calculation in SMS. */
4502 if (common_sched_info->sched_pass_id == SCHED_SMS_PASS)
4503 {
4504 df_rd_add_problem ();
4505 df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
4506 }
4507
4508 df_analyze ();
4509
4510 /* Do not run DCE after reload, as this can kill nops inserted
4511 by bundling. */
4512 if (reload_completed)
4513 df_clear_flags (DF_LR_RUN_DCE);
4514
4515 regstat_compute_calls_crossed ();
4516
4517 if (targetm.sched.init_global)
4518 targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
4519
4520 if (sched_pressure_p)
4521 {
4522 int i, max_regno = max_reg_num ();
4523
4524 ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
4525 sched_regno_pressure_class
4526 = (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
4527 for (i = 0; i < max_regno; i++)
4528 sched_regno_pressure_class[i]
4529 = (i < FIRST_PSEUDO_REGISTER
4530 ? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
4531 : ira_pressure_class_translate[reg_allocno_class (i)]);
4532 curr_reg_live = BITMAP_ALLOC (NULL);
4533 saved_reg_live = BITMAP_ALLOC (NULL);
4534 region_ref_regs = BITMAP_ALLOC (NULL);
4535 }
4536
4537 curr_state = xmalloc (dfa_state_size);
4538 }
4539
4540 static void haifa_init_only_bb (basic_block, basic_block);
4541
4542 /* Initialize data structures specific to the Haifa scheduler. */
4543 void
4544 haifa_sched_init (void)
4545 {
4546 setup_sched_dump ();
4547 sched_init ();
4548
4549 scheduled_insns = VEC_alloc (rtx, heap, 0);
4550
4551 if (spec_info != NULL)
4552 {
4553 sched_deps_info->use_deps_list = 1;
4554 sched_deps_info->generate_spec_deps = 1;
4555 }
4556
4557 /* Initialize luids, dependency caches, target and h_i_d for the
4558 whole function. */
4559 {
4560 bb_vec_t bbs = VEC_alloc (basic_block, heap, n_basic_blocks);
4561 basic_block bb;
4562
4563 sched_init_bbs ();
4564
4565 FOR_EACH_BB (bb)
4566 VEC_quick_push (basic_block, bbs, bb);
4567 sched_init_luids (bbs);
4568 sched_deps_init (true);
4569 sched_extend_target ();
4570 haifa_init_h_i_d (bbs);
4571
4572 VEC_free (basic_block, heap, bbs);
4573 }
4574
4575 sched_init_only_bb = haifa_init_only_bb;
4576 sched_split_block = sched_split_block_1;
4577 sched_create_empty_bb = sched_create_empty_bb_1;
4578 haifa_recovery_bb_ever_added_p = false;
4579
4580 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
4581 before_recovery = 0;
4582 after_recovery = 0;
4583
4584 modulo_ii = 0;
4585 }
4586
4587 /* Finish work with the data specific to the Haifa scheduler. */
4588 void
4589 haifa_sched_finish (void)
4590 {
4591 sched_create_empty_bb = NULL;
4592 sched_split_block = NULL;
4593 sched_init_only_bb = NULL;
4594
4595 if (spec_info && spec_info->dump)
4596 {
4597 char c = reload_completed ? 'a' : 'b';
4598
4599 fprintf (spec_info->dump,
4600 ";; %s:\n", current_function_name ());
4601
4602 fprintf (spec_info->dump,
4603 ";; Procedure %cr-begin-data-spec motions == %d\n",
4604 c, nr_begin_data);
4605 fprintf (spec_info->dump,
4606 ";; Procedure %cr-be-in-data-spec motions == %d\n",
4607 c, nr_be_in_data);
4608 fprintf (spec_info->dump,
4609 ";; Procedure %cr-begin-control-spec motions == %d\n",
4610 c, nr_begin_control);
4611 fprintf (spec_info->dump,
4612 ";; Procedure %cr-be-in-control-spec motions == %d\n",
4613 c, nr_be_in_control);
4614 }
4615
4616 VEC_free (rtx, heap, scheduled_insns);
4617
4618 /* Finalize h_i_d, dependency caches, and luids for the whole
4619 function. Target will be finalized in md_global_finish (). */
4620 sched_deps_finish ();
4621 sched_finish_luids ();
4622 current_sched_info = NULL;
4623 sched_finish ();
4624 }
4625
4626 /* Free global data used during insn scheduling. This function works with
4627 the common data shared between the schedulers. */
4628
4629 void
4630 sched_finish (void)
4631 {
4632 haifa_finish_h_i_d ();
4633 if (sched_pressure_p)
4634 {
4635 free (sched_regno_pressure_class);
4636 BITMAP_FREE (region_ref_regs);
4637 BITMAP_FREE (saved_reg_live);
4638 BITMAP_FREE (curr_reg_live);
4639 }
4640 free (curr_state);
4641
4642 if (targetm.sched.finish_global)
4643 targetm.sched.finish_global (sched_dump, sched_verbose);
4644
4645 end_alias_analysis ();
4646
4647 regstat_free_calls_crossed ();
4648
4649 dfa_finish ();
4650 }
4651
4652 /* Free all delay_pair structures that were recorded. */
4653 void
4654 free_delay_pairs (void)
4655 {
4656 if (delay_htab)
4657 {
4658 htab_empty (delay_htab);
4659 htab_empty (delay_htab_i2);
4660 }
4661 }
4662
4663 /* Fix INSN_TICKs of the instructions in the current block as well as
4664 INSN_TICKs of their dependents.
4665 HEAD and TAIL are the beginning and the end of the current scheduled block. */
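/* Worked example (illustrative): if the block just scheduled finished
   with clock_var == 7, NEXT_CLOCK below is 8, so an insn whose
   INSN_TICK was 9 in this block's time frame is rewritten to tick 1 in
   the next block's frame, keeping each block's clock starting at 0. */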
4666 static void
4667 fix_inter_tick (rtx head, rtx tail)
4668 {
4669 /* Set of instructions with corrected INSN_TICK. */
4670 bitmap_head processed;
4671 /* ??? It is doubtful whether we should assume that cycle advance happens
4672 on basic block boundaries. Basically, insns that are unconditionally
4673 ready at the start of the block are preferable to those which have
4674 a one cycle dependency on an insn from the previous block. */
4675 int next_clock = clock_var + 1;
4676
4677 bitmap_initialize (&processed, 0);
4678
4679 /* Iterates over scheduled instructions and fix their INSN_TICKs and
4680 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
4681 across different blocks. */
4682 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
4683 {
4684 if (INSN_P (head))
4685 {
4686 int tick;
4687 sd_iterator_def sd_it;
4688 dep_t dep;
4689
4690 tick = INSN_TICK (head);
4691 gcc_assert (tick >= MIN_TICK);
4692
4693 /* Fix INSN_TICK of instruction from just scheduled block. */
4694 if (bitmap_set_bit (&processed, INSN_LUID (head)))
4695 {
4696 tick -= next_clock;
4697
4698 if (tick < MIN_TICK)
4699 tick = MIN_TICK;
4700
4701 INSN_TICK (head) = tick;
4702 }
4703
4704 FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
4705 {
4706 rtx next;
4707
4708 next = DEP_CON (dep);
4709 tick = INSN_TICK (next);
4710
4711 if (tick != INVALID_TICK
4712 /* If NEXT has its INSN_TICK calculated, fix it.
4713 If not, it will be properly calculated from
4714 scratch later in fix_tick_ready. */
4715 && bitmap_set_bit (&processed, INSN_LUID (next)))
4716 {
4717 tick -= next_clock;
4718
4719 if (tick < MIN_TICK)
4720 tick = MIN_TICK;
4721
4722 if (tick > INTER_TICK (next))
4723 INTER_TICK (next) = tick;
4724 else
4725 tick = INTER_TICK (next);
4726
4727 INSN_TICK (next) = tick;
4728 }
4729 }
4730 }
4731 }
4732 bitmap_clear (&processed);
4733 }
4734
4735 static int haifa_speculate_insn (rtx, ds_t, rtx *);
4736
4737 /* Check if NEXT is ready to be added to the ready or queue list.
4738 If "yes", add it to the proper list.
4739 Returns:
4740 -1 - is not ready yet,
4741 0 - added to the ready list,
4742 0 < N - queued for N cycles. */
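/* Illustratively, if resolving NEXT's last hard dependence leaves
   INSN_TICK (NEXT) == 5 while clock_var == 2, NEXT is filed three
   slots ahead in the insn queue and 3 is returned; had INSN_TICK (NEXT)
   already been <= clock_var, NEXT would instead have been added
   directly to the ready list. */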
4743 int
4744 try_ready (rtx next)
4745 {
4746 ds_t old_ts, new_ts;
4747
4748 old_ts = TODO_SPEC (next);
4749
4750 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
4751 && ((old_ts & HARD_DEP)
4752 || (old_ts & SPECULATIVE)));
4753
4754 if (sd_lists_empty_p (next, SD_LIST_BACK))
4755 /* NEXT has all its dependencies resolved. */
4756 new_ts = 0;
4757 else
4758 {
4759 /* One of NEXT's dependencies has been resolved.
4760 Recalculate NEXT's status. */
4761
4762 if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
4763 new_ts = HARD_DEP;
4764 else
4765 /* Now we've got NEXT with speculative deps only.
4766 1. Look at the deps to see what we have to do.
4767 2. Check if we can do 'todo'. */
4768 {
4769 sd_iterator_def sd_it;
4770 dep_t dep;
4771 bool first_p = true;
4772
4773 new_ts = 0;
4774
4775 FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
4776 {
4777 ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
4778
4779 if (DEBUG_INSN_P (DEP_PRO (dep))
4780 && !DEBUG_INSN_P (next))
4781 continue;
4782
4783 if (first_p)
4784 {
4785 first_p = false;
4786
4787 new_ts = ds;
4788 }
4789 else
4790 new_ts = ds_merge (new_ts, ds);
4791 }
4792
4793 if (ds_weak (new_ts) < spec_info->data_weakness_cutoff)
4794 /* Too few points. */
4795 new_ts = HARD_DEP;
4796 }
4797 }
4798
4799 if (new_ts & HARD_DEP)
4800 gcc_assert (new_ts == HARD_DEP && new_ts == old_ts
4801 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
4802 else if (current_sched_info->new_ready)
4803 new_ts = current_sched_info->new_ready (next, new_ts);
4804
4805 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
4806 have either its original pattern or a changed (speculative) one. This
4807 is due to changing ebb in region scheduling.
4808 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
4809 has a speculative pattern.
4810
4811 We can't assert (!(new_ts & HARD_DEP) || new_ts == old_ts) here because
4812 control-speculative NEXT could have been discarded by sched-rgn.c
4813 (the same case as when discarded by can_schedule_ready_p ()). */
4814
4815 if ((new_ts & SPECULATIVE)
4816 /* If (old_ts == new_ts), then (old_ts & SPECULATIVE) and we don't
4817 need to change anything. */
4818 && new_ts != old_ts)
4819 {
4820 int res;
4821 rtx new_pat;
4822
4823 gcc_assert (!(new_ts & ~SPECULATIVE));
4824
4825 res = haifa_speculate_insn (next, new_ts, &new_pat);
4826
4827 switch (res)
4828 {
4829 case -1:
4830 /* It would be nice to change DEP_STATUS of all dependences,
4831 which have ((DEP_STATUS & SPECULATIVE) == new_ts) to HARD_DEP,
4832 so we won't reanalyze anything. */
4833 new_ts = HARD_DEP;
4834 break;
4835
4836 case 0:
4837 /* We follow the rule that every speculative insn
4838 has a non-null ORIG_PAT. */
4839 if (!ORIG_PAT (next))
4840 ORIG_PAT (next) = PATTERN (next);
4841 break;
4842
4843 case 1:
4844 if (!ORIG_PAT (next))
4845 /* If we are going to overwrite the original pattern of the insn,
4846 save it. */
4847 ORIG_PAT (next) = PATTERN (next);
4848
4849 haifa_change_pattern (next, new_pat);
4850 break;
4851
4852 default:
4853 gcc_unreachable ();
4854 }
4855 }
4856
4857 /* We need to restore the pattern only if (new_ts == 0), because otherwise it is
4858 either correct (new_ts & SPECULATIVE),
4859 or we simply don't care (new_ts & HARD_DEP). */
4860
4861 gcc_assert (!ORIG_PAT (next)
4862 || !IS_SPECULATION_BRANCHY_CHECK_P (next));
4863
4864 TODO_SPEC (next) = new_ts;
4865
4866 if (new_ts & HARD_DEP)
4867 {
4868 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
4869 control-speculative NEXT could have been discarded by sched-rgn.c
4870 (the same case as when discarded by can_schedule_ready_p ()). */
4871 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
4872
4873 change_queue_index (next, QUEUE_NOWHERE);
4874 return -1;
4875 }
4876 else if (!(new_ts & BEGIN_SPEC)
4877 && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
4878 /* We should change the pattern of every previously speculative
4879 instruction - and we determine whether NEXT was speculative by using
4880 the ORIG_PAT field. There is one exception: speculation checks have
4881 ORIG_PAT set too, so skip them. */
4882 {
4883 haifa_change_pattern (next, ORIG_PAT (next));
4884 ORIG_PAT (next) = 0;
4885 }
4886
4887 if (sched_verbose >= 2)
4888 {
4889 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
4890 (*current_sched_info->print_insn) (next, 0));
4891
4892 if (spec_info && spec_info->dump)
4893 {
4894 if (new_ts & BEGIN_DATA)
4895 fprintf (spec_info->dump, "; data-spec;");
4896 if (new_ts & BEGIN_CONTROL)
4897 fprintf (spec_info->dump, "; control-spec;");
4898 if (new_ts & BE_IN_CONTROL)
4899 fprintf (spec_info->dump, "; in-control-spec;");
4900 }
4901
4902 fprintf (sched_dump, "\n");
4903 }
4904
4905 adjust_priority (next);
4906
4907 return fix_tick_ready (next);
4908 }
4909
4910 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
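/* Illustrative example: when recalculating from scratch, if NEXT has
   resolved producers P1 with INSN_TICK 4 and dep_cost 2, and P2 with
   INSN_TICK 5 and dep_cost 0, the loop below yields
   INSN_TICK (NEXT) = MAX (4 + 2, 5 + 0) = 6; if clock_var is already
   6 or more, the delay is non-positive and NEXT becomes ready. */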
4911 static int
4912 fix_tick_ready (rtx next)
4913 {
4914 int tick, delay;
4915
4916 if (!DEBUG_INSN_P (next) && !sd_lists_empty_p (next, SD_LIST_RES_BACK))
4917 {
4918 int full_p;
4919 sd_iterator_def sd_it;
4920 dep_t dep;
4921
4922 tick = INSN_TICK (next);
4923 /* If tick is not equal to INVALID_TICK, then update
4924 INSN_TICK of NEXT with the most recent resolved dependence
4925 cost. Otherwise, recalculate it from scratch. */
4926 full_p = (tick == INVALID_TICK);
4927
4928 FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
4929 {
4930 rtx pro = DEP_PRO (dep);
4931 int tick1;
4932
4933 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
4934
4935 tick1 = INSN_TICK (pro) + dep_cost (dep);
4936 if (tick1 > tick)
4937 tick = tick1;
4938
4939 if (!full_p)
4940 break;
4941 }
4942 }
4943 else
4944 tick = -1;
4945
4946 INSN_TICK (next) = tick;
4947
4948 delay = tick - clock_var;
4949 if (delay <= 0 || sched_pressure_p)
4950 delay = QUEUE_READY;
4951
4952 change_queue_index (next, delay);
4953
4954 return delay;
4955 }
4956
4957 /* Move NEXT to the proper queue list with (DELAY >= 1),
4958 or add it to the ready list (DELAY == QUEUE_READY),
4959 or remove it from both the ready and queue lists (DELAY == QUEUE_NOWHERE). */
4960 static void
4961 change_queue_index (rtx next, int delay)
4962 {
4963 int i = QUEUE_INDEX (next);
4964
4965 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
4966 && delay != 0);
4967 gcc_assert (i != QUEUE_SCHEDULED);
4968
4969 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
4970 || (delay < 0 && delay == i))
4971 /* We have nothing to do. */
4972 return;
4973
4974 /* Remove NEXT from wherever it is now. */
4975 if (i == QUEUE_READY)
4976 ready_remove_insn (next);
4977 else if (i >= 0)
4978 queue_remove (next);
4979
4980 /* Add it to the proper place. */
4981 if (delay == QUEUE_READY)
4982 ready_add (readyp, next, false);
4983 else if (delay >= 1)
4984 queue_insn (next, delay, "change queue index");
4985
4986 if (sched_verbose >= 2)
4987 {
4988 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
4989 (*current_sched_info->print_insn) (next, 0));
4990
4991 if (delay == QUEUE_READY)
4992 fprintf (sched_dump, " into ready\n");
4993 else if (delay >= 1)
4994 fprintf (sched_dump, " into queue with cost=%d\n", delay);
4995 else
4996 fprintf (sched_dump, " removed from ready or queue lists\n");
4997 }
4998 }
4999
5000 static int sched_ready_n_insns = -1;
5001
5002 /* Initialize per region data structures. */
5003 void
5004 sched_extend_ready_list (int new_sched_ready_n_insns)
5005 {
5006 int i;
5007
5008 if (sched_ready_n_insns == -1)
5009 /* At the first call we need to initialize one more choice_stack
5010 entry. */
5011 {
5012 i = 0;
5013 sched_ready_n_insns = 0;
5014 VEC_reserve (rtx, heap, scheduled_insns, new_sched_ready_n_insns);
5015 }
5016 else
5017 i = sched_ready_n_insns + 1;
5018
5019 ready.veclen = new_sched_ready_n_insns + issue_rate;
5020 ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
5021
5022 gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
5023
5024 ready_try = (char *) xrecalloc (ready_try, new_sched_ready_n_insns,
5025 sched_ready_n_insns, sizeof (*ready_try));
5026
5027 /* We allocate +1 element to save initial state in the choice_stack[0]
5028 entry. */
5029 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
5030 new_sched_ready_n_insns + 1);
5031
5032 for (; i <= new_sched_ready_n_insns; i++)
5033 {
5034 choice_stack[i].state = xmalloc (dfa_state_size);
5035
5036 if (targetm.sched.first_cycle_multipass_init)
5037 targetm.sched.first_cycle_multipass_init (&(choice_stack[i]
5038 .target_data));
5039 }
5040
5041 sched_ready_n_insns = new_sched_ready_n_insns;
5042 }
5043
5044 /* Free per region data structures. */
5045 void
5046 sched_finish_ready_list (void)
5047 {
5048 int i;
5049
5050 free (ready.vec);
5051 ready.vec = NULL;
5052 ready.veclen = 0;
5053
5054 free (ready_try);
5055 ready_try = NULL;
5056
5057 for (i = 0; i <= sched_ready_n_insns; i++)
5058 {
5059 if (targetm.sched.first_cycle_multipass_fini)
5060 targetm.sched.first_cycle_multipass_fini (&(choice_stack[i]
5061 .target_data));
5062
5063 free (choice_stack [i].state);
5064 }
5065 free (choice_stack);
5066 choice_stack = NULL;
5067
5068 sched_ready_n_insns = -1;
5069 }
5070
5071 static int
5072 haifa_luid_for_non_insn (rtx x)
5073 {
5074 gcc_assert (NOTE_P (x) || LABEL_P (x));
5075
5076 return 0;
5077 }
5078
5079 /* Generates recovery code for INSN. */
5080 static void
5081 generate_recovery_code (rtx insn)
5082 {
5083 if (TODO_SPEC (insn) & BEGIN_SPEC)
5084 begin_speculative_block (insn);
5085
5086 /* Here we have an insn with no dependencies on
5087 instructions other than CHECK_SPEC ones. */
5088
5089 if (TODO_SPEC (insn) & BE_IN_SPEC)
5090 add_to_speculative_block (insn);
5091 }
5092
5093 /* Helper function.
5094 Tries to add speculative dependencies of type FS between the consumers
5095 of INSN's forward dependencies and TWIN. */
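/* For instance (illustrative): a true forward dependence whose status
   already carries BEGIN_SPEC bits may be rewritten below to
   (ds & ~BEGIN_SPEC) | FS when the consumer is legitimate for such
   speculation, weakening the dependence from "begin speculative" to
   "be in speculative"; for other true dependences FS is simply OR-ed
   into the status. */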
5096 static void
5097 process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
5098 {
5099 sd_iterator_def sd_it;
5100 dep_t dep;
5101
5102 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
5103 {
5104 ds_t ds;
5105 rtx consumer;
5106
5107 consumer = DEP_CON (dep);
5108
5109 ds = DEP_STATUS (dep);
5110
5111 if (/* If we want to create speculative dep. */
5112 fs
5113 /* And we can do that because this is a true dep. */
5114 && (ds & DEP_TYPES) == DEP_TRUE)
5115 {
5116 gcc_assert (!(ds & BE_IN_SPEC));
5117
5118 if (/* If this dep can be overcome with 'begin speculation'. */
5119 ds & BEGIN_SPEC)
5120 /* Then we have a choice: keep the dep 'begin speculative'
5121 or transform it into 'be in speculative'. */
5122 {
5123 if (/* In try_ready we assert that once an insn became ready
5124 it can be removed from the ready (or queue) list only
5125 due to a backend decision. Hence we can't let the
5126 probability of the speculative dep decrease. */
5127 ds_weak (ds) <= ds_weak (fs))
5128 {
5129 ds_t new_ds;
5130
5131 new_ds = (ds & ~BEGIN_SPEC) | fs;
5132
5133 if (/* consumer can 'be in speculative'. */
5134 sched_insn_is_legitimate_for_speculation_p (consumer,
5135 new_ds))
5136 /* Transform it to be in speculative. */
5137 ds = new_ds;
5138 }
5139 }
5140 else
5141 /* Mark the dep as 'be in speculative'. */
5142 ds |= fs;
5143 }
5144
5145 {
5146 dep_def _new_dep, *new_dep = &_new_dep;
5147
5148 init_dep_1 (new_dep, twin, consumer, DEP_TYPE (dep), ds);
5149 sd_add_dep (new_dep, false);
5150 }
5151 }
5152 }
5153
5154 /* Generates recovery code for BEGIN speculative INSN. */
5155 static void
5156 begin_speculative_block (rtx insn)
5157 {
5158 if (TODO_SPEC (insn) & BEGIN_DATA)
5159 nr_begin_data++;
5160 if (TODO_SPEC (insn) & BEGIN_CONTROL)
5161 nr_begin_control++;
5162
5163 create_check_block_twin (insn, false);
5164
5165 TODO_SPEC (insn) &= ~BEGIN_SPEC;
5166 }
5167
5168 static void haifa_init_insn (rtx);
5169
5170 /* Generates recovery code for BE_IN speculative INSN. */
5171 static void
5172 add_to_speculative_block (rtx insn)
5173 {
5174 ds_t ts;
5175 sd_iterator_def sd_it;
5176 dep_t dep;
5177 rtx twins = NULL;
5178 rtx_vec_t priorities_roots;
5179
5180 ts = TODO_SPEC (insn);
5181 gcc_assert (!(ts & ~BE_IN_SPEC));
5182
5183 if (ts & BE_IN_DATA)
5184 nr_be_in_data++;
5185 if (ts & BE_IN_CONTROL)
5186 nr_be_in_control++;
5187
5188 TODO_SPEC (insn) &= ~BE_IN_SPEC;
5189 gcc_assert (!TODO_SPEC (insn));
5190
5191 DONE_SPEC (insn) |= ts;
5192
5193 /* First we convert all simple checks to branchy. */
5194 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5195 sd_iterator_cond (&sd_it, &dep);)
5196 {
5197 rtx check = DEP_PRO (dep);
5198
5199 if (IS_SPECULATION_SIMPLE_CHECK_P (check))
5200 {
5201 create_check_block_twin (check, true);
5202
5203 /* Restart search. */
5204 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5205 }
5206 else
5207 /* Continue search. */
5208 sd_iterator_next (&sd_it);
5209 }
5210
5211 priorities_roots = NULL;
5212 clear_priorities (insn, &priorities_roots);
5213
5214 while (1)
5215 {
5216 rtx check, twin;
5217 basic_block rec;
5218
5219 /* Get the first backward dependency of INSN. */
5220 sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5221 if (!sd_iterator_cond (&sd_it, &dep))
5222 /* INSN has no backward dependencies left. */
5223 break;
5224
5225 gcc_assert ((DEP_STATUS (dep) & BEGIN_SPEC) == 0
5226 && (DEP_STATUS (dep) & BE_IN_SPEC) != 0
5227 && (DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
5228
5229 check = DEP_PRO (dep);
5230
5231 gcc_assert (!IS_SPECULATION_CHECK_P (check) && !ORIG_PAT (check)
5232 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
5233
5234 rec = BLOCK_FOR_INSN (check);
5235
5236 twin = emit_insn_before (copy_insn (PATTERN (insn)), BB_END (rec));
5237 haifa_init_insn (twin);
5238
5239 sd_copy_back_deps (twin, insn, true);
5240
5241 if (sched_verbose && spec_info->dump)
5242 /* INSN_BB (insn) isn't determined for twin insns yet.
5243 So we can't use current_sched_info->print_insn. */
5244 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
5245 INSN_UID (twin), rec->index);
5246
5247 twins = alloc_INSN_LIST (twin, twins);
5248
5249 /* Add dependences between TWIN and all appropriate
5250 instructions from REC. */
5251 FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
5252 {
5253 rtx pro = DEP_PRO (dep);
5254
5255 gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
5256
5257 /* INSN might have dependencies from the instructions from
5258 several recovery blocks. At this iteration we process those
5259 producers that reside in REC. */
5260 if (BLOCK_FOR_INSN (pro) == rec)
5261 {
5262 dep_def _new_dep, *new_dep = &_new_dep;
5263
5264 init_dep (new_dep, pro, twin, REG_DEP_TRUE);
5265 sd_add_dep (new_dep, false);
5266 }
5267 }
5268
5269 process_insn_forw_deps_be_in_spec (insn, twin, ts);
5270
5271 /* Remove all dependencies between INSN and insns in REC. */
5272 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5273 sd_iterator_cond (&sd_it, &dep);)
5274 {
5275 rtx pro = DEP_PRO (dep);
5276
5277 if (BLOCK_FOR_INSN (pro) == rec)
5278 sd_delete_dep (sd_it);
5279 else
5280 sd_iterator_next (&sd_it);
5281 }
5282 }
5283
5284 /* We couldn't have added the dependencies between INSN and TWINS earlier
5285 because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
5286 while (twins)
5287 {
5288 rtx twin;
5289
5290 twin = XEXP (twins, 0);
5291
5292 {
5293 dep_def _new_dep, *new_dep = &_new_dep;
5294
5295 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
5296 sd_add_dep (new_dep, false);
5297 }
5298
5299 twin = XEXP (twins, 1);
5300 free_INSN_LIST_node (twins);
5301 twins = twin;
5302 }
5303
5304 calc_priorities (priorities_roots);
5305 VEC_free (rtx, heap, priorities_roots);
5306 }
5307
5308 /* Extends the array pointed to by P and zero-fills only the new part. */
5309 void *
5310 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
5311 {
5312 gcc_assert (new_nmemb >= old_nmemb);
5313 p = XRESIZEVAR (void, p, new_nmemb * size);
5314 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
5315 return p;
5316 }
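/* Illustrative use, mirroring how ready_try is grown in
   sched_extend_ready_list above (NEW_N and OLD_N are placeholder
   counts):

     ready_try = (char *) xrecalloc (ready_try, new_n, old_n,
                                     sizeof (*ready_try));

   Only elements old_n .. new_n - 1 are zeroed; the others keep their
   previous contents. */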
5317
5318 /* Helper function.
5319 Find fallthru edge from PRED. */
5320 edge
5321 find_fallthru_edge_from (basic_block pred)
5322 {
5323 edge e;
5324 basic_block succ;
5325
5326 succ = pred->next_bb;
5327 gcc_assert (succ->prev_bb == pred);
5328
5329 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
5330 {
5331 e = find_fallthru_edge (pred->succs);
5332
5333 if (e)
5334 {
5335 gcc_assert (e->dest == succ);
5336 return e;
5337 }
5338 }
5339 else
5340 {
5341 e = find_fallthru_edge (succ->preds);
5342
5343 if (e)
5344 {
5345 gcc_assert (e->src == pred);
5346 return e;
5347 }
5348 }
5349
5350 return NULL;
5351 }
5352
5353 /* Extend per basic block data structures. */
5354 static void
5355 sched_extend_bb (void)
5356 {
5357 rtx insn;
5358
5359 /* The following is done to keep current_sched_info->next_tail non-null. */
5360 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
5361 if (NEXT_INSN (insn) == 0
5362 || (!NOTE_P (insn)
5363 && !LABEL_P (insn)
5364 /* Don't emit a NOTE if it would end up before a BARRIER. */
5365 && !BARRIER_P (NEXT_INSN (insn))))
5366 {
5367 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
5368 /* Make insn appear outside BB. */
5369 set_block_for_insn (note, NULL);
5370 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
5371 }
5372 }
5373
5374 /* Init per basic block data structures. */
5375 void
5376 sched_init_bbs (void)
5377 {
5378 sched_extend_bb ();
5379 }
5380
5381 /* Initialize BEFORE_RECOVERY variable. */
5382 static void
5383 init_before_recovery (basic_block *before_recovery_ptr)
5384 {
5385 basic_block last;
5386 edge e;
5387
5388 last = EXIT_BLOCK_PTR->prev_bb;
5389 e = find_fallthru_edge_from (last);
5390
5391 if (e)
5392 {
5393 /* We create two basic blocks:
5394 1. A single-instruction block that is inserted right after E->SRC
5395 and jumps to
5396 2. An empty block placed right before EXIT_BLOCK.
5397 Recovery blocks will be emitted between these two blocks. */
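/* Illustratively, the CFG around the exit block is transformed from

     last ->> EXIT

   into

     last ->> single -> empty ->> EXIT

   where ->> denotes a fallthru edge, matching the dump message emitted
   at the end of this function. */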
5398
5399 basic_block single, empty;
5400 rtx x, label;
5401
5402 /* If the fallthru edge to the exit block that we've found comes from
5403 the block we created before, there is nothing more to do. */
5404 if (last == after_recovery)
5405 return;
5406
5407 adding_bb_to_current_region_p = false;
5408
5409 single = sched_create_empty_bb (last);
5410 empty = sched_create_empty_bb (single);
5411
5412 /* Add new blocks to the root loop. */
5413 if (current_loops != NULL)
5414 {
5415 add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
5416 add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
5417 }
5418
5419 single->count = last->count;
5420 empty->count = last->count;
5421 single->frequency = last->frequency;
5422 empty->frequency = last->frequency;
5423 BB_COPY_PARTITION (single, last);
5424 BB_COPY_PARTITION (empty, last);
5425
5426 redirect_edge_succ (e, single);
5427 make_single_succ_edge (single, empty, 0);
5428 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
5429 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
5430
5431 label = block_label (empty);
5432 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
5433 JUMP_LABEL (x) = label;
5434 LABEL_NUSES (label)++;
5435 haifa_init_insn (x);
5436
5437 emit_barrier_after (x);
5438
5439 sched_init_only_bb (empty, NULL);
5440 sched_init_only_bb (single, NULL);
5441 sched_extend_bb ();
5442
5443 adding_bb_to_current_region_p = true;
5444 before_recovery = single;
5445 after_recovery = empty;
5446
5447 if (before_recovery_ptr)
5448 *before_recovery_ptr = before_recovery;
5449
5450 if (sched_verbose >= 2 && spec_info->dump)
5451 fprintf (spec_info->dump,
5452 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
5453 last->index, single->index, empty->index);
5454 }
5455 else
5456 before_recovery = last;
5457 }
5458
5459 /* Returns new recovery block. */
5460 basic_block
5461 sched_create_recovery_block (basic_block *before_recovery_ptr)
5462 {
5463 rtx label;
5464 rtx barrier;
5465 basic_block rec;
5466
5467 haifa_recovery_bb_recently_added_p = true;
5468 haifa_recovery_bb_ever_added_p = true;
5469
5470 init_before_recovery (before_recovery_ptr);
5471
5472 barrier = get_last_bb_insn (before_recovery);
5473 gcc_assert (BARRIER_P (barrier));
5474
5475 label = emit_label_after (gen_label_rtx (), barrier);
5476
5477 rec = create_basic_block (label, label, before_recovery);
5478
5479 /* A recovery block always ends with an unconditional jump. */
5480 emit_barrier_after (BB_END (rec));
5481
5482 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
5483 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
5484
5485 if (sched_verbose && spec_info->dump)
5486 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
5487 rec->index);
5488
5489 return rec;
5490 }
5491
5492 /* Create edges: FIRST_BB -> REC; FIRST_BB -> SECOND_BB; REC -> SECOND_BB
5493 and emit necessary jumps. */
5494 void
5495 sched_create_recovery_edges (basic_block first_bb, basic_block rec,
5496 basic_block second_bb)
5497 {
5498 rtx label;
5499 rtx jump;
5500 int edge_flags;
5501
5502 /* This is fixing of incoming edge. */
5503 /* ??? Which other flags should be specified? */
5504 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
5505 /* Partition type is the same if it is "unpartitioned". */
5506 edge_flags = EDGE_CROSSING;
5507 else
5508 edge_flags = 0;
5509
5510 make_edge (first_bb, rec, edge_flags);
5511 label = block_label (second_bb);
5512 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
5513 JUMP_LABEL (jump) = label;
5514 LABEL_NUSES (label)++;
5515
5516 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
5517 /* Partition type is the same if it is "unpartitioned". */
5518 {
5519 /* Rewritten from cfgrtl.c. */
5520 if (flag_reorder_blocks_and_partition
5521 && targetm_common.have_named_sections)
5522 {
5523 /* We don't need the same note for the check because
5524 any_condjump_p (check) == true. */
5525 add_reg_note (jump, REG_CROSSING_JUMP, NULL_RTX);
5526 }
5527 edge_flags = EDGE_CROSSING;
5528 }
5529 else
5530 edge_flags = 0;
5531
5532 make_single_succ_edge (rec, second_bb, edge_flags);
5533 if (dom_info_available_p (CDI_DOMINATORS))
5534 set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
5535 }
5536
5537 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
5538 INSN is a simple check, that should be converted to branchy one. */
5539 static void
5540 create_check_block_twin (rtx insn, bool mutate_p)
5541 {
5542 basic_block rec;
5543 rtx label, check, twin;
5544 ds_t fs;
5545 sd_iterator_def sd_it;
5546 dep_t dep;
5547 dep_def _new_dep, *new_dep = &_new_dep;
5548 ds_t todo_spec;
5549
5550 gcc_assert (ORIG_PAT (insn) != NULL_RTX);
5551
5552 if (!mutate_p)
5553 todo_spec = TODO_SPEC (insn);
5554 else
5555 {
5556 gcc_assert (IS_SPECULATION_SIMPLE_CHECK_P (insn)
5557 && (TODO_SPEC (insn) & SPECULATIVE) == 0);
5558
5559 todo_spec = CHECK_SPEC (insn);
5560 }
5561
5562 todo_spec &= SPECULATIVE;
5563
5564 /* Create recovery block. */
5565 if (mutate_p || targetm.sched.needs_block_p (todo_spec))
5566 {
5567 rec = sched_create_recovery_block (NULL);
5568 label = BB_HEAD (rec);
5569 }
5570 else
5571 {
5572 rec = EXIT_BLOCK_PTR;
5573 label = NULL_RTX;
5574 }
5575
5576 /* Emit CHECK. */
5577 check = targetm.sched.gen_spec_check (insn, label, todo_spec);
5578
5579 if (rec != EXIT_BLOCK_PTR)
5580 {
5581 /* To have mem_reg alive at the beginning of second_bb,
5582 we emit the check BEFORE insn, so that after splitting,
5583 insn will be at the beginning of second_bb, which will
5584 provide us with the correct life information. */
5585 check = emit_jump_insn_before (check, insn);
5586 JUMP_LABEL (check) = label;
5587 LABEL_NUSES (label)++;
5588 }
5589 else
5590 check = emit_insn_before (check, insn);
5591
5592 /* Extend data structures. */
5593 haifa_init_insn (check);
5594
5595 /* CHECK is being added to current region. Extend ready list. */
5596 gcc_assert (sched_ready_n_insns != -1);
5597 sched_extend_ready_list (sched_ready_n_insns + 1);
5598
5599 if (current_sched_info->add_remove_insn)
5600 current_sched_info->add_remove_insn (insn, 0);
5601
5602 RECOVERY_BLOCK (check) = rec;
5603
5604 if (sched_verbose && spec_info->dump)
5605 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
5606 (*current_sched_info->print_insn) (check, 0));
5607
5608 gcc_assert (ORIG_PAT (insn));
5609
5610 /* Initialize TWIN (twin is a duplicate of original instruction
5611 in the recovery block). */
5612 if (rec != EXIT_BLOCK_PTR)
5613 {
5614 sd_iterator_def sd_it;
5615 dep_t dep;
5616
5617 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
5618 if ((DEP_STATUS (dep) & DEP_OUTPUT) != 0)
5619 {
5620 struct _dep _dep2, *dep2 = &_dep2;
5621
5622 init_dep (dep2, DEP_PRO (dep), check, REG_DEP_TRUE);
5623
5624 sd_add_dep (dep2, true);
5625 }
5626
5627 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
5628 haifa_init_insn (twin);
5629
5630 if (sched_verbose && spec_info->dump)
5631 /* INSN_BB (insn) isn't determined for twin insns yet.
5632 So we can't use current_sched_info->print_insn. */
5633 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
5634 INSN_UID (twin), rec->index);
5635 }
5636 else
5637 {
5638 ORIG_PAT (check) = ORIG_PAT (insn);
5639 HAS_INTERNAL_DEP (check) = 1;
5640 twin = check;
5641 /* ??? We probably should change all OUTPUT dependencies to
5642 (TRUE | OUTPUT). */
5643 }
5644
5645 /* Copy all resolved back dependencies of INSN to TWIN. This will
5646 provide correct value for INSN_TICK (TWIN). */
5647 sd_copy_back_deps (twin, insn, true);
5648
5649 if (rec != EXIT_BLOCK_PTR)
5650 /* In case of branchy check, fix CFG. */
5651 {
5652 basic_block first_bb, second_bb;
5653 rtx jump;
5654
5655 first_bb = BLOCK_FOR_INSN (check);
5656 second_bb = sched_split_block (first_bb, check);
5657
5658 sched_create_recovery_edges (first_bb, rec, second_bb);
5659
5660 sched_init_only_bb (second_bb, first_bb);
5661 sched_init_only_bb (rec, EXIT_BLOCK_PTR);
5662
5663 jump = BB_END (rec);
5664 haifa_init_insn (jump);
5665 }
5666
5667 /* Move backward dependences from INSN to CHECK and
5668 move forward dependences from INSN to TWIN. */
5669
5670 /* First, create dependencies between INSN's producers and CHECK & TWIN. */
5671 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
5672 {
5673 rtx pro = DEP_PRO (dep);
5674 ds_t ds;
5675
5676 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
5677 check --TRUE--> producer ??? or ANTI ???
5678 twin --TRUE--> producer
5679 twin --ANTI--> check
5680
5681 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
5682 check --ANTI--> producer
5683 twin --ANTI--> producer
5684 twin --ANTI--> check
5685
5686 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
5687 check ~~TRUE~~> producer
5688 twin ~~TRUE~~> producer
5689 twin --ANTI--> check */
5690
5691 ds = DEP_STATUS (dep);
5692
5693 if (ds & BEGIN_SPEC)
5694 {
5695 gcc_assert (!mutate_p);
5696 ds &= ~BEGIN_SPEC;
5697 }
5698
5699 init_dep_1 (new_dep, pro, check, DEP_TYPE (dep), ds);
5700 sd_add_dep (new_dep, false);
5701
5702 if (rec != EXIT_BLOCK_PTR)
5703 {
5704 DEP_CON (new_dep) = twin;
5705 sd_add_dep (new_dep, false);
5706 }
5707 }
5708
5709 /* Second, remove backward dependencies of INSN. */
5710 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
5711 sd_iterator_cond (&sd_it, &dep);)
5712 {
5713 if ((DEP_STATUS (dep) & BEGIN_SPEC)
5714 || mutate_p)
5715 /* We can delete this dep because it is overcome by the
5716 BEGIN speculation. */
5717 sd_delete_dep (sd_it);
5718 else
5719 sd_iterator_next (&sd_it);
5720 }
5721
5722 /* Future speculations. Determine what the BE_IN speculations will look like. */
5723 fs = 0;
5724
5725 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
5726 here. */
5727
5728 gcc_assert (!DONE_SPEC (insn));
5729
5730 if (!mutate_p)
5731 {
5732 ds_t ts = TODO_SPEC (insn);
5733
5734 DONE_SPEC (insn) = ts & BEGIN_SPEC;
5735 CHECK_SPEC (check) = ts & BEGIN_SPEC;
5736
5737 /* The weakness of the future speculations depends solely on the
5738 initial BEGIN speculation. */
5739 if (ts & BEGIN_DATA)
5740 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
5741 if (ts & BEGIN_CONTROL)
5742 fs = set_dep_weak (fs, BE_IN_CONTROL,
5743 get_dep_weak (ts, BEGIN_CONTROL));
5744 }
5745 else
5746 CHECK_SPEC (check) = CHECK_SPEC (insn);
5747
5748 /* Future speculations: call the helper. */
5749 process_insn_forw_deps_be_in_spec (insn, twin, fs);
5750
5751 if (rec != EXIT_BLOCK_PTR)
5752 {
5753 /* Which types of dependencies to use here is, in general,
5754 a machine-dependent question... But, for now,
5755 it is not. */
5756
5757 if (!mutate_p)
5758 {
5759 init_dep (new_dep, insn, check, REG_DEP_TRUE);
5760 sd_add_dep (new_dep, false);
5761
5762 init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
5763 sd_add_dep (new_dep, false);
5764 }
5765 else
5766 {
5767 if (spec_info->dump)
5768 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
5769 (*current_sched_info->print_insn) (insn, 0));
5770
5771 /* Remove all dependencies of the INSN. */
5772 {
5773 sd_it = sd_iterator_start (insn, (SD_LIST_FORW
5774 | SD_LIST_BACK
5775 | SD_LIST_RES_BACK));
5776 while (sd_iterator_cond (&sd_it, &dep))
5777 sd_delete_dep (sd_it);
5778 }
5779
5780 /* If the former check (INSN) was already moved to the ready (or
5781 queue) list, add the new check (CHECK) there too. */
5782 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
5783 try_ready (check);
5784
5785 /* Remove the old check from the instruction stream and free
5786 its data. */
5787 sched_remove_insn (insn);
5788 }
5789
5790 init_dep (new_dep, check, twin, REG_DEP_ANTI);
5791 sd_add_dep (new_dep, false);
5792 }
5793 else
5794 {
5795 init_dep_1 (new_dep, insn, check, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
5796 sd_add_dep (new_dep, false);
5797 }
5798
5799 if (!mutate_p)
5800 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
5801 because it'll be done later in add_to_speculative_block. */
5802 {
5803 rtx_vec_t priorities_roots = NULL;
5804
5805 clear_priorities (twin, &priorities_roots);
5806 calc_priorities (priorities_roots);
5807 VEC_free (rtx, heap, priorities_roots);
5808 }
5809 }
5810
5811 /* Remove dependencies between instructions in the recovery block REC
5812 and the usual region instructions. Inner dependencies are kept so
5813 that it won't be necessary to recompute them. */
5814 static void
5815 fix_recovery_deps (basic_block rec)
5816 {
5817 rtx note, insn, jump, ready_list = 0;
5818 bitmap_head in_ready;
5819 rtx link;
5820
5821 bitmap_initialize (&in_ready, 0);
5822
5823 /* NOTE - a basic block note. */
5824 note = NEXT_INSN (BB_HEAD (rec));
5825 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5826 insn = BB_END (rec);
5827 gcc_assert (JUMP_P (insn));
5828 insn = PREV_INSN (insn);
5829
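  /* Walk REC backwards, from the insn before its final jump down to the
     basic-block note, deleting forward dependencies whose consumer lies
     outside REC; each such consumer is collected once (via the IN_READY
     bitmap) so that it can be re-examined by try_ready below.  */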
5830 do
5831 {
5832 sd_iterator_def sd_it;
5833 dep_t dep;
5834
5835 for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
5836 sd_iterator_cond (&sd_it, &dep);)
5837 {
5838 rtx consumer = DEP_CON (dep);
5839
5840 if (BLOCK_FOR_INSN (consumer) != rec)
5841 {
5842 sd_delete_dep (sd_it);
5843
5844 if (bitmap_set_bit (&in_ready, INSN_LUID (consumer)))
5845 ready_list = alloc_INSN_LIST (consumer, ready_list);
5846 }
5847 else
5848 {
5849 gcc_assert ((DEP_STATUS (dep) & DEP_TYPES) == DEP_TRUE);
5850
5851 sd_iterator_next (&sd_it);
5852 }
5853 }
5854
5855 insn = PREV_INSN (insn);
5856 }
5857 while (insn != note);
5858
5859 bitmap_clear (&in_ready);
5860
5861 /* Try to add instructions to the ready or queue list. */
5862 for (link = ready_list; link; link = XEXP (link, 1))
5863 try_ready (XEXP (link, 0));
5864 free_INSN_LIST_list (&ready_list);
5865
5866 /* Fix the jump's dependences. */
5867 insn = BB_HEAD (rec);
5868 jump = BB_END (rec);
5869
5870 gcc_assert (LABEL_P (insn));
5871 insn = NEXT_INSN (insn);
5872
5873 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
5874 add_jump_dependencies (insn, jump);
5875 }
5876
5877 /* Change the pattern of INSN to NEW_PAT. */
5878 void
5879 sched_change_pattern (rtx insn, rtx new_pat)
5880 {
5881 sd_iterator_def sd_it;
5882 dep_t dep;
5883 int t;
5884
5885 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
5886 gcc_assert (t);
5887 dfa_clear_single_insn_cache (insn);
5888
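  /* Invalidate the cached cost of every dependence involving INSN;
     the costs will be recomputed on demand for the new pattern.  */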
5889 for (sd_it = sd_iterator_start (insn, (SD_LIST_FORW | SD_LIST_BACK
5890 | SD_LIST_RES_BACK));
5891 sd_iterator_cond (&sd_it, &dep);)
5892 {
5893 DEP_COST (dep) = UNKNOWN_DEP_COST;
5894 sd_iterator_next (&sd_it);
5895 }
5896 }
5897
5898 /* Change the pattern of INSN to NEW_PAT. Invalidate cached haifa
5899 instruction data. */
5900 static void
5901 haifa_change_pattern (rtx insn, rtx new_pat)
5902 {
5903 sched_change_pattern (insn, new_pat);
5904
5905 /* Invalidate INSN_COST, so it'll be recalculated. */
5906 INSN_COST (insn) = -1;
5907 /* Invalidate INSN_TICK, so it'll be recalculated. */
5908 INSN_TICK (insn) = INVALID_TICK;
5909 }
5910
5911 /* Return -1 if INSN can't be speculated,
5912 0 if for speculation mode REQUEST it is OK to use the
5913 current instruction pattern, or
5914 1 if the pattern must be changed: *NEW_PAT then holds it. */
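/* A typical caller (a sketch only, not code from this file):

     rtx new_pat;
     switch (sched_speculate_insn (insn, request, &new_pat))
       {
       case -1: ... speculation is impossible ...  break;
       case 0:  ... the current pattern is OK as is ...  break;
       case 1:  haifa_change_pattern (insn, new_pat);  break;
       }  */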
5915 int
5916 sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
5917 {
5918 gcc_assert (current_sched_info->flags & DO_SPECULATION
5919 && (request & SPECULATIVE)
5920 && sched_insn_is_legitimate_for_speculation_p (insn, request));
5921
5922 if ((request & spec_info->mask) != request)
5923 return -1;
5924
5925 if (request & BE_IN_SPEC
5926 && !(request & BEGIN_SPEC))
5927 return 0;
5928
5929 return targetm.sched.speculate_insn (insn, request, new_pat);
5930 }
5931
5932 static int
5933 haifa_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
5934 {
5935 gcc_assert (sched_deps_info->generate_spec_deps
5936 && !IS_SPECULATION_CHECK_P (insn));
5937
5938 if (HAS_INTERNAL_DEP (insn)
5939 || SCHED_GROUP_P (insn))
5940 return -1;
5941
5942 return sched_speculate_insn (insn, request, new_pat);
5943 }
5944
5945 /* Print some information about block BB, which starts with HEAD and
5946 ends with TAIL, before scheduling it.
5947 I is zero if the scheduler is about to start with a fresh ebb. */
5948 static void
5949 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
5950 {
5951 if (!i)
5952 fprintf (sched_dump,
5953 ";; ======================================================\n");
5954 else
5955 fprintf (sched_dump,
5956 ";; =====================ADVANCING TO=====================\n");
5957 fprintf (sched_dump,
5958 ";; -- basic block %d from %d to %d -- %s reload\n",
5959 bb->index, INSN_UID (head), INSN_UID (tail),
5960 (reload_completed ? "after" : "before"));
5961 fprintf (sched_dump,
5962 ";; ======================================================\n");
5963 fprintf (sched_dump, "\n");
5964 }
5965
5966 /* Unlink basic block notes and labels and save them, so they
5967 can easily be restored. We unlink basic block notes in the EBB to
5968 provide backward compatibility with the previous code, as target
5969 backends assume that there will be only instructions between
5970 current_sched_info->{head and tail}. We restore these notes as soon
5971 as we can.
5972 FIRST (LAST) is the first (last) basic block in the ebb.
5973 NB: In the usual case (FIRST == LAST) nothing is actually done. */
5974 void
5975 unlink_bb_notes (basic_block first, basic_block last)
5976 {
5977 /* We DON'T unlink basic block notes of the first block in the ebb. */
5978 if (first == last)
5979 return;
5980
5981 bb_header = XNEWVEC (rtx, last_basic_block);
5982
5983 /* Make a sentinel. */
5984 if (last->next_bb != EXIT_BLOCK_PTR)
5985 bb_header[last->next_bb->index] = 0;
5986
5987 first = first->next_bb;
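  /* FIRST is now the second block of the ebb. Walk backwards from LAST
     down to it, unlinking each block's label and basic-block note from
     the insn chain and saving them in bb_header.  */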
5988 do
5989 {
5990 rtx prev, label, note, next;
5991
5992 label = BB_HEAD (last);
5993 if (LABEL_P (label))
5994 note = NEXT_INSN (label);
5995 else
5996 note = label;
5997 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
5998
5999 prev = PREV_INSN (label);
6000 next = NEXT_INSN (note);
6001 gcc_assert (prev && next);
6002
6003 NEXT_INSN (prev) = next;
6004 PREV_INSN (next) = prev;
6005
6006 bb_header[last->index] = label;
6007
6008 if (last == first)
6009 break;
6010
6011 last = last->prev_bb;
6012 }
6013 while (1);
6014 }
6015
6016 /* Restore basic block notes.
6017 FIRST is the first basic block in the ebb. */
6018 static void
6019 restore_bb_notes (basic_block first)
6020 {
6021 if (!bb_header)
6022 return;
6023
6024 /* We DON'T unlink basic block notes of the first block in the ebb,
so there is nothing to restore for it. */
6025 first = first->next_bb;
6026 /* Remember: FIRST is actually the second basic block in the ebb. */
6027
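  /* Splice each saved label/note pair back into the insn chain,
     stopping at the first block whose header was not saved.  */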
6028 while (first != EXIT_BLOCK_PTR
6029 && bb_header[first->index])
6030 {
6031 rtx prev, label, note, next;
6032
6033 label = bb_header[first->index];
6034 prev = PREV_INSN (label);
6035 next = NEXT_INSN (prev);
6036
6037 if (LABEL_P (label))
6038 note = NEXT_INSN (label);
6039 else
6040 note = label;
6041 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6042
6043 bb_header[first->index] = 0;
6044
6045 NEXT_INSN (prev) = label;
6046 NEXT_INSN (note) = next;
6047 PREV_INSN (next) = note;
6048
6049 first = first->next_bb;
6050 }
6051
6052 free (bb_header);
6053 bb_header = 0;
6054 }
6055
6056 /* Helper function.
6057 Fix the CFG after both in-block and interblock movement of
6058 control_flow_insn_p JUMP. */
6059 static void
6060 fix_jump_move (rtx jump)
6061 {
6062 basic_block bb, jump_bb, jump_bb_next;
6063
6064 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
6065 jump_bb = BLOCK_FOR_INSN (jump);
6066 jump_bb_next = jump_bb->next_bb;
6067
6068 gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
6069 || IS_SPECULATION_BRANCHY_CHECK_P (jump));
6070
6071 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
6072 /* If jump_bb_next is not empty. */
6073 BB_END (jump_bb) = BB_END (jump_bb_next);
6074
6075 if (BB_END (bb) != PREV_INSN (jump))
6076 /* Then there are instructions after jump that should be moved
6077 to jump_bb_next. */
6078 BB_END (jump_bb_next) = BB_END (bb);
6079 else
6080 /* Otherwise jump_bb_next is empty. */
6081 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
6082
6083 /* To make assertion in move_insn happy. */
6084 BB_END (bb) = PREV_INSN (jump);
6085
6086 update_bb_for_insn (jump_bb_next);
6087 }
6088
6089 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
6090 static void
6091 move_block_after_check (rtx jump)
6092 {
6093 basic_block bb, jump_bb, jump_bb_next;
6094 VEC(edge,gc) *t;
6095
6096 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
6097 jump_bb = BLOCK_FOR_INSN (jump);
6098 jump_bb_next = jump_bb->next_bb;
6099
6100 update_bb_for_insn (jump_bb);
6101
6102 gcc_assert (IS_SPECULATION_CHECK_P (jump)
6103 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
6104
6105 unlink_block (jump_bb_next);
6106 link_block (jump_bb_next, bb);
6107
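  /* Rotate the successor lists: BB takes JUMP_BB's successors, JUMP_BB
     takes JUMP_BB_NEXT's, and JUMP_BB_NEXT receives BB's original
     successors.  */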
6108 t = bb->succs;
6109 bb->succs = 0;
6110 move_succs (&(jump_bb->succs), bb);
6111 move_succs (&(jump_bb_next->succs), jump_bb);
6112 move_succs (&t, jump_bb_next);
6113
6114 df_mark_solutions_dirty ();
6115
6116 common_sched_info->fix_recovery_cfg
6117 (bb->index, jump_bb->index, jump_bb_next->index);
6118 }
6119
6120 /* Helper function for move_block_after_check.
6121 This function attaches the edge vector pointed to by SUCCSP to
6122 block TO. */
6123 static void
6124 move_succs (VEC(edge,gc) **succsp, basic_block to)
6125 {
6126 edge e;
6127 edge_iterator ei;
6128
6129 gcc_assert (to->succs == 0);
6130
6131 to->succs = *succsp;
6132
6133 FOR_EACH_EDGE (e, ei, to->succs)
6134 e->src = to;
6135
6136 *succsp = 0;
6137 }
6138
6139 /* Remove INSN from the instruction stream.
6140 INSN should not have any dependencies. */
6141 static void
6142 sched_remove_insn (rtx insn)
6143 {
6144 sd_finish_insn (insn);
6145
6146 change_queue_index (insn, QUEUE_NOWHERE);
6147 current_sched_info->add_remove_insn (insn, 1);
6148 remove_insn (insn);
6149 }
6150
6151 /* Clear the priorities of all instructions that are forward dependent
6152 on INSN. Store in the vector pointed to by ROOTS_PTR the insns on which
6153 priority () should be invoked to initialize all cleared priorities. */
6154 static void
6155 clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
6156 {
6157 sd_iterator_def sd_it;
6158 dep_t dep;
6159 bool insn_is_root_p = true;
6160
6161 gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
6162
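  /* Invalidate the priorities of INSN's producers (a producer's priority
     depends on the priorities of its consumers) and recurse on them.  */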
6163 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6164 {
6165 rtx pro = DEP_PRO (dep);
6166
6167 if (INSN_PRIORITY_STATUS (pro) >= 0
6168 && QUEUE_INDEX (insn) != QUEUE_SCHEDULED)
6169 {
6170 /* If DEP doesn't contribute to priority then INSN itself should
6171 be added to priority roots. */
6172 if (contributes_to_priority_p (dep))
6173 insn_is_root_p = false;
6174
6175 INSN_PRIORITY_STATUS (pro) = -1;
6176 clear_priorities (pro, roots_ptr);
6177 }
6178 }
6179
6180 if (insn_is_root_p)
6181 VEC_safe_push (rtx, heap, *roots_ptr, insn);
6182 }
6183
6184 /* Recompute the priorities of instructions whose priorities might have
6185 been changed. ROOTS is a vector of instructions whose priority
6186 computation will trigger initialization of all cleared priorities. */
6187 static void
6188 calc_priorities (rtx_vec_t roots)
6189 {
6190 int i;
6191 rtx insn;
6192
6193 FOR_EACH_VEC_ELT (rtx, roots, i, insn)
6194 priority (insn);
6195 }
6196
6197
6198 /* Add dependences between JUMP and other instructions in the recovery
6199 block. INSN is the first insn in the recovery block. */
6200 static void
6201 add_jump_dependencies (rtx insn, rtx jump)
6202 {
6203 do
6204 {
6205 insn = NEXT_INSN (insn);
6206 if (insn == jump)
6207 break;
6208
6209 if (dep_list_size (insn) == 0)
6210 {
6211 dep_def _new_dep, *new_dep = &_new_dep;
6212
6213 init_dep (new_dep, insn, jump, REG_DEP_ANTI);
6214 sd_add_dep (new_dep, false);
6215 }
6216 }
6217 while (1);
6218
6219 gcc_assert (!sd_lists_empty_p (jump, SD_LIST_BACK));
6220 }
6221
6222 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
6223 rtx
6224 bb_note (basic_block bb)
6225 {
6226 rtx note;
6227
6228 note = BB_HEAD (bb);
6229 if (LABEL_P (note))
6230 note = NEXT_INSN (note);
6231
6232 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
6233 return note;
6234 }
6235
6236 /* Extend data structures for logical insn UID. */
6237 void
6238 sched_extend_luids (void)
6239 {
6240 int new_luids_max_uid = get_max_uid () + 1;
6241
6242 VEC_safe_grow_cleared (int, heap, sched_luids, new_luids_max_uid);
6243 }
6244
6245 /* Initialize LUID for INSN. */
6246 void
6247 sched_init_insn_luid (rtx insn)
6248 {
6249 int i = INSN_P (insn) ? 1 : common_sched_info->luid_for_non_insn (insn);
6250 int luid;
6251
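  /* I is the number of luids INSN should consume: 1 for a normal insn,
     whatever luid_for_non_insn reports for a note or label; a negative
     I means INSN gets no luid at all.  */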
6252 if (i >= 0)
6253 {
6254 luid = sched_max_luid;
6255 sched_max_luid += i;
6256 }
6257 else
6258 luid = -1;
6259
6260 SET_INSN_LUID (insn, luid);
6261 }
6262
6263 /* Initialize luids for BBS.
6264 The hook common_sched_info->luid_for_non_insn () is used to determine
6265 if notes, labels, etc. need luids. */
6266 void
6267 sched_init_luids (bb_vec_t bbs)
6268 {
6269 int i;
6270 basic_block bb;
6271
6272 sched_extend_luids ();
6273 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
6274 {
6275 rtx insn;
6276
6277 FOR_BB_INSNS (bb, insn)
6278 sched_init_insn_luid (insn);
6279 }
6280 }
6281
6282 /* Free LUIDs. */
6283 void
6284 sched_finish_luids (void)
6285 {
6286 VEC_free (int, heap, sched_luids);
6287 sched_max_luid = 1;
6288 }
6289
6290 /* Return logical uid of INSN. Helpful while debugging. */
6291 int
6292 insn_luid (rtx insn)
6293 {
6294 return INSN_LUID (insn);
6295 }
6296
6297 /* Extend per insn data in the target. */
6298 void
6299 sched_extend_target (void)
6300 {
6301 if (targetm.sched.h_i_d_extended)
6302 targetm.sched.h_i_d_extended ();
6303 }
6304
6305 /* Extend global scheduler structures (those that live across calls to
6306 schedule_block) to include information about a just-emitted insn. */
6307 static void
6308 extend_h_i_d (void)
6309 {
6310 int reserve = (get_max_uid () + 1
6311 - VEC_length (haifa_insn_data_def, h_i_d));
6312 if (reserve > 0
6313 && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
6314 {
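      /* Grow to one and a half times the maximal uid, so that a run of
	 single-insn extensions is amortized.  */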
6315 VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
6316 3 * get_max_uid () / 2);
6317 sched_extend_target ();
6318 }
6319 }
6320
6321 /* Initialize the h_i_d entry of INSN with default values.
6322 Values that are not explicitly initialized here hold zero. */
6323 static void
6324 init_h_i_d (rtx insn)
6325 {
6326 if (INSN_LUID (insn) > 0)
6327 {
6328 INSN_COST (insn) = -1;
6329 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
6330 INSN_TICK (insn) = INVALID_TICK;
6331 INSN_EXACT_TICK (insn) = INVALID_TICK;
6332 INTER_TICK (insn) = INVALID_TICK;
6333 TODO_SPEC (insn) = HARD_DEP;
6334 }
6335 }
6336
6337 /* Initialize haifa_insn_data for BBS. */
6338 void
6339 haifa_init_h_i_d (bb_vec_t bbs)
6340 {
6341 int i;
6342 basic_block bb;
6343
6344 extend_h_i_d ();
6345 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
6346 {
6347 rtx insn;
6348
6349 FOR_BB_INSNS (bb, insn)
6350 init_h_i_d (insn);
6351 }
6352 }
6353
6354 /* Finalize haifa_insn_data. */
6355 void
6356 haifa_finish_h_i_d (void)
6357 {
6358 int i;
6359 haifa_insn_data_t data;
6360 struct reg_use_data *use, *next;
6361
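  /* Free the register-pressure bookkeeping attached to each insn.  */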
6362 FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
6363 {
6364 free (data->reg_pressure);
6365 for (use = data->reg_use_list; use != NULL; use = next)
6366 {
6367 next = use->next_insn_use;
6368 free (use);
6369 }
6370 }
6371 VEC_free (haifa_insn_data_def, heap, h_i_d);
6372 }
6373
6374 /* Init data for the new insn INSN. */
6375 static void
6376 haifa_init_insn (rtx insn)
6377 {
6378 gcc_assert (insn != NULL);
6379
6380 sched_extend_luids ();
6381 sched_init_insn_luid (insn);
6382 sched_extend_target ();
6383 sched_deps_init (false);
6384 extend_h_i_d ();
6385 init_h_i_d (insn);
6386
6387 if (adding_bb_to_current_region_p)
6388 {
6389 sd_init_insn (insn);
6390
6391 /* Extend dependency caches by one element. */
6392 extend_dependency_caches (1, false);
6393 }
6394 if (sched_pressure_p)
6395 init_insn_reg_pressure_info (insn);
6396 }
6397
6398 /* Init data for the new basic block BB which comes after AFTER. */
6399 static void
6400 haifa_init_only_bb (basic_block bb, basic_block after)
6401 {
6402 gcc_assert (bb != NULL);
6403
6404 sched_init_bbs ();
6405
6406 if (common_sched_info->add_block)
6407 /* This changes only data structures of the front-end. */
6408 common_sched_info->add_block (bb, after);
6409 }
6410
6411 /* A generic version of sched_split_block (). */
6412 basic_block
6413 sched_split_block_1 (basic_block first_bb, rtx after)
6414 {
6415 edge e;
6416
6417 e = split_block (first_bb, after);
6418 gcc_assert (e->src == first_bb);
6419
6420 /* sched_split_block emits note if *check == BB_END. Probably it
6421 is better to rip that note off. */
6422
6423 return e->dest;
6424 }
6425
6426 /* A generic version of sched_create_empty_bb (). */
6427 basic_block
6428 sched_create_empty_bb_1 (basic_block after)
6429 {
6430 return create_empty_bb (after);
6431 }
6432
6433 /* Insert PAT as an INSN into the schedule and update the necessary data
6434 structures to account for it. */
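/* For example, a back end could materialize a stall by calling
   "sched_emit_insn (gen_nop ())" from one of its scheduling hooks;
   this is hypothetical usage and assumes the target provides a nop
   pattern. */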
6435 rtx
6436 sched_emit_insn (rtx pat)
6437 {
6438 rtx insn = emit_insn_before (pat, nonscheduled_insns_begin);
6439 haifa_init_insn (insn);
6440
6441 if (current_sched_info->add_remove_insn)
6442 current_sched_info->add_remove_insn (insn, 0);
6443
6444 (*current_sched_info->begin_schedule_ready) (insn);
6445 VEC_safe_push (rtx, heap, scheduled_insns, insn);
6446
6447 last_scheduled_insn = insn;
6448 return insn;
6449 }
6450
6451 /* This function returns a candidate from the ready list that satisfies
6452 the dispatch constraints. */
6453
6454 static rtx
6455 ready_remove_first_dispatch (struct ready_list *ready)
6456 {
6457 int i;
6458 rtx insn = ready_element (ready, 0);
6459
6460 if (ready->n_ready == 1
6461 || INSN_CODE (insn) < 0
6462 || !INSN_P (insn)
6463 || !active_insn_p (insn)
6464 || targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
6465 return ready_remove_first (ready);
6466
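  /* The insn at the head of the ready list does not fit the dispatch
     window; scan the remaining ready insns for one that does.  */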
6467 for (i = 1; i < ready->n_ready; i++)
6468 {
6469 insn = ready_element (ready, i);
6470
6471 if (INSN_CODE (insn) < 0
6472 || !INSN_P (insn)
6473 || !active_insn_p (insn))
6474 continue;
6475
6476 if (targetm.sched.dispatch (insn, FITS_DISPATCH_WINDOW))
6477 {
6478 /* Return the i-th element of ready. */
6479 insn = ready_remove (ready, i);
6480 return insn;
6481 }
6482 }
6483
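  /* No ready insn fits the dispatch window. If the window has already
     been violated, fall back to the insn at the head of the ready
     list.  */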
6484 if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
6485 return ready_remove_first (ready);
6486
6487 for (i = 1; i < ready->n_ready; i++)
6488 {
6489 insn = ready_element (ready, i);
6490
6491 if (INSN_CODE (insn) < 0
6492 || !INSN_P (insn)
6493 || !active_insn_p (insn))
6494 continue;
6495
6496 /* Return i-th element of ready. */
6497 if (targetm.sched.dispatch (insn, IS_CMP))
6498 return ready_remove (ready, i);
6499 }
6500
6501 return ready_remove_first (ready);
6502 }
6503
6504 /* Get the number of ready insns in the ready list. */
6505
6506 int
6507 number_in_ready (void)
6508 {
6509 return ready.n_ready;
6510 }
6511
6512 /* Get the I-th ready insn from the ready list. */
6513
6514 rtx
6515 get_ready_element (int i)
6516 {
6517 return ready_element (&ready, i);
6518 }
6519
6520 #endif /* INSN_SCHEDULING */